diff --git a/src/main/java/org/apache/commons/math3/exception/TooManyIterationsException.java b/src/main/java/org/apache/commons/math3/exception/TooManyIterationsException.java
new file mode 100644
index 000000000..dee58997f
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/exception/TooManyIterationsException.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.exception;
+
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+
+/**
+ * Exception to be thrown when the maximal number of iterations is exceeded.
+ *
+ * @since 3.1
+ * @version $Id$
+ */
+public class TooManyIterationsException extends MaxCountExceededException {
+ /** Serializable version Id. */
+ private static final long serialVersionUID = 20121211L;
+
+ /**
+ * Construct the exception.
+ *
+ * @param max Maximum number of iterations.
+ */
+ public TooManyIterationsException(Number max) {
+ super(max);
+ getContext().addMessage(LocalizedFormats.ITERATIONS);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/exception/util/LocalizedFormats.java b/src/main/java/org/apache/commons/math3/exception/util/LocalizedFormats.java
index 93c57e616..a9428f6c1 100644
--- a/src/main/java/org/apache/commons/math3/exception/util/LocalizedFormats.java
+++ b/src/main/java/org/apache/commons/math3/exception/util/LocalizedFormats.java
@@ -148,6 +148,7 @@ public enum LocalizedFormats implements Localizable {
INVALID_REGRESSION_OBSERVATION("length of regressor array = {0} does not match the number of variables = {1} in the model"),
INVALID_ROUNDING_METHOD("invalid rounding method {0}, valid methods: {1} ({2}), {3} ({4}), {5} ({6}), {7} ({8}), {9} ({10}), {11} ({12}), {13} ({14}), {15} ({16})"),
ITERATOR_EXHAUSTED("iterator exhausted"),
+ ITERATIONS("iterations"), /* keep */
LCM_OVERFLOW_32_BITS("overflow: lcm({0}, {1}) is 2^31"),
LCM_OVERFLOW_64_BITS("overflow: lcm({0}, {1}) is 2^63"),
LIST_OF_CHROMOSOMES_BIGGER_THAN_POPULATION_SIZE("list of chromosomes bigger than maxPopulationSize"),
diff --git a/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java
new file mode 100644
index 000000000..e5c202473
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+
+/**
+ * Fitter for parametric univariate real functions y = f(x).
+ *
+ * When a univariate real function y = f(x) does depend on some
+ * unknown parameters p0, p1 ... pn-1,
+ * this class can be used to find these parameters. It does this
+ * by fitting the curve so it remains very close to a set of
+ * observed points (x0, y0), (x1,
+ * y1) ... (xk-1, yk-1). This fitting
+ * is done by finding the parameters values that minimizes the objective
+ * function ∑(yi-f(xi))2. This is
+ * really a least squares problem.
+ *
+ * @param <T> Function to use for the fit.
+ *
+ * @version $Id: CurveFitter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class CurveFitter<T extends ParametricUnivariateFunction> {
+ /** Optimizer to use for the fitting. */
+ private final MultivariateVectorOptimizer optimizer;
+ /** Observed points. */
+ private final List<WeightedObservedPoint> observations;
+
+ /**
+ * Simple constructor.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ * @since 3.1
+ */
+ public CurveFitter(final MultivariateVectorOptimizer optimizer) {
+ this.optimizer = optimizer;
+ observations = new ArrayList<WeightedObservedPoint>();
+ }
+
+ /** Add an observed (x,y) point to the sample with unit weight.
+ * Calling this method is equivalent to call
+ * {@code addObservedPoint(1.0, x, y)}.
+ * @param x abscissa of the point
+ * @param y observed value of the point at x, after fitting we should
+ * have f(x) as close as possible to this value
+ * @see #addObservedPoint(double, double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(double x, double y) {
+ addObservedPoint(1.0, x, y);
+ }
+
+ /** Add an observed weighted (x,y) point to the sample.
+ * @param weight weight of the observed point in the fit
+ * @param x abscissa of the point
+ * @param y observed value of the point at x, after fitting we should
+ * have f(x) as close as possible to this value
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(double weight, double x, double y) {
+ observations.add(new WeightedObservedPoint(weight, x, y));
+ }
+
+ /** Add an observed weighted (x,y) point to the sample.
+ * @param observed observed point to add
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(double, double, double)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(WeightedObservedPoint observed) {
+ observations.add(observed);
+ }
+
+ /** Get the observed points.
+ * @return observed points
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(double, double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ */
+ public WeightedObservedPoint[] getObservations() {
+ return observations.toArray(new WeightedObservedPoint[observations.size()]);
+ }
+
+ /**
+ * Remove all observations.
+ */
+ public void clearObservations() {
+ observations.clear();
+ }
+
+ /**
+ * Fit a curve.
+ * This method compute the coefficients of the curve that best
+ * fit the sample of observed points previously given through calls
+ * to the {@link #addObservedPoint(WeightedObservedPoint)
+ * addObservedPoint} method.
+ *
+ * @param f parametric function to fit.
+ * @param initialGuess first guess of the function parameters.
+ * @return the fitted parameters.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the start point dimension is wrong.
+ */
+ public double[] fit(T f, final double[] initialGuess) {
+ return fit(Integer.MAX_VALUE, f, initialGuess);
+ }
+
+ /**
+ * Fit a curve.
+ * This method compute the coefficients of the curve that best
+ * fit the sample of observed points previously given through calls
+ * to the {@link #addObservedPoint(WeightedObservedPoint)
+ * addObservedPoint} method.
+ *
+ * @param f parametric function to fit.
+ * @param initialGuess first guess of the function parameters.
+ * @param maxEval Maximum number of function evaluations.
+ * @return the fitted parameters.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the number of allowed evaluations is exceeded.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the start point dimension is wrong.
+ * @since 3.0
+ */
+ public double[] fit(int maxEval, T f,
+ final double[] initialGuess) {
+ // Prepare least squares problem.
+ double[] target = new double[observations.size()];
+ double[] weights = new double[observations.size()];
+ int i = 0;
+ for (WeightedObservedPoint point : observations) {
+ target[i] = point.getY();
+ weights[i] = point.getWeight();
+ ++i;
+ }
+
+ // Input to the optimizer: the model and its Jacobian.
+ final TheoreticalValuesFunction model = new TheoreticalValuesFunction(f);
+
+ // Perform the fit.
+ final PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(maxEval),
+ model.getModelFunction(),
+ model.getModelFunctionJacobian(),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(initialGuess));
+ // Extract the coefficients.
+ return optimum.getPointRef();
+ }
+
+ /** Vectorial function computing function theoretical values. */
+ private class TheoreticalValuesFunction {
+ /** Function to fit. */
+ private final ParametricUnivariateFunction f;
+
+ /**
+ * @param f function to fit.
+ */
+ public TheoreticalValuesFunction(final ParametricUnivariateFunction f) {
+ this.f = f;
+ }
+
+ /**
+ * @return the model function values.
+ */
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ /** {@inheritDoc} */
+ public double[] value(double[] point) {
+ // compute the residuals
+ final double[] values = new double[observations.size()];
+ int i = 0;
+ for (WeightedObservedPoint observed : observations) {
+ values[i++] = f.value(observed.getX(), point);
+ }
+
+ return values;
+ }
+ });
+ }
+
+ /**
+ * @return the model function Jacobian.
+ */
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] point) {
+ final double[][] jacobian = new double[observations.size()][];
+ int i = 0;
+ for (WeightedObservedPoint observed : observations) {
+ jacobian[i++] = f.gradient(observed.getX(), point);
+ }
+ return jacobian;
+ }
+ });
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java b/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java
new file mode 100644
index 000000000..f7c132668
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import org.apache.commons.math3.analysis.function.Gaussian;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Fits points to a {@link
+ * org.apache.commons.math3.analysis.function.Gaussian.Parametric Gaussian} function.
+ *
+ * Usage example:
+ *
+ * GaussianFitter fitter = new GaussianFitter(
+ * new LevenbergMarquardtOptimizer());
+ * fitter.addObservedPoint(4.0254623, 531026.0);
+ * fitter.addObservedPoint(4.03128248, 984167.0);
+ * fitter.addObservedPoint(4.03839603, 1887233.0);
+ * fitter.addObservedPoint(4.04421621, 2687152.0);
+ * fitter.addObservedPoint(4.05132976, 3461228.0);
+ * fitter.addObservedPoint(4.05326982, 3580526.0);
+ * fitter.addObservedPoint(4.05779662, 3439750.0);
+ * fitter.addObservedPoint(4.0636168, 2877648.0);
+ * fitter.addObservedPoint(4.06943698, 2175960.0);
+ * fitter.addObservedPoint(4.07525716, 1447024.0);
+ * fitter.addObservedPoint(4.08237071, 717104.0);
+ * fitter.addObservedPoint(4.08366408, 620014.0);
+ * double[] parameters = fitter.fit();
+ *
+ *
+ * @since 2.2
+ * @version $Id: GaussianFitter.java 1416643 2012-12-03 19:37:14Z tn $
+ */
+public class GaussianFitter extends CurveFitter<Gaussian.Parametric> {
+ /**
+ * Constructs an instance using the specified optimizer.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public GaussianFitter(MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Fits a Gaussian function to the observed points.
+ *
+ * @param initialGuess First guess values in the following order:
+ *
+ * - Norm
+ * - Mean
+ * - Sigma
+ *
+ * @return the parameters of the Gaussian function that best fits the
+ * observed points (in the same order as above).
+ * @since 3.0
+ */
+ public double[] fit(double[] initialGuess) {
+ final Gaussian.Parametric f = new Gaussian.Parametric() {
+ @Override
+ public double value(double x, double ... p) {
+ double v = Double.POSITIVE_INFINITY;
+ try {
+ v = super.value(x, p);
+ } catch (NotStrictlyPositiveException e) {
+ // Do nothing.
+ }
+ return v;
+ }
+
+ @Override
+ public double[] gradient(double x, double ... p) {
+ double[] v = { Double.POSITIVE_INFINITY,
+ Double.POSITIVE_INFINITY,
+ Double.POSITIVE_INFINITY };
+ try {
+ v = super.gradient(x, p);
+ } catch (NotStrictlyPositiveException e) {
+ // Do nothing.
+ }
+ return v;
+ }
+ };
+
+ return fit(f, initialGuess);
+ }
+
+ /**
+ * Fits a Gaussian function to the observed points.
+ *
+ * @return the parameters of the Gaussian function that best fits the
+ * observed points (in the same order as above).
+ */
+ public double[] fit() {
+ final double[] guess = (new ParameterGuesser(getObservations())).guess();
+ return fit(guess);
+ }
+
+ /**
+ * Guesses the parameters {@code norm}, {@code mean}, and {@code sigma}
+ * of a {@link org.apache.commons.math3.analysis.function.Gaussian.Parametric}
+ * based on the specified observed points.
+ */
+ public static class ParameterGuesser {
+ /** Normalization factor. */
+ private final double norm;
+ /** Mean. */
+ private final double mean;
+ /** Standard deviation. */
+ private final double sigma;
+
+ /**
+ * Constructs instance with the specified observed points.
+ *
+ * @param observations Observed points from which to guess the
+ * parameters of the Gaussian.
+ * @throws NullArgumentException if {@code observations} is
+ * {@code null}.
+ * @throws NumberIsTooSmallException if there are less than 3
+ * observations.
+ */
+ public ParameterGuesser(WeightedObservedPoint[] observations) {
+ if (observations == null) {
+ throw new NullArgumentException(LocalizedFormats.INPUT_ARRAY);
+ }
+ if (observations.length < 3) {
+ throw new NumberIsTooSmallException(observations.length, 3, true);
+ }
+
+ final WeightedObservedPoint[] sorted = sortObservations(observations);
+ final double[] params = basicGuess(sorted);
+
+ norm = params[0];
+ mean = params[1];
+ sigma = params[2];
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ *
+ * - Normalization factor
+ * - Mean
+ * - Standard deviation
+ *
+ */
+ public double[] guess() {
+ return new double[] { norm, mean, sigma };
+ }
+
+ /**
+ * Sort the observations.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
+ final WeightedObservedPoint[] observations = unsorted.clone();
+ final Comparator<WeightedObservedPoint> cmp
+ = new Comparator<WeightedObservedPoint>() {
+ public int compare(WeightedObservedPoint p1,
+ WeightedObservedPoint p2) {
+ if (p1 == null && p2 == null) {
+ return 0;
+ }
+ if (p1 == null) {
+ return -1;
+ }
+ if (p2 == null) {
+ return 1;
+ }
+ if (p1.getX() < p2.getX()) {
+ return -1;
+ }
+ if (p1.getX() > p2.getX()) {
+ return 1;
+ }
+ if (p1.getY() < p2.getY()) {
+ return -1;
+ }
+ if (p1.getY() > p2.getY()) {
+ return 1;
+ }
+ if (p1.getWeight() < p2.getWeight()) {
+ return -1;
+ }
+ if (p1.getWeight() > p2.getWeight()) {
+ return 1;
+ }
+ return 0;
+ }
+ };
+
+ Arrays.sort(observations, cmp);
+ return observations;
+ }
+
+ /**
+ * Guesses the parameters based on the specified observed points.
+ *
+ * @param points Observed points, sorted.
+ * @return the guessed parameters (normalization factor, mean and
+ * sigma).
+ */
+ private double[] basicGuess(WeightedObservedPoint[] points) {
+ final int maxYIdx = findMaxY(points);
+ final double n = points[maxYIdx].getY();
+ final double m = points[maxYIdx].getX();
+
+ double fwhmApprox;
+ try {
+ final double halfY = n + ((m - n) / 2);
+ final double fwhmX1 = interpolateXAtY(points, maxYIdx, -1, halfY);
+ final double fwhmX2 = interpolateXAtY(points, maxYIdx, 1, halfY);
+ fwhmApprox = fwhmX2 - fwhmX1;
+ } catch (OutOfRangeException e) {
+ // TODO: Exceptions should not be used for flow control.
+ fwhmApprox = points[points.length - 1].getX() - points[0].getX();
+ }
+ final double s = fwhmApprox / (2 * FastMath.sqrt(2 * FastMath.log(2)));
+
+ return new double[] { n, m, s };
+ }
+
+ /**
+ * Finds index of point in specified points with the largest Y.
+ *
+ * @param points Points to search.
+ * @return the index in specified points array.
+ */
+ private int findMaxY(WeightedObservedPoint[] points) {
+ int maxYIdx = 0;
+ for (int i = 1; i < points.length; i++) {
+ if (points[i].getY() > points[maxYIdx].getY()) {
+ maxYIdx = i;
+ }
+ }
+ return maxYIdx;
+ }
+
+ /**
+ * Interpolates using the specified points to determine X at the
+ * specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start the search for
+ * interpolation bounds points.
+ * @param idxStep Index step for searching interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the value of X for the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the
+ * range of the specified {@code points}.
+ */
+ private double interpolateXAtY(WeightedObservedPoint[] points,
+ int startIdx,
+ int idxStep,
+ double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ final WeightedObservedPoint[] twoPoints
+ = getInterpolationPointsForY(points, startIdx, idxStep, y);
+ final WeightedObservedPoint p1 = twoPoints[0];
+ final WeightedObservedPoint p2 = twoPoints[1];
+ if (p1.getY() == y) {
+ return p1.getX();
+ }
+ if (p2.getY() == y) {
+ return p2.getX();
+ }
+ return p1.getX() + (((y - p1.getY()) * (p2.getX() - p1.getX())) /
+ (p2.getY() - p1.getY()));
+ }
+
+ /**
+ * Gets the two bounding interpolation points from the specified points
+ * suitable for determining X at the specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start search for
+ * interpolation bounds points.
+ * @param idxStep Index step for search for interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the array containing two points suitable for determining X at
+ * the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the
+ * range of the specified {@code points}.
+ */
+ private WeightedObservedPoint[] getInterpolationPointsForY(WeightedObservedPoint[] points,
+ int startIdx,
+ int idxStep,
+ double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ for (int i = startIdx;
+ idxStep < 0 ? i + idxStep >= 0 : i + idxStep < points.length;
+ i += idxStep) {
+ final WeightedObservedPoint p1 = points[i];
+ final WeightedObservedPoint p2 = points[i + idxStep];
+ if (isBetween(y, p1.getY(), p2.getY())) {
+ if (idxStep < 0) {
+ return new WeightedObservedPoint[] { p2, p1 };
+ } else {
+ return new WeightedObservedPoint[] { p1, p2 };
+ }
+ }
+ }
+
+ // Boundaries are replaced by dummy values because the raised
+ // exception is caught and the message never displayed.
+ // TODO: Exceptions should not be used for flow control.
+ throw new OutOfRangeException(y,
+ Double.NEGATIVE_INFINITY,
+ Double.POSITIVE_INFINITY);
+ }
+
+ /**
+ * Determines whether a value is between two other values.
+ *
+ * @param value Value to test whether it is between {@code boundary1}
+ * and {@code boundary2}.
+ * @param boundary1 One end of the range.
+ * @param boundary2 Other end of the range.
+ * @return {@code true} if {@code value} is between {@code boundary1} and
+ * {@code boundary2} (inclusive), {@code false} otherwise.
+ */
+ private boolean isBetween(double value,
+ double boundary1,
+ double boundary2) {
+ return (value >= boundary1 && value <= boundary2) ||
+ (value >= boundary2 && value <= boundary1);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java b/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java
new file mode 100644
index 000000000..12badd2b1
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.analysis.function.HarmonicOscillator;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Class that implements a curve fitting specialized for sinusoids.
+ *
+ * Harmonic fitting is a very simple case of curve fitting. The
+ * estimated coefficients are the amplitude a, the pulsation ω and
+ * the phase φ: f (t) = a cos (ω t + φ).
+ * They are
+ * searched by a least square estimator initialized with a rough guess
+ * based on integrals.
+ *
+ * @version $Id: HarmonicFitter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class HarmonicFitter extends CurveFitter<HarmonicOscillator.Parametric> {
+ /**
+ * Simple constructor.
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public HarmonicFitter(final MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Fit an harmonic function to the observed points.
+ *
+ * @param initialGuess First guess values in the following order:
+ *
+ * - Amplitude
+ * - Angular frequency
+ * - Phase
+ *
+ * @return the parameters of the harmonic function that best fits the
+ * observed points (in the same order as above).
+ */
+ public double[] fit(double[] initialGuess) {
+ return fit(new HarmonicOscillator.Parametric(), initialGuess);
+ }
+
+ /**
+ * Fit an harmonic function to the observed points.
+ * An initial guess will be automatically computed.
+ *
+ * @return the parameters of the harmonic function that best fits the
+ * observed points (see the other {@link #fit(double[]) fit} method.
+ * @throws NumberIsTooSmallException if the sample is too short for the
+ * the first guess to be computed.
+ * @throws ZeroException if the first guess cannot be computed because
+ * the abscissa range is zero.
+ */
+ public double[] fit() {
+ return fit((new ParameterGuesser(getObservations())).guess());
+ }
+
+ /**
+ * This class guesses harmonic coefficients from a sample.
+ * The algorithm used to guess the coefficients is as follows:
+ *
+ * We know f (t) at some sampling points ti and want to find a,
+ * ω and φ such that f (t) = a cos (ω t + φ).
+ *
+ *
+ * From the analytical expression, we can compute two primitives :
+ *
+ * If2 (t) = ∫ f2 = a2 × [t + S (t)] / 2
+ * If'2 (t) = ∫ f'2 = a2 ω2 × [t - S (t)] / 2
+ * where S (t) = sin (2 (ω t + φ)) / (2 ω)
+ *
+ *
+ *
+ * We can remove S between these expressions :
+ *
+ * If'2 (t) = a2 ω2 t - ω2 If2 (t)
+ *
+ *
+ *
+ * The preceding expression shows that If'2 (t) is a linear
+ * combination of both t and If2 (t): If'2 (t) = A × t + B × If2 (t)
+ *
+ *
+ * From the primitive, we can deduce the same form for definite
+ * integrals between t1 and ti for each ti :
+ *
+ * If2 (ti) - If2 (t1) = A × (ti - t1) + B × (If2 (ti) - If2 (t1))
+ *
+ *
+ *
+ * We can find the coefficients A and B that best fit the sample
+ * to this linear expression by computing the definite integrals for
+ * each sample points.
+ *
+ *
+ * For a bilinear expression z (xi, yi) = A × xi + B × yi, the
+ * coefficients A and B that minimize a least square criterion
+ * ∑ (zi - z (xi, yi))2 are given by these expressions:
+ *
+ *
+ * ∑yiyi ∑xizi - ∑xiyi ∑yizi
+ * A = ------------------------
+ * ∑xixi ∑yiyi - ∑xiyi ∑xiyi
+ *
+ * ∑xixi ∑yizi - ∑xiyi ∑xizi
+ * B = ------------------------
+ * ∑xixi ∑yiyi - ∑xiyi ∑xiyi
+ *
+ *
+ *
+ *
+ * In fact, we can assume both a and ω are positive and
+ * compute them directly, knowing that A = a2 ω2 and that
+ * B = - ω2. The complete algorithm is therefore:
+ *
+ *
+ * for each ti from t1 to tn-1, compute:
+ * f (ti)
+ * f' (ti) = (f (ti+1) - f(ti-1)) / (ti+1 - ti-1)
+ * xi = ti - t1
+ * yi = ∫ f2 from t1 to ti
+ * zi = ∫ f'2 from t1 to ti
+ * update the sums ∑xixi, ∑yiyi, ∑xiyi, ∑xizi and ∑yizi
+ * end for
+ *
+ * |--------------------------
+ * \ | ∑yiyi ∑xizi - ∑xiyi ∑yizi
+ * a = \ | ------------------------
+ * \| ∑xiyi ∑xizi - ∑xixi ∑yizi
+ *
+ *
+ * |--------------------------
+ * \ | ∑xiyi ∑xizi - ∑xixi ∑yizi
+ * ω = \ | ------------------------
+ * \| ∑xixi ∑yiyi - ∑xiyi ∑xiyi
+ *
+ *
+ *
+ *
+ * Once we know ω, we can compute:
+ *
+ * fc = ω f (t) cos (ω t) - f' (t) sin (ω t)
+ * fs = ω f (t) sin (ω t) + f' (t) cos (ω t)
+ *
+ *
+ *
+ * It appears that fc = a ω cos (φ)
+ * and
+ * fs = -a ω sin (φ)
+ * , so we can use these
+ * expressions to compute φ. The best estimate over the sample is
+ * given by averaging these expressions.
+ *
+ *
+ * Since integrals and means are involved in the preceding
+ * estimations, these operations run in O(n) time, where n is the
+ * number of measurements.
+ */
+ public static class ParameterGuesser {
+ /** Amplitude. */
+ private final double a;
+ /** Angular frequency. */
+ private final double omega;
+ /** Phase. */
+ private final double phi;
+
+ /**
+ * Simple constructor.
+ *
+ * @param observations Sampled observations.
+ * @throws NumberIsTooSmallException if the sample is too short.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot
+ * produce sensible results.
+ */
+ public ParameterGuesser(WeightedObservedPoint[] observations) {
+ if (observations.length < 4) {
+ throw new NumberIsTooSmallException(LocalizedFormats.INSUFFICIENT_OBSERVED_POINTS_IN_SAMPLE,
+ observations.length, 4, true);
+ }
+
+ final WeightedObservedPoint[] sorted = sortObservations(observations);
+
+ final double aOmega[] = guessAOmega(sorted);
+ a = aOmega[0];
+ omega = aOmega[1];
+
+ phi = guessPhi(sorted);
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ *
+ * - Amplitude
+ * - Angular frequency
+ * - Phase
+ *
+ */
+ public double[] guess() {
+ return new double[] { a, omega, phi };
+ }
+
+ /**
+ * Sort the observations with respect to the abscissa.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
+ final WeightedObservedPoint[] observations = unsorted.clone();
+
+ // Since the samples are almost always already sorted, this
+ // method is implemented as an insertion sort that reorders the
+ // elements in place. Insertion sort is very efficient in this case.
+ WeightedObservedPoint curr = observations[0];
+ for (int j = 1; j < observations.length; ++j) {
+ WeightedObservedPoint prec = curr;
+ curr = observations[j];
+ if (curr.getX() < prec.getX()) {
+ // the current element should be inserted closer to the beginning
+ int i = j - 1;
+ WeightedObservedPoint mI = observations[i];
+ while ((i >= 0) && (curr.getX() < mI.getX())) {
+ observations[i + 1] = mI;
+ if (i-- != 0) {
+ mI = observations[i];
+ }
+ }
+ observations[i + 1] = curr;
+ curr = observations[j];
+ }
+ }
+
+ return observations;
+ }
+
+ /**
+ * Estimate a first guess of the amplitude and angular frequency.
+ * This method assumes that the {@link #sortObservations(WeightedObservedPoint[])} method
+ * has been called previously.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot
+ * produce sensible results.
+ * @return the guessed amplitude (at index 0) and circular frequency
+ * (at index 1).
+ */
+ private double[] guessAOmega(WeightedObservedPoint[] observations) {
+ final double[] aOmega = new double[2];
+
+ // initialize the sums for the linear model between the two integrals
+ double sx2 = 0;
+ double sy2 = 0;
+ double sxy = 0;
+ double sxz = 0;
+ double syz = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ double f2Integral = 0;
+ double fPrime2Integral = 0;
+ final double startX = currentX;
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+
+ // update the integrals of f2 and f'2
+ // considering a linear model for f (and therefore constant f')
+ final double dx = currentX - previousX;
+ final double dy = currentY - previousY;
+ final double f2StepIntegral =
+ dx * (previousY * previousY + previousY * currentY + currentY * currentY) / 3;
+ final double fPrime2StepIntegral = dy * dy / dx;
+
+ final double x = currentX - startX;
+ f2Integral += f2StepIntegral;
+ fPrime2Integral += fPrime2StepIntegral;
+
+ sx2 += x * x;
+ sy2 += f2Integral * f2Integral;
+ sxy += x * f2Integral;
+ sxz += x * fPrime2Integral;
+ syz += f2Integral * fPrime2Integral;
+ }
+
+ // compute the amplitude and pulsation coefficients
+ double c1 = sy2 * sxz - sxy * syz;
+ double c2 = sxy * sxz - sx2 * syz;
+ double c3 = sx2 * sy2 - sxy * sxy;
+ if ((c1 / c2 < 0) || (c2 / c3 < 0)) {
+ final int last = observations.length - 1;
+ // Range of the observations, assuming that the
+ // observations are sorted.
+ final double xRange = observations[last].getX() - observations[0].getX();
+ if (xRange == 0) {
+ throw new ZeroException();
+ }
+ aOmega[1] = 2 * Math.PI / xRange;
+
+ double yMin = Double.POSITIVE_INFINITY;
+ double yMax = Double.NEGATIVE_INFINITY;
+ for (int i = 1; i < observations.length; ++i) {
+ final double y = observations[i].getY();
+ if (y < yMin) {
+ yMin = y;
+ }
+ if (y > yMax) {
+ yMax = y;
+ }
+ }
+ aOmega[0] = 0.5 * (yMax - yMin);
+ } else {
+ if (c2 == 0) {
+ // In some ill-conditioned cases (cf. MATH-844), the guesser
+ // procedure cannot produce sensible results.
+ throw new MathIllegalStateException(LocalizedFormats.ZERO_DENOMINATOR);
+ }
+
+ aOmega[0] = FastMath.sqrt(c1 / c2);
+ aOmega[1] = FastMath.sqrt(c2 / c3);
+ }
+
+ return aOmega;
+ }
+
+ /**
+ * Estimate a first guess of the phase.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @return the guessed phase.
+ */
+ private double guessPhi(WeightedObservedPoint[] observations) {
+ // initialize the means
+ double fcMean = 0;
+ double fsMean = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+ final double currentYPrime = (currentY - previousY) / (currentX - previousX);
+
+ double omegaX = omega * currentX;
+ double cosine = FastMath.cos(omegaX);
+ double sine = FastMath.sin(omegaX);
+ fcMean += omega * currentY * cosine - currentYPrime * sine;
+ fsMean += omega * currentY * sine + currentYPrime * cosine;
+ }
+
+ return FastMath.atan2(-fsMean, fcMean);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java b/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java
new file mode 100644
index 000000000..4767ea84f
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+
+/**
+ * Polynomial fitting is a very simple case of {@link CurveFitter curve fitting}.
+ * The estimated coefficients are the polynomial coefficients (see the
+ * {@link #fit(double[]) fit} method).
+ *
+ * @version $Id: PolynomialFitter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class PolynomialFitter extends CurveFitter {
+ /**
+ * Simple constructor.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public PolynomialFitter(MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Get the coefficients of the polynomial fitting the weighted data points.
+ * The degree of the fitting polynomial is {@code guess.length - 1}.
+ *
+ * @param maxEval Maximum number of evaluations of the objective
+ * function during the fit.
+ * @param guess First guess for the coefficients. They must be sorted in
+ * increasing order of the polynomial's degree.
+ * @return the coefficients of the polynomial that best fits the observed points.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException if
+ * the number of evaluations exceeds {@code maxEval}.
+ * @throws org.apache.commons.math3.exception.ConvergenceException
+ * if the algorithm failed to converge.
+ */
+ public double[] fit(int maxEval, double[] guess) {
+ return fit(maxEval, new PolynomialFunction.Parametric(), guess);
+ }
+
+ /**
+ * Get the coefficients of the polynomial fitting the weighted data points.
+ * The degree of the fitting polynomial is {@code guess.length - 1}.
+ * No explicit limit is placed on the number of evaluations here.
+ *
+ * @param guess First guess for the coefficients. They must be sorted in
+ * increasing order of the polynomial's degree.
+ * @return the coefficients of the polynomial that best fits the observed points.
+ * @throws org.apache.commons.math3.exception.ConvergenceException
+ * if the algorithm failed to converge.
+ */
+ public double[] fit(double[] guess) {
+ return fit(new PolynomialFunction.Parametric(), guess);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java
new file mode 100644
index 000000000..e4bca3468
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.io.Serializable;
+
+/**
+ * This class is a simple container for weighted observed point in
+ * {@link CurveFitter curve fitting}.
+ * Instances of this class are guaranteed to be immutable.
+ * @version $Id: WeightedObservedPoint.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class WeightedObservedPoint implements Serializable {
+ /** Serializable version id. */
+ private static final long serialVersionUID = 5306874947404636157L;
+ /** Weight of the measurement in the fitting process. */
+ private final double weight;
+ /** Abscissa of the point. */
+ private final double x;
+ /** Observed value of the function at x. */
+ private final double y;
+
+ /**
+ * Simple constructor.
+ * All fields are final and set here, hence the immutability guarantee
+ * stated on the class.
+ * Note that no validation is performed: the weight is stored as given
+ * (negative, zero or NaN values are not rejected here).
+ *
+ * @param weight Weight of the measurement in the fitting process.
+ * @param x Abscissa of the measurement.
+ * @param y Ordinate of the measurement.
+ */
+ public WeightedObservedPoint(final double weight, final double x, final double y) {
+ this.weight = weight;
+ this.x = x;
+ this.y = y;
+ }
+
+ /**
+ * Gets the weight of the measurement in the fitting process.
+ *
+ * @return the weight of the measurement in the fitting process.
+ */
+ public double getWeight() {
+ return weight;
+ }
+
+ /**
+ * Gets the abscissa of the point.
+ *
+ * @return the abscissa of the point.
+ */
+ public double getX() {
+ return x;
+ }
+
+ /**
+ * Gets the observed value of the function at x.
+ *
+ * @return the observed value of the function at x.
+ */
+ public double getY() {
+ return y;
+ }
+
+}
+
diff --git a/src/main/java/org/apache/commons/math3/fitting/package-info.java b/src/main/java/org/apache/commons/math3/fitting/package-info.java
new file mode 100644
index 000000000..f430b85f6
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Classes to perform curve fitting.
+ *
+ * Curve fitting is a special case of a least squares problem
+ * where the parameters are the coefficients of a function {@code f}
+ * whose graph {@code y = f(x)} should pass through sample points, and
+ * where the objective function is the squared sum of the residuals
+ * f(x<sub>i</sub>) - y<sub>i</sub> for observed points
+ * (x<sub>i</sub>, y<sub>i</sub>).
+ */
+package org.apache.commons.math3.fitting;
diff --git a/src/main/java/org/apache/commons/math3/optim/AbstractConvergenceChecker.java b/src/main/java/org/apache/commons/math3/optim/AbstractConvergenceChecker.java
new file mode 100644
index 000000000..7bf24ae80
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/AbstractConvergenceChecker.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+/**
+ * Base class for all convergence checker implementations.
+ *
+ * @param Type of (point, value) pair.
+ *
+ * @version $Id: AbstractConvergenceChecker.java 1370215 2012-08-07 12:38:59Z sebb $
+ * @since 3.0
+ */
+public abstract class AbstractConvergenceChecker
+ implements ConvergenceChecker {
+ /**
+ * Relative tolerance threshold.
+ */
+ private final double relativeThreshold;
+ /**
+ * Absolute tolerance threshold.
+ */
+ private final double absoluteThreshold;
+
+ /**
+ * Build an instance with a specified thresholds.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ */
+ public AbstractConvergenceChecker(final double relativeThreshold,
+ final double absoluteThreshold) {
+ this.relativeThreshold = relativeThreshold;
+ this.absoluteThreshold = absoluteThreshold;
+ }
+
+ /**
+ * @return the relative threshold.
+ */
+ public double getRelativeThreshold() {
+ return relativeThreshold;
+ }
+
+ /**
+ * @return the absolute threshold.
+ */
+ public double getAbsoluteThreshold() {
+ return absoluteThreshold;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public abstract boolean converged(int iteration,
+ PAIR previous,
+ PAIR current);
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/BaseMultiStartMultivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/BaseMultiStartMultivariateOptimizer.java
new file mode 100644
index 000000000..ad3b00a36
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/BaseMultiStartMultivariateOptimizer.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.random.RandomVectorGenerator;
+import org.apache.commons.math3.optim.InitialGuess;
+
+/**
+ * Base class multi-start optimizer for a multivariate function.
+ *
+ * This class wraps an optimizer in order to use it several times in
+ * turn with different starting points (trying to avoid being trapped
+ * in a local extremum when looking for a global one).
+ * It is not a "user" class.
+ *
+ * @param Type of the point/value pair returned by the optimization
+ * algorithm.
+ *
+ * @version $Id$
+ * @since 3.0
+ */
+public abstract class BaseMultiStartMultivariateOptimizer
+ extends BaseMultivariateOptimizer {
+ /** Underlying classical optimizer. */
+ private final BaseMultivariateOptimizer optimizer;
+ /** Number of evaluations already performed for all starts. */
+ private int totalEvaluations;
+ /** Number of starts to go. */
+ private int starts;
+ /** Random generator for multi-start. */
+ private RandomVectorGenerator generator;
+ /** Optimization data. */
+ private OptimizationData[] optimData;
+ /**
+ * Location in {@link #optimData} where the updated maximum
+ * number of evaluations will be stored.
+ */
+ private int maxEvalIndex = -1;
+ /**
+ * Location in {@link #optimData} where the updated start value
+ * will be stored.
+ */
+ private int initialGuessIndex = -1;
+
+ /**
+ * Create a multi-start optimizer from a single-start optimizer.
+ *
+ * @param optimizer Single-start optimizer to wrap.
+ * @param starts Number of starts to perform. If {@code starts == 1},
+ * the {@link #optimize(OptimizationData[]) optimize} will return the
+ * same solution as the given {@code optimizer} would return.
+ * @param generator Random vector generator to use for restarts.
+ * @throws NullArgumentException if {@code optimizer} or {@code generator}
+ * is {@code null}.
+ * @throws NotStrictlyPositiveException if {@code starts < 1}.
+ */
+ public BaseMultiStartMultivariateOptimizer(final BaseMultivariateOptimizer optimizer,
+ final int starts,
+ final RandomVectorGenerator generator) {
+ super(optimizer.getConvergenceChecker());
+
+ if (optimizer == null ||
+ generator == null) {
+ throw new NullArgumentException();
+ }
+ if (starts < 1) {
+ throw new NotStrictlyPositiveException(starts);
+ }
+
+ this.optimizer = optimizer;
+ this.starts = starts;
+ this.generator = generator;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getEvaluations() {
+ return totalEvaluations;
+ }
+
+ /**
+ * Gets all the optima found during the last call to {@code optimize}.
+ * The optimizer stores all the optima found during a set of
+ * restarts. The {@code optimize} method returns the best point only.
+ * This method returns all the points found at the end of each starts,
+ * including the best one already returned by the {@code optimize} method.
+ *
+ * The returned array as one element for each start as specified
+ * in the constructor. It is ordered with the results from the
+ * runs that did converge first, sorted from best to worst
+ * objective value (i.e in ascending order if minimizing and in
+ * descending order if maximizing), followed by {@code null} elements
+ * corresponding to the runs that did not converge. This means all
+ * elements will be {@code null} if the {@code optimize} method did throw
+ * an exception.
+ * This also means that if the first element is not {@code null}, it is
+ * the best point found across all starts.
+ *
+ * The behaviour is undefined if this method is called before
+ * {@code optimize}; it will likely throw {@code NullPointerException}.
+ *
+ * @return an array containing the optima sorted from best to worst.
+ */
+ public abstract PAIR[] getOptima();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws MathIllegalStateException if {@code optData} does not contain an
+ * instance of {@link MaxEval} or {@link InitialGuess}.
+ */
+ @Override
+ public PAIR optimize(OptimizationData... optData) {
+ // Store arguments in order to pass them to the internal optimizer.
+ optimData = optData;
+ // Set up base class and perform computations.
+ return super.optimize(optData);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PAIR doOptimize() {
+ // Remove all instances of "MaxEval" and "InitialGuess" from the
+ // array that will be passed to the internal optimizer.
+ // The former is to enforce smaller numbers of allowed evaluations
+ // (according to how many have been used up already), and the latter
+ // to impose a different start value for each start.
+ for (int i = 0; i < optimData.length; i++) {
+ if (optimData[i] instanceof MaxEval) {
+ optimData[i] = null;
+ maxEvalIndex = i;
+ }
+ if (optimData[i] instanceof InitialGuess) {
+ optimData[i] = null;
+ initialGuessIndex = i;
+ continue;
+ }
+ }
+ if (maxEvalIndex == -1) {
+ throw new MathIllegalStateException();
+ }
+ if (initialGuessIndex == -1) {
+ throw new MathIllegalStateException();
+ }
+
+ RuntimeException lastException = null;
+ totalEvaluations = 0;
+ clear();
+
+ final int maxEval = getMaxEvaluations();
+ final double[] min = getLowerBound();
+ final double[] max = getUpperBound();
+ final double[] startPoint = getStartPoint();
+
+ // Multi-start loop.
+ for (int i = 0; i < starts; i++) {
+ // CHECKSTYLE: stop IllegalCatch
+ try {
+ // Decrease number of allowed evaluations.
+ optimData[maxEvalIndex] = new MaxEval(maxEval - totalEvaluations);
+ // New start value.
+ final double[] s = (i == 0) ?
+ startPoint :
+ generator.nextVector(); // XXX This does not enforce bounds!
+ optimData[initialGuessIndex] = new InitialGuess(s);
+ // Optimize.
+ final PAIR result = optimizer.optimize(optimData);
+ store(result);
+ } catch (RuntimeException mue) {
+ lastException = mue;
+ }
+ // CHECKSTYLE: resume IllegalCatch
+
+ totalEvaluations += optimizer.getEvaluations();
+ }
+
+ final PAIR[] optima = getOptima();
+ if (optima.length == 0) {
+ // All runs failed.
+ throw lastException; // Cannot be null if starts >= 1.
+ }
+
+ // Return the best optimum.
+ return optima[0];
+ }
+
+ /**
+ * Method that will be called in order to store each found optimum.
+ *
+ * @param optimum Result of an optimization run.
+ */
+ protected abstract void store(PAIR optimum);
+ /**
+ * Method that will called in order to clear all stored optima.
+ */
+ protected abstract void clear();
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/BaseMultivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/BaseMultivariateOptimizer.java
new file mode 100644
index 000000000..48d1efc7d
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/BaseMultivariateOptimizer.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.SimpleBounds;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+
+/**
+ * Base class for implementing optimizers for multivariate functions.
+ * It contains the boiler-plate code for initial guess and bounds
+ * specifications.
+ * It is not a "user" class.
+ *
+ * @param Type of the point/value pair returned by the optimization
+ * algorithm.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class BaseMultivariateOptimizer
+ extends BaseOptimizer {
+ /** Initial guess. */
+ private double[] start;
+ /** Lower bounds. */
+ private double[] lowerBound;
+ /** Upper bounds. */
+ private double[] upperBound;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected BaseMultivariateOptimizer(ConvergenceChecker checker) {
+ super(checker);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link MaxEval}
+ * - {@link InitialGuess}
+ * - {@link SimpleBounds}
+ *
+ * @return {@inheritDoc}
+ */
+ @Override
+ public PAIR optimize(OptimizationData... optData) {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Check input consistency.
+ checkParameters();
+ // Perform optimization.
+ return super.optimize(optData);
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link InitialGuess}
+ * - {@link SimpleBounds}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof InitialGuess) {
+ start = ((InitialGuess) data).getInitialGuess();
+ continue;
+ }
+ if (data instanceof SimpleBounds) {
+ final SimpleBounds bounds = (SimpleBounds) data;
+ lowerBound = bounds.getLower();
+ upperBound = bounds.getUpper();
+ continue;
+ }
+ }
+ }
+
+ /**
+ * Gets the initial guess.
+ *
+ * @return the initial guess, or {@code null} if not set.
+ */
+ public double[] getStartPoint() {
+ return start == null ? null : start.clone();
+ }
+ /**
+ * @return the lower bounds, or {@code null} if not set.
+ */
+ public double[] getLowerBound() {
+ return lowerBound == null ? null : lowerBound.clone();
+ }
+ /**
+ * @return the upper bounds, or {@code null} if not set.
+ */
+ public double[] getUpperBound() {
+ return upperBound == null ? null : upperBound.clone();
+ }
+
+ /**
+ * Check parameters consistency.
+ */
+ private void checkParameters() {
+ if (start != null) {
+ final int dim = start.length;
+ if (lowerBound != null) {
+ if (lowerBound.length != dim) {
+ throw new DimensionMismatchException(lowerBound.length, dim);
+ }
+ for (int i = 0; i < dim; i++) {
+ final double v = start[i];
+ final double lo = lowerBound[i];
+ if (v < lo) {
+ throw new NumberIsTooSmallException(v, lo, true);
+ }
+ }
+ }
+ if (upperBound != null) {
+ if (upperBound.length != dim) {
+ throw new DimensionMismatchException(upperBound.length, dim);
+ }
+ for (int i = 0; i < dim; i++) {
+ final double v = start[i];
+ final double hi = upperBound[i];
+ if (v > hi) {
+ throw new NumberIsTooLargeException(v, hi, true);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java b/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java
new file mode 100644
index 000000000..48e105d4c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.TooManyIterationsException;
+
+/**
+ * Base class for implementing optimizers.
+ * It contains the boiler-plate code for counting the number of evaluations
+ * of the objective function and the number of iterations of the algorithm,
+ * and storing the convergence checker.
+ * It is not a "user" class.
+ *
+ * @param <PAIR> Type of the point/value pair returned by the optimization
+ * algorithm.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class BaseOptimizer<PAIR> {
+ /** Evaluations counter. */
+ protected final Incrementor evaluations;
+ /** Iterations counter. */
+ protected final Incrementor iterations;
+ /** Convergence checker. */
+ private final ConvergenceChecker<PAIR> checker;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected BaseOptimizer(ConvergenceChecker<PAIR> checker) {
+ this.checker = checker;
+
+ // Counters start with a maximal count of 0; actual limits are set
+ // through MaxEval / MaxIter optimization data in "optimize".
+ evaluations = new Incrementor(0, new MaxEvalCallback());
+ iterations = new Incrementor(0, new MaxIterCallback());
+ }
+
+ /**
+ * Gets the maximal number of function evaluations.
+ *
+ * @return the maximal number of function evaluations.
+ */
+ public int getMaxEvaluations() {
+ return evaluations.getMaximalCount();
+ }
+
+ /**
+ * Gets the number of evaluations of the objective function.
+ * The number of evaluations corresponds to the last call to the
+ * {@code optimize} method. It is 0 if the method has not been
+ * called yet.
+ *
+ * @return the number of evaluations of the objective function.
+ */
+ public int getEvaluations() {
+ return evaluations.getCount();
+ }
+
+ /**
+ * Gets the maximal number of iterations.
+ *
+ * @return the maximal number of iterations.
+ */
+ public int getMaxIterations() {
+ return iterations.getMaximalCount();
+ }
+
+ /**
+ * Gets the number of iterations performed by the algorithm.
+ * The number iterations corresponds to the last call to the
+ * {@code optimize} method. It is 0 if the method has not been
+ * called yet.
+ *
+ * @return the number of iterations performed by the algorithm.
+ */
+ public int getIterations() {
+ return iterations.getCount();
+ }
+
+ /**
+ * Gets the convergence checker.
+ *
+ * @return the object used to check for convergence.
+ */
+ public ConvergenceChecker<PAIR> getConvergenceChecker() {
+ return checker;
+ }
+
+ /**
+ * Stores data and performs the optimization.
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link MaxEval}
+ * - {@link MaxIter}
+ *
+ * @return a point/value pair that satisfies the convergence criteria.
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ * @throws TooManyIterationsException if the maximal number of
+ * iterations is exceeded.
+ */
+ public PAIR optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException,
+ TooManyIterationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Reset counters.
+ evaluations.resetCount();
+ iterations.resetCount();
+ // Perform optimization.
+ return doOptimize();
+ }
+
+ /**
+ * Performs the bulk of the optimization algorithm.
+ *
+ * @return the point/value pair giving the optimal value of the
+ * objective function.
+ */
+ protected abstract PAIR doOptimize();
+
+ /**
+ * Increment the evaluation count.
+ *
+ * @throws TooManyEvaluationsException if the allowed evaluations
+ * have been exhausted.
+ */
+ protected void incrementEvaluationCount()
+ throws TooManyEvaluationsException {
+ evaluations.incrementCount();
+ }
+
+ /**
+ * Increment the iteration count.
+ *
+ * @throws TooManyIterationsException if the allowed iterations
+ * have been exhausted.
+ */
+ protected void incrementIterationCount()
+ throws TooManyIterationsException {
+ iterations.incrementCount();
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link MaxEval}
+ * - {@link MaxIter}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof MaxEval) {
+ evaluations.setMaximalCount(((MaxEval) data).getMaxEval());
+ continue;
+ }
+ if (data instanceof MaxIter) {
+ iterations.setMaximalCount(((MaxIter) data).getMaxIter());
+ continue;
+ }
+ }
+ }
+
+ /**
+ * Defines the action to perform when reaching the maximum number
+ * of evaluations.
+ */
+ private static class MaxEvalCallback
+ implements Incrementor.MaxCountExceededCallback {
+ /**
+ * {@inheritDoc}
+ * @throws TooManyEvaluationsException always.
+ */
+ public void trigger(int max) {
+ throw new TooManyEvaluationsException(max);
+ }
+ }
+
+ /**
+ * Defines the action to perform when reaching the maximum number
+ * of iterations.
+ */
+ private static class MaxIterCallback
+ implements Incrementor.MaxCountExceededCallback {
+ /**
+ * {@inheritDoc}
+ * @throws TooManyIterationsException always.
+ */
+ public void trigger(int max) {
+ throw new TooManyIterationsException(max);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/ConvergenceChecker.java b/src/main/java/org/apache/commons/math3/optim/ConvergenceChecker.java
new file mode 100644
index 000000000..f49e98bd2
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/ConvergenceChecker.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim;
+
+/**
+ * This interface specifies how to check if an optimization algorithm has
+ * converged.
+ *
+ * Deciding if convergence has been reached is a problem-dependent issue. The
+ * user should provide a class implementing this interface to allow the
+ * optimization algorithm to stop its search according to the problem at hand.
+ *
+ * For convenience, three implementations that fit simple needs are already
+ * provided: {@link SimpleValueChecker}, {@link SimpleVectorValueChecker} and
+ * {@link SimplePointChecker}. The first two consider that convergence is
+ * reached when the objective function value does not change much anymore, it
+ * does not use the point set at all.
+ * The third one considers that convergence is reached when the input point
+ * set does not change much anymore, it does not use objective function value
+ * at all.
+ *
+ * @param <PAIR> Type of the (point, objective value) pair.
+ *
+ * @see org.apache.commons.math3.optim.SimplePointChecker
+ * @see org.apache.commons.math3.optim.SimpleValueChecker
+ * @see org.apache.commons.math3.optim.SimpleVectorValueChecker
+ *
+ * @version $Id: ConvergenceChecker.java 1364392 2012-07-22 18:27:12Z tn $
+ * @since 3.0
+ */
+public interface ConvergenceChecker<PAIR> {
+ /**
+ * Check if the optimization algorithm has converged.
+ *
+ * @param iteration Current iteration.
+ * @param previous Best point in the previous iteration.
+ * @param current Best point in the current iteration.
+ * @return {@code true} if the algorithm is considered to have converged.
+ */
+ boolean converged(int iteration, PAIR previous, PAIR current);
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/GoalType.java b/src/main/java/org/apache/commons/math3/optim/GoalType.java
new file mode 100644
index 000000000..00a0621a2
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/GoalType.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
/**
 * Goal type for an optimization problem (minimization or maximization of
 * a scalar function).
 *
 * @version $Id: GoalType.java 1364392 2012-07-22 18:27:12Z tn $
 * @since 2.0
 */
public enum GoalType implements OptimizationData {
    /** Maximization. */
    MAXIMIZE,
    /** Minimization. */
    MINIMIZE
}
diff --git a/src/main/java/org/apache/commons/math3/optim/InitialGuess.java b/src/main/java/org/apache/commons/math3/optim/InitialGuess.java
new file mode 100644
index 000000000..06480acab
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/InitialGuess.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim;
+
+/**
+ * Starting point (first guess) of the optimization procedure.
+ *
+ * Immutable class.
+ *
+ * @version $Id: InitialGuess.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.1
+ */
+public class InitialGuess implements OptimizationData {
+ /** Initial guess. */
+ private final double[] init;
+
+ /**
+ * @param startPoint Initial guess.
+ */
+ public InitialGuess(double[] startPoint) {
+ init = startPoint.clone();
+ }
+
+ /**
+ * Gets the initial guess.
+ *
+ * @return the initial guess.
+ */
+ public double[] getInitialGuess() {
+ return init.clone();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/MaxEval.java b/src/main/java/org/apache/commons/math3/optim/MaxEval.java
new file mode 100644
index 000000000..6a5bbbfc7
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/MaxEval.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+
/**
 * Maximum number of evaluations of the function to be optimized.
 *
 * @version $Id$
 * @since 3.1
 */
public class MaxEval implements OptimizationData {
    /** Allowed number of evaluations. */
    private final int maxEval;

    /**
     * @param max Allowed number of evaluations.
     * @throws NotStrictlyPositiveException if {@code max <= 0}.
     */
    public MaxEval(int max) {
        if (max <= 0) {
            throw new NotStrictlyPositiveException(max);
        }

        maxEval = max;
    }

    /**
     * Gets the maximum number of evaluations.
     *
     * @return the allowed number of evaluations.
     */
    public int getMaxEval() {
        return maxEval;
    }

    /**
     * Factory method that creates instance of this class that represents
     * a virtually unlimited number of evaluations.
     *
     * @return a new instance suitable for allowing {@link Integer#MAX_VALUE}
     * evaluations.
     */
    public static MaxEval unlimited() {
        return new MaxEval(Integer.MAX_VALUE);
    }
}
diff --git a/src/main/java/org/apache/commons/math3/optim/MaxIter.java b/src/main/java/org/apache/commons/math3/optim/MaxIter.java
new file mode 100644
index 000000000..7a88af734
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/MaxIter.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+
/**
 * Maximum number of iterations performed by an (iterative) algorithm.
 *
 * @version $Id$
 * @since 3.1
 */
public class MaxIter implements OptimizationData {
    /** Allowed number of iterations. */
    private final int maxIter;

    /**
     * @param max Allowed number of iterations.
     * @throws NotStrictlyPositiveException if {@code max <= 0}.
     */
    public MaxIter(int max) {
        if (max <= 0) {
            throw new NotStrictlyPositiveException(max);
        }

        maxIter = max;
    }

    /**
     * Gets the maximum number of iterations.
     *
     * @return the allowed number of iterations.
     */
    public int getMaxIter() {
        return maxIter;
    }

    /**
     * Factory method that creates instance of this class that represents
     * a virtually unlimited number of iterations.
     *
     * @return a new instance suitable for allowing {@link Integer#MAX_VALUE}
     * iterations.
     */
    public static MaxIter unlimited() {
        return new MaxIter(Integer.MAX_VALUE);
    }
}
diff --git a/src/main/java/org/apache/commons/math3/optim/ObjectiveFunction.java b/src/main/java/org/apache/commons/math3/optim/ObjectiveFunction.java
new file mode 100644
index 000000000..b882f234b
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/ObjectiveFunction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+
+/**
+ * Scalar function to be optimized.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class ObjectiveFunction implements OptimizationData {
+ /** Function to be optimized. */
+ private final MultivariateFunction function;
+
+ /**
+ * @param f Function to be optimized.
+ */
+ public ObjectiveFunction(MultivariateFunction f) {
+ function = f;
+ }
+
+ /**
+ * Gets the function to be optimized.
+ *
+ * @return the objective function.
+ */
+ public MultivariateFunction getObjectiveFunction() {
+ return function;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/OptimizationData.java b/src/main/java/org/apache/commons/math3/optim/OptimizationData.java
new file mode 100644
index 000000000..e0ada5bb1
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/OptimizationData.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
/**
 * Marker interface for optimizer input data.
 * Implementations will provide functionality (optional or required) needed
 * by the optimizers, and those will need to check the actual type of the
 * arguments and perform the appropriate cast in order to access the data
 * they need.
 *
 * @version $Id: OptimizationData.java 1416643 2012-12-03 19:37:14Z tn $
 * @since 3.1
 */
public interface OptimizationData {}
diff --git a/src/main/java/org/apache/commons/math3/optim/PointValuePair.java b/src/main/java/org/apache/commons/math3/optim/PointValuePair.java
new file mode 100644
index 000000000..65810b58e
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/PointValuePair.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import java.io.Serializable;
+import org.apache.commons.math3.util.Pair;
+
+/**
+ * This class holds a point and the value of an objective function at
+ * that point.
+ *
+ * @see PointVectorValuePair
+ * @see org.apache.commons.math3.analysis.MultivariateFunction
+ * @version $Id: PointValuePair.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class PointValuePair extends Pair implements Serializable {
+ /** Serializable UID. */
+ private static final long serialVersionUID = 20120513L;
+
+ /**
+ * Builds a point/objective function value pair.
+ *
+ * @param point Point coordinates. This instance will store
+ * a copy of the array, not the array passed as argument.
+ * @param value Value of the objective function at the point.
+ */
+ public PointValuePair(final double[] point,
+ final double value) {
+ this(point, value, true);
+ }
+
+ /**
+ * Builds a point/objective function value pair.
+ *
+ * @param point Point coordinates.
+ * @param value Value of the objective function at the point.
+ * @param copyArray if {@code true}, the input array will be copied,
+ * otherwise it will be referenced.
+ */
+ public PointValuePair(final double[] point,
+ final double value,
+ final boolean copyArray) {
+ super(copyArray ? ((point == null) ? null :
+ point.clone()) :
+ point,
+ value);
+ }
+
+ /**
+ * Gets the point.
+ *
+ * @return a copy of the stored point.
+ */
+ public double[] getPoint() {
+ final double[] p = getKey();
+ return p == null ? null : p.clone();
+ }
+
+ /**
+ * Gets a reference to the point.
+ *
+ * @return a reference to the internal array storing the point.
+ */
+ public double[] getPointRef() {
+ return getKey();
+ }
+
+ /**
+ * Replace the instance with a data transfer object for serialization.
+ * @return data transfer object that will be serialized
+ */
+ private Object writeReplace() {
+ return new DataTransferObject(getKey(), getValue());
+ }
+
+ /** Internal class used only for serialization. */
+ private static class DataTransferObject implements Serializable {
+ /** Serializable UID. */
+ private static final long serialVersionUID = 20120513L;
+ /**
+ * Point coordinates.
+ * @Serial
+ */
+ private final double[] point;
+ /**
+ * Value of the objective function at the point.
+ * @Serial
+ */
+ private final double value;
+
+ /** Simple constructor.
+ * @param point Point coordinates.
+ * @param value Value of the objective function at the point.
+ */
+ public DataTransferObject(final double[] point, final double value) {
+ this.point = point.clone();
+ this.value = value;
+ }
+
+ /** Replace the deserialized data transfer object with a {@link PointValuePair}.
+ * @return replacement {@link PointValuePair}
+ */
+ private Object readResolve() {
+ return new PointValuePair(point, value, false);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/PointVectorValuePair.java b/src/main/java/org/apache/commons/math3/optim/PointVectorValuePair.java
new file mode 100644
index 000000000..05f5a4fad
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/PointVectorValuePair.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import java.io.Serializable;
+import org.apache.commons.math3.util.Pair;
+
+/**
+ * This class holds a point and the vectorial value of an objective function at
+ * that point.
+ *
+ * @see PointValuePair
+ * @see org.apache.commons.math3.analysis.MultivariateVectorFunction
+ * @version $Id: PointVectorValuePair.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class PointVectorValuePair extends Pair implements Serializable {
+ /** Serializable UID. */
+ private static final long serialVersionUID = 20120513L;
+
+ /**
+ * Builds a point/objective function value pair.
+ *
+ * @param point Point coordinates. This instance will store
+ * a copy of the array, not the array passed as argument.
+ * @param value Value of the objective function at the point.
+ */
+ public PointVectorValuePair(final double[] point,
+ final double[] value) {
+ this(point, value, true);
+ }
+
+ /**
+ * Build a point/objective function value pair.
+ *
+ * @param point Point coordinates.
+ * @param value Value of the objective function at the point.
+ * @param copyArray if {@code true}, the input arrays will be copied,
+ * otherwise they will be referenced.
+ */
+ public PointVectorValuePair(final double[] point,
+ final double[] value,
+ final boolean copyArray) {
+ super(copyArray ?
+ ((point == null) ? null :
+ point.clone()) :
+ point,
+ copyArray ?
+ ((value == null) ? null :
+ value.clone()) :
+ value);
+ }
+
+ /**
+ * Gets the point.
+ *
+ * @return a copy of the stored point.
+ */
+ public double[] getPoint() {
+ final double[] p = getKey();
+ return p == null ? null : p.clone();
+ }
+
+ /**
+ * Gets a reference to the point.
+ *
+ * @return a reference to the internal array storing the point.
+ */
+ public double[] getPointRef() {
+ return getKey();
+ }
+
+ /**
+ * Gets the value of the objective function.
+ *
+ * @return a copy of the stored value of the objective function.
+ */
+ @Override
+ public double[] getValue() {
+ final double[] v = super.getValue();
+ return v == null ? null : v.clone();
+ }
+
+ /**
+ * Gets a reference to the value of the objective function.
+ *
+ * @return a reference to the internal array storing the value of
+ * the objective function.
+ */
+ public double[] getValueRef() {
+ return super.getValue();
+ }
+
+ /**
+ * Replace the instance with a data transfer object for serialization.
+ * @return data transfer object that will be serialized
+ */
+ private Object writeReplace() {
+ return new DataTransferObject(getKey(), getValue());
+ }
+
+ /** Internal class used only for serialization. */
+ private static class DataTransferObject implements Serializable {
+ /** Serializable UID. */
+ private static final long serialVersionUID = 20120513L;
+ /**
+ * Point coordinates.
+ * @Serial
+ */
+ private final double[] point;
+ /**
+ * Value of the objective function at the point.
+ * @Serial
+ */
+ private final double[] value;
+
+ /** Simple constructor.
+ * @param point Point coordinates.
+ * @param value Value of the objective function at the point.
+ */
+ public DataTransferObject(final double[] point, final double[] value) {
+ this.point = point.clone();
+ this.value = value.clone();
+ }
+
+ /** Replace the deserialized data transfer object with a {@link PointValuePair}.
+ * @return replacement {@link PointValuePair}
+ */
+ private Object readResolve() {
+ return new PointVectorValuePair(point, value, false);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/SimpleBounds.java b/src/main/java/org/apache/commons/math3/optim/SimpleBounds.java
new file mode 100644
index 000000000..71ecd587e
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/SimpleBounds.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import java.util.Arrays;
+
+/**
+ * Simple optimization constraints: lower and upper bounds.
+ * The valid range of the parameters is an interval that can be infinite
+ * (in one or both directions).
+ *
+ * Immutable class.
+ *
+ * @version $Id: SimpleBounds.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.1
+ */
+public class SimpleBounds implements OptimizationData {
+ /** Lower bounds. */
+ private final double[] lower;
+ /** Upper bounds. */
+ private final double[] upper;
+
+ /**
+ * @param lB Lower bounds.
+ * @param uB Upper bounds.
+ */
+ public SimpleBounds(double[] lB,
+ double[] uB) {
+ lower = lB.clone();
+ upper = uB.clone();
+ }
+
+ /**
+ * Gets the lower bounds.
+ *
+ * @return the lower bounds.
+ */
+ public double[] getLower() {
+ return lower.clone();
+ }
+ /**
+ * Gets the upper bounds.
+ *
+ * @return the upper bounds.
+ */
+ public double[] getUpper() {
+ return upper.clone();
+ }
+
+ /**
+ * Factory method that creates instance of this class that represents
+ * unbounded ranges.
+ *
+ * @param dim Number of parameters.
+ * @return a new instance suitable for passing to an optimizer that
+ * requires bounds specification.
+ */
+ public static SimpleBounds unbounded(int dim) {
+ final double[] lB = new double[dim];
+ Arrays.fill(lB, Double.NEGATIVE_INFINITY);
+ final double[] uB = new double[dim];
+ Arrays.fill(uB, Double.POSITIVE_INFINITY);
+
+ return new SimpleBounds(lB, uB);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/SimplePointChecker.java b/src/main/java/org/apache/commons/math3/optim/SimplePointChecker.java
new file mode 100644
index 000000000..ea98a6fed
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/SimplePointChecker.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.Pair;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+
+/**
+ * Simple implementation of the {@link ConvergenceChecker} interface using
+ * only point coordinates.
+ *
+ * Convergence is considered to have been reached if either the relative
+ * difference between each point coordinate are smaller than a threshold
+ * or if either the absolute difference between the point coordinates are
+ * smaller than another threshold.
+ *
+ * The {@link #converged(int,Pair,Pair) converged} method will also return
+ * {@code true} if the number of iterations has been set (see
+ * {@link #SimplePointChecker(double,double,int) this constructor}).
+ *
+ * @param Type of the (point, value) pair.
+ * The type of the "value" part of the pair (not used by this class).
+ *
+ * @version $Id: SimplePointChecker.java 1413127 2012-11-24 04:37:30Z psteitz $
+ * @since 3.0
+ */
+public class SimplePointChecker>
+ extends AbstractConvergenceChecker {
+ /**
+ * If {@link #maxIterationCount} is set to this value, the number of
+ * iterations will never cause {@link #converged(int, Pair, Pair)}
+ * to return {@code true}.
+ */
+ private static final int ITERATION_CHECK_DISABLED = -1;
+ /**
+ * Number of iterations after which the
+ * {@link #converged(int, Pair, Pair)} method
+ * will return true (unless the check is disabled).
+ */
+ private final int maxIterationCount;
+
+ /**
+ * Build an instance with specified thresholds.
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ */
+ public SimplePointChecker(final double relativeThreshold,
+ final double absoluteThreshold) {
+ super(relativeThreshold, absoluteThreshold);
+ maxIterationCount = ITERATION_CHECK_DISABLED;
+ }
+
+ /**
+ * Builds an instance with specified thresholds.
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold Relative tolerance threshold.
+ * @param absoluteThreshold Absolute tolerance threshold.
+ * @param maxIter Maximum iteration count.
+ * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
+ *
+ * @since 3.1
+ */
+ public SimplePointChecker(final double relativeThreshold,
+ final double absoluteThreshold,
+ final int maxIter) {
+ super(relativeThreshold, absoluteThreshold);
+
+ if (maxIter <= 0) {
+ throw new NotStrictlyPositiveException(maxIter);
+ }
+ maxIterationCount = maxIter;
+ }
+
+ /**
+ * Check if the optimization algorithm has converged considering the
+ * last two points.
+ * This method may be called several times from the same algorithm
+ * iteration with different points. This can be detected by checking the
+ * iteration number at each call if needed. Each time this method is
+ * called, the previous and current point correspond to points with the
+ * same role at each iteration, so they can be compared. As an example,
+ * simplex-based algorithms call this method for all points of the simplex,
+ * not only for the best or worst ones.
+ *
+ * @param iteration Index of current iteration
+ * @param previous Best point in the previous iteration.
+ * @param current Best point in the current iteration.
+ * @return {@code true} if the arguments satify the convergence criterion.
+ */
+ @Override
+ public boolean converged(final int iteration,
+ final PAIR previous,
+ final PAIR current) {
+ if (maxIterationCount != ITERATION_CHECK_DISABLED) {
+ if (iteration >= maxIterationCount) {
+ return true;
+ }
+ }
+
+ final double[] p = previous.getKey();
+ final double[] c = current.getKey();
+ for (int i = 0; i < p.length; ++i) {
+ final double pi = p[i];
+ final double ci = c[i];
+ final double difference = FastMath.abs(pi - ci);
+ final double size = FastMath.max(FastMath.abs(pi), FastMath.abs(ci));
+ if (difference > size * getRelativeThreshold() &&
+ difference > getAbsoluteThreshold()) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/SimpleValueChecker.java b/src/main/java/org/apache/commons/math3/optim/SimpleValueChecker.java
new file mode 100644
index 000000000..dcd991a44
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/SimpleValueChecker.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+
+/**
+ * Simple implementation of the {@link ConvergenceChecker} interface using
+ * only objective function values.
+ *
+ * Convergence is considered to have been reached if either the relative
+ * difference between the objective function values is smaller than a
+ * threshold or if either the absolute difference between the objective
+ * function values is smaller than another threshold.
+ *
+ * The {@link #converged(int,PointValuePair,PointValuePair) converged}
+ * method will also return {@code true} if the number of iterations has been set
+ * (see {@link #SimpleValueChecker(double,double,int) this constructor}).
+ *
+ * @version $Id: SimpleValueChecker.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class SimpleValueChecker
+ extends AbstractConvergenceChecker {
+ /**
+ * If {@link #maxIterationCount} is set to this value, the number of
+ * iterations will never cause
+ * {@link #converged(int,PointValuePair,PointValuePair)}
+ * to return {@code true}.
+ */
+ private static final int ITERATION_CHECK_DISABLED = -1;
+ /**
+ * Number of iterations after which the
+ * {@link #converged(int,PointValuePair,PointValuePair)} method
+ * will return true (unless the check is disabled).
+ */
+ private final int maxIterationCount;
+
+ /** Build an instance with specified thresholds.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ */
+ public SimpleValueChecker(final double relativeThreshold,
+ final double absoluteThreshold) {
+ super(relativeThreshold, absoluteThreshold);
+ maxIterationCount = ITERATION_CHECK_DISABLED;
+ }
+
+ /**
+ * Builds an instance with specified thresholds.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ * @param maxIter Maximum iteration count.
+ * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
+ *
+ * @since 3.1
+ */
+ public SimpleValueChecker(final double relativeThreshold,
+ final double absoluteThreshold,
+ final int maxIter) {
+ super(relativeThreshold, absoluteThreshold);
+
+ if (maxIter <= 0) {
+ throw new NotStrictlyPositiveException(maxIter);
+ }
+ maxIterationCount = maxIter;
+ }
+
+ /**
+ * Check if the optimization algorithm has converged considering the
+ * last two points.
+ * This method may be called several time from the same algorithm
+ * iteration with different points. This can be detected by checking the
+ * iteration number at each call if needed. Each time this method is
+ * called, the previous and current point correspond to points with the
+ * same role at each iteration, so they can be compared. As an example,
+ * simplex-based algorithms call this method for all points of the simplex,
+ * not only for the best or worst ones.
+ *
+ * @param iteration Index of current iteration
+ * @param previous Best point in the previous iteration.
+ * @param current Best point in the current iteration.
+ * @return {@code true} if the algorithm has converged.
+ */
+ @Override
+ public boolean converged(final int iteration,
+ final PointValuePair previous,
+ final PointValuePair current) {
+ if (maxIterationCount != ITERATION_CHECK_DISABLED) {
+ if (iteration >= maxIterationCount) {
+ return true;
+ }
+ }
+
+ final double p = previous.getValue();
+ final double c = current.getValue();
+ final double difference = FastMath.abs(p - c);
+ final double size = FastMath.max(FastMath.abs(p), FastMath.abs(c));
+ return difference <= size * getRelativeThreshold() ||
+ difference <= getAbsoluteThreshold();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/SimpleVectorValueChecker.java b/src/main/java/org/apache/commons/math3/optim/SimpleVectorValueChecker.java
new file mode 100644
index 000000000..131e16326
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/SimpleVectorValueChecker.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+
+/**
+ * Simple implementation of the {@link ConvergenceChecker} interface using
+ * only objective function values.
+ *
+ * Convergence is considered to have been reached if either the relative
+ * difference between the objective function values is smaller than a
+ * threshold, or the absolute difference between the objective
+ * function values is smaller than another threshold, for all vector elements.
+ *
+ * The {@link #converged(int,PointVectorValuePair,PointVectorValuePair) converged}
+ * method will also return {@code true} if the number of iterations has been set
+ * (see {@link #SimpleVectorValueChecker(double,double,int) this constructor}).
+ *
+ * @version $Id: SimpleVectorValueChecker.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class SimpleVectorValueChecker
+ extends AbstractConvergenceChecker {
+ /**
+ * If {@link #maxIterationCount} is set to this value, the number of
+ * iterations will never cause
+ * {@link #converged(int,PointVectorValuePair,PointVectorValuePair)}
+ * to return {@code true}.
+ */
+ private static final int ITERATION_CHECK_DISABLED = -1;
+ /**
+ * Number of iterations after which the
+ * {@link #converged(int,PointVectorValuePair,PointVectorValuePair)} method
+ * will return true (unless the check is disabled).
+ */
+ private final int maxIterationCount;
+
+ /**
+ * Build an instance with specified thresholds.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ */
+ public SimpleVectorValueChecker(final double relativeThreshold,
+ final double absoluteThreshold) {
+ super(relativeThreshold, absoluteThreshold);
+ maxIterationCount = ITERATION_CHECK_DISABLED;
+ }
+
+ /**
+ * Builds an instance with specified tolerance thresholds and
+ * iteration count.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold Relative tolerance threshold.
+ * @param absoluteThreshold Absolute tolerance threshold.
+ * @param maxIter Maximum iteration count.
+ * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
+ *
+ * @since 3.1
+ */
+ public SimpleVectorValueChecker(final double relativeThreshold,
+ final double absoluteThreshold,
+ final int maxIter) {
+ super(relativeThreshold, absoluteThreshold);
+
+ if (maxIter <= 0) {
+ throw new NotStrictlyPositiveException(maxIter);
+ }
+ maxIterationCount = maxIter;
+ }
+
+ /**
+ * Check if the optimization algorithm has converged considering the
+ * last two points.
+ * This method may be called several times from the same algorithm
+ * iteration with different points. This can be detected by checking the
+ * iteration number at each call if needed. Each time this method is
+ * called, the previous and current point correspond to points with the
+ * same role at each iteration, so they can be compared. As an example,
+ * simplex-based algorithms call this method for all points of the simplex,
+ * not only for the best or worst ones.
+ *
+ * @param iteration Index of current iteration
+ * @param previous Best point in the previous iteration.
+ * @param current Best point in the current iteration.
+ * @return {@code true} if the arguments satisfy the convergence criterion.
+ */
+ @Override
+ public boolean converged(final int iteration,
+ final PointVectorValuePair previous,
+ final PointVectorValuePair current) {
+ if (maxIterationCount != ITERATION_CHECK_DISABLED) {
+ if (iteration >= maxIterationCount) {
+ return true;
+ }
+ }
+
+ final double[] p = previous.getValueRef();
+ final double[] c = current.getValueRef();
+ for (int i = 0; i < p.length; ++i) {
+ final double pi = p[i];
+ final double ci = c[i];
+ final double difference = FastMath.abs(pi - ci);
+ final double size = FastMath.max(FastMath.abs(pi), FastMath.abs(ci));
+ if (difference > size * getRelativeThreshold() &&
+ difference > getAbsoluteThreshold()) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraint.java b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraint.java
new file mode 100644
index 000000000..c3812fb0a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraint.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.linear.ArrayRealVector;
+
+/**
+ * A linear constraint for a linear optimization problem.
+ *
+ * A linear constraint has one of the forms:
+ *
+ * - c1x1 + ... cnxn = v
+ * - c1x1 + ... cnxn <= v
+ * - c1x1 + ... cnxn >= v
+ * - l1x1 + ... lnxn + lcst =
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst <=
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst >=
+ * r1x1 + ... rnxn + rcst
+ *
+ * The ci, li or ri are the coefficients of the constraints, the xi
+ * are the coordinates of the current point and v is the value of the constraint.
+ *
+ *
+ * @version $Id: LinearConstraint.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class LinearConstraint implements Serializable {
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = -764632794033034092L;
+ /** Coefficients of the constraint (left hand side). */
+ private final transient RealVector coefficients;
+ /** Relationship between left and right hand sides (=, <=, >=). */
+ private final Relationship relationship;
+ /** Value of the constraint (right hand side). */
+ private final double value;
+
+ /**
+ * Build a constraint involving a single linear equation.
+ *
+ * A linear constraint with a single linear equation has one of the forms:
+ *
+ * - c1x1 + ... cnxn = v
+ * - c1x1 + ... cnxn <= v
+ * - c1x1 + ... cnxn >= v
+ *
+ *
+ * @param coefficients The coefficients of the constraint (left hand side)
+ * @param relationship The type of (in)equality used in the constraint
+ * @param value The value of the constraint (right hand side)
+ */
+ public LinearConstraint(final double[] coefficients,
+ final Relationship relationship,
+ final double value) {
+ this(new ArrayRealVector(coefficients), relationship, value);
+ }
+
+ /**
+ * Build a constraint involving a single linear equation.
+ *
+ * A linear constraint with a single linear equation has one of the forms:
+ *
+ * - c1x1 + ... cnxn = v
+ * - c1x1 + ... cnxn <= v
+ * - c1x1 + ... cnxn >= v
+ *
+ *
+ * @param coefficients The coefficients of the constraint (left hand side)
+ * @param relationship The type of (in)equality used in the constraint
+ * @param value The value of the constraint (right hand side)
+ */
+ public LinearConstraint(final RealVector coefficients,
+ final Relationship relationship,
+ final double value) {
+ this.coefficients = coefficients;
+ this.relationship = relationship;
+ this.value = value;
+ }
+
+ /**
+ * Build a constraint involving two linear equations.
+ *
+ * A linear constraint with two linear equations has one of the forms:
+ *
+ * - l1x1 + ... lnxn + lcst =
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst <=
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst >=
+ * r1x1 + ... rnxn + rcst
+ *
+ *
+ * @param lhsCoefficients The coefficients of the linear expression on the left hand side of the constraint
+ * @param lhsConstant The constant term of the linear expression on the left hand side of the constraint
+ * @param relationship The type of (in)equality used in the constraint
+ * @param rhsCoefficients The coefficients of the linear expression on the right hand side of the constraint
+ * @param rhsConstant The constant term of the linear expression on the right hand side of the constraint
+ */
+ public LinearConstraint(final double[] lhsCoefficients, final double lhsConstant,
+ final Relationship relationship,
+ final double[] rhsCoefficients, final double rhsConstant) {
+ double[] sub = new double[lhsCoefficients.length];
+ for (int i = 0; i < sub.length; ++i) {
+ sub[i] = lhsCoefficients[i] - rhsCoefficients[i];
+ }
+ this.coefficients = new ArrayRealVector(sub, false);
+ this.relationship = relationship;
+ this.value = rhsConstant - lhsConstant;
+ }
+
+ /**
+ * Build a constraint involving two linear equations.
+ *
+ * A linear constraint with two linear equations has one of the forms:
+ *
+ * - l1x1 + ... lnxn + lcst =
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst <=
+ * r1x1 + ... rnxn + rcst
+ * - l1x1 + ... lnxn + lcst >=
+ * r1x1 + ... rnxn + rcst
+ *
+ *
+ * @param lhsCoefficients The coefficients of the linear expression on the left hand side of the constraint
+ * @param lhsConstant The constant term of the linear expression on the left hand side of the constraint
+ * @param relationship The type of (in)equality used in the constraint
+ * @param rhsCoefficients The coefficients of the linear expression on the right hand side of the constraint
+ * @param rhsConstant The constant term of the linear expression on the right hand side of the constraint
+ */
+ public LinearConstraint(final RealVector lhsCoefficients, final double lhsConstant,
+ final Relationship relationship,
+ final RealVector rhsCoefficients, final double rhsConstant) {
+ this.coefficients = lhsCoefficients.subtract(rhsCoefficients);
+ this.relationship = relationship;
+ this.value = rhsConstant - lhsConstant;
+ }
+
+ /**
+ * Gets the coefficients of the constraint (left hand side).
+ *
+ * @return the coefficients of the constraint (left hand side).
+ */
+ public RealVector getCoefficients() {
+ return coefficients;
+ }
+
+ /**
+ * Gets the relationship between left and right hand sides.
+ *
+ * @return the relationship between left and right hand sides.
+ */
+ public Relationship getRelationship() {
+ return relationship;
+ }
+
+ /**
+ * Gets the value of the constraint (right hand side).
+ *
+ * @return the value of the constraint (right hand side).
+ */
+ public double getValue() {
+ return value;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (other instanceof LinearConstraint) {
+ LinearConstraint rhs = (LinearConstraint) other;
+ return relationship == rhs.relationship &&
+ value == rhs.value &&
+ coefficients.equals(rhs.coefficients);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return relationship.hashCode() ^
+ Double.valueOf(value).hashCode() ^
+ coefficients.hashCode();
+ }
+
+ /**
+ * Serialize the instance.
+ * @param oos stream where object should be written
+ * @throws IOException if object cannot be written to stream
+ */
+ private void writeObject(ObjectOutputStream oos)
+ throws IOException {
+ oos.defaultWriteObject();
+ MatrixUtils.serializeRealVector(coefficients, oos);
+ }
+
+ /**
+ * Deserialize the instance.
+ * @param ois stream from which the object should be read
+ * @throws ClassNotFoundException if a class in the stream cannot be found
+ * @throws IOException if object cannot be read from the stream
+ */
+ private void readObject(ObjectInputStream ois)
+ throws ClassNotFoundException, IOException {
+ ois.defaultReadObject();
+ MatrixUtils.deserializeRealVector(this, "coefficients", ois);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java
new file mode 100644
index 000000000..cf5279a34
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Collection;
+import java.util.Collections;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Class that represents a set of {@link LinearConstraint linear constraints}.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class LinearConstraintSet implements OptimizationData {
+ /** Set of constraints. */
+ private final Set linearConstraints
+ = new HashSet();
+
+ /**
+ * Creates a set containing the given constraints.
+ *
+ * @param constraints Constraints.
+ */
+ public LinearConstraintSet(LinearConstraint... constraints) {
+ for (LinearConstraint c : constraints) {
+ linearConstraints.add(c);
+ }
+ }
+
+ /**
+ * Creates a set containing the given constraints.
+ *
+ * @param constraints Constraints.
+ */
+ public LinearConstraintSet(Collection constraints) {
+ linearConstraints.addAll(constraints);
+ }
+
+ /**
+ * Gets the set of linear constraints.
+ *
+ * @return the constraints.
+ */
+ public Collection getConstraints() {
+ return Collections.unmodifiableSet(linearConstraints);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/LinearObjectiveFunction.java b/src/main/java/org/apache/commons/math3/optim/linear/LinearObjectiveFunction.java
new file mode 100644
index 000000000..9e0064d8c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/LinearObjectiveFunction.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * An objective function for a linear optimization problem.
+ *
+ * A linear objective function has the form:
+ *
+ * c1x1 + ... cnxn + d
+ *
+ * The ci and d are the coefficients of the equation,
+ * the xi are the coordinates of the current point.
+ *
+ *
+ * @version $Id: LinearObjectiveFunction.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class LinearObjectiveFunction
+ implements MultivariateFunction,
+ OptimizationData,
+ Serializable {
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = -4531815507568396090L;
+ /** Coefficients of the linear equation (ci). */
+ private final transient RealVector coefficients;
+ /** Constant term of the linear equation. */
+ private final double constantTerm;
+
+ /**
+ * @param coefficients Coefficients for the linear equation being optimized.
+ * @param constantTerm Constant term of the linear equation.
+ */
+ public LinearObjectiveFunction(double[] coefficients, double constantTerm) {
+ this(new ArrayRealVector(coefficients), constantTerm);
+ }
+
+ /**
+ * @param coefficients Coefficients for the linear equation being optimized.
+ * @param constantTerm Constant term of the linear equation.
+ */
+ public LinearObjectiveFunction(RealVector coefficients, double constantTerm) {
+ this.coefficients = coefficients;
+ this.constantTerm = constantTerm;
+ }
+
+ /**
+ * Gets the coefficients of the linear equation being optimized.
+ *
+ * @return coefficients of the linear equation being optimized.
+ */
+ public RealVector getCoefficients() {
+ return coefficients;
+ }
+
+ /**
+ * Gets the constant of the linear equation being optimized.
+ *
+ * @return constant of the linear equation being optimized.
+ */
+ public double getConstantTerm() {
+ return constantTerm;
+ }
+
+ /**
+ * Computes the value of the linear equation at the current point.
+ *
+ * @param point Point at which linear equation must be evaluated.
+ * @return the value of the linear equation at the current point.
+ */
+ public double value(final double[] point) {
+ return value(new ArrayRealVector(point, false));
+ }
+
+ /**
+ * Computes the value of the linear equation at the current point.
+ *
+ * @param point Point at which linear equation must be evaluated.
+ * @return the value of the linear equation at the current point.
+ */
+ public double value(final RealVector point) {
+ return coefficients.dotProduct(point) + constantTerm;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (other instanceof LinearObjectiveFunction) {
+ LinearObjectiveFunction rhs = (LinearObjectiveFunction) other;
+ return (constantTerm == rhs.constantTerm) && coefficients.equals(rhs.coefficients);
+ }
+
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return Double.valueOf(constantTerm).hashCode() ^ coefficients.hashCode();
+ }
+
+ /**
+ * Serialize the instance.
+ * @param oos stream where object should be written
+ * @throws IOException if object cannot be written to stream
+ */
+ private void writeObject(ObjectOutputStream oos)
+ throws IOException {
+ oos.defaultWriteObject();
+ MatrixUtils.serializeRealVector(coefficients, oos);
+ }
+
+ /**
+ * Deserialize the instance.
+ * @param ois stream from which the object should be read
+ * @throws ClassNotFoundException if a class in the stream cannot be found
+ * @throws IOException if object cannot be read from the stream
+ */
+ private void readObject(ObjectInputStream ois)
+ throws ClassNotFoundException, IOException {
+ ois.defaultReadObject();
+ MatrixUtils.deserializeRealVector(this, "coefficients", ois);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/LinearOptimizer.java b/src/main/java/org/apache/commons/math3/optim/linear/LinearOptimizer.java
new file mode 100644
index 000000000..d88feefc6
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/LinearOptimizer.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.util.Collection;
+import java.util.Collections;
+import org.apache.commons.math3.exception.TooManyIterationsException;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
+
+/**
+ * Base class for implementing linear optimizers.
+ *
+ * @version $Id: AbstractLinearOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.1
+ */
+public abstract class LinearOptimizer
+ extends MultivariateOptimizer {
+ /**
+ * Linear objective function.
+ */
+ private LinearObjectiveFunction function;
+ /**
+ * Linear constraints.
+ */
+ private Collection<LinearConstraint> linearConstraints;
+ /**
+ * Whether to restrict the variables to non-negative values.
+ */
+ private boolean nonNegative;
+
+ /**
+ * Simple constructor with default settings.
+ *
+ */
+ protected LinearOptimizer() {
+ super(null); // No convergence checker.
+ }
+
+ /**
+ * @return {@code true} if the variables are restricted to non-negative values.
+ */
+ protected boolean isRestrictedToNonNegative() {
+ return nonNegative;
+ }
+
+ /**
+ * @return the linear objective function to optimize.
+ */
+ protected LinearObjectiveFunction getFunction() {
+ return function;
+ }
+
+ /**
+ * @return an unmodifiable view of the linear constraints.
+ */
+ protected Collection<LinearConstraint> getConstraints() {
+ return Collections.unmodifiableCollection(linearConstraints);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link org.apache.commons.math3.optim.MaxIter}
+ * - {@link LinearObjectiveFunction}
+ * - {@link LinearConstraintSet}
+ * - {@link NonNegativeConstraint}
+ *
+ * @return {@inheritDoc}
+ * @throws TooManyIterationsException if the maximal number of
+ * iterations is exceeded.
+ */
+ @Override
+ public PointValuePair optimize(OptimizationData... optData)
+ throws TooManyIterationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link LinearObjectiveFunction}
+ * - {@link LinearConstraintSet}
+ * - {@link NonNegativeConstraint}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof LinearObjectiveFunction) {
+ function = (LinearObjectiveFunction) data;
+ continue;
+ }
+ if (data instanceof LinearConstraintSet) {
+ linearConstraints = ((LinearConstraintSet) data).getConstraints();
+ continue;
+ }
+ if (data instanceof NonNegativeConstraint) {
+ nonNegative = ((NonNegativeConstraint) data).isRestrictedToNonNegative();
+ continue;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/NoFeasibleSolutionException.java b/src/main/java/org/apache/commons/math3/optim/linear/NoFeasibleSolutionException.java
new file mode 100644
index 000000000..15209b7a2
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/NoFeasibleSolutionException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+
+/**
+ * This class represents exceptions thrown by optimizers when no solution fulfills the constraints.
+ *
+ * @version $Id: NoFeasibleSolutionException.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class NoFeasibleSolutionException extends MathIllegalStateException {
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = -3044253632189082760L;
+
+ /**
+ * Simple constructor using a default message.
+ */
+ public NoFeasibleSolutionException() {
+ super(LocalizedFormats.NO_FEASIBLE_SOLUTION);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/NonNegativeConstraint.java b/src/main/java/org/apache/commons/math3/optim/linear/NonNegativeConstraint.java
new file mode 100644
index 000000000..35d018131
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/NonNegativeConstraint.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * A constraint for a linear optimization problem indicating whether all
+ * variables must be restricted to non-negative values.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class NonNegativeConstraint implements OptimizationData {
+ /** Whether the variables are all positive. */
+ private final boolean isRestricted;
+
+ /**
+ * @param restricted If {@code true}, all the variables must be positive.
+ */
+ public NonNegativeConstraint(boolean restricted) {
+ isRestricted = restricted;
+ }
+
+ /**
+ * Indicates whether all the variables must be restricted to non-negative
+ * values.
+ *
+ * @return {@code true} if all the variables must be positive.
+ */
+ public boolean isRestrictedToNonNegative() {
+ return isRestricted;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/Relationship.java b/src/main/java/org/apache/commons/math3/optim/linear/Relationship.java
new file mode 100644
index 000000000..acd7e6654
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/Relationship.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+/**
+ * Types of relationships between two cells in a Solver {@link LinearConstraint}.
+ *
+ * @version $Id: Relationship.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public enum Relationship {
+ /** Equality relationship. */
+ EQ("="),
+ /** Lesser than or equal relationship. */
+ LEQ("<="),
+ /** Greater than or equal relationship. */
+ GEQ(">=");
+
+ /** Display string for the relationship. */
+ private final String stringValue;
+
+ /**
+ * Simple constructor.
+ *
+ * @param stringValue Display string for the relationship.
+ */
+ private Relationship(String stringValue) {
+ this.stringValue = stringValue;
+ }
+
+ @Override
+ public String toString() {
+ return stringValue;
+ }
+
+ /**
+ * Gets the relationship obtained when multiplying all coefficients by -1.
+ *
+ * @return the opposite relationship.
+ */
+ public Relationship oppositeRelationship() {
+ switch (this) {
+ case LEQ :
+ return GEQ;
+ case GEQ :
+ return LEQ;
+ default :
+ return EQ;
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/SimplexSolver.java b/src/main/java/org/apache/commons/math3/optim/linear/SimplexSolver.java
new file mode 100644
index 000000000..e01811275
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/SimplexSolver.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.math3.exception.TooManyIterationsException;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.util.Precision;
+
+/**
+ * Solves a linear problem using the "Two-Phase Simplex" method.
+ *
+ * @version $Id: SimplexSolver.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class SimplexSolver extends LinearOptimizer {
+ /** Default amount of error to accept for algorithm convergence. */
+ private static final double DEFAULT_EPSILON = 1.0e-6;
+
+ /** Default amount of error to accept in floating point comparisons (as ulps). */
+ private static final int DEFAULT_ULPS = 10;
+
+ /** Amount of error to accept for algorithm convergence. */
+ private final double epsilon;
+
+ /** Amount of error to accept in floating point comparisons (as ulps). */
+ private final int maxUlps;
+
+ /**
+ * Builds a simplex solver with default settings.
+ */
+ public SimplexSolver() {
+ this(DEFAULT_EPSILON, DEFAULT_ULPS);
+ }
+
+ /**
+ * Builds a simplex solver with a specified accepted amount of error.
+ *
+ * @param epsilon Amount of error to accept for algorithm convergence.
+ * @param maxUlps Amount of error to accept in floating point comparisons.
+ */
+ public SimplexSolver(final double epsilon,
+ final int maxUlps) {
+ this.epsilon = epsilon;
+ this.maxUlps = maxUlps;
+ }
+
+ /**
+ * Returns the column with the most negative coefficient in the objective function row.
+ *
+ * @param tableau Simple tableau for the problem.
+ * @return the column with the most negative coefficient.
+ */
+ private Integer getPivotColumn(SimplexTableau tableau) {
+ double minValue = 0;
+ Integer minPos = null;
+ for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getWidth() - 1; i++) {
+ final double entry = tableau.getEntry(0, i);
+ // check if the entry is strictly smaller than the current minimum
+ // do not use a ulp/epsilon check
+ if (entry < minValue) {
+ minValue = entry;
+ minPos = i;
+ }
+ }
+ return minPos;
+ }
+
+ /**
+ * Returns the row with the minimum ratio as given by the minimum ratio test (MRT).
+ *
+ * @param tableau Simple tableau for the problem.
+ * @param col Column to test the ratio of (see {@link #getPivotColumn(SimplexTableau)}).
+ * @return the row with the minimum ratio.
+ */
+ private Integer getPivotRow(SimplexTableau tableau, final int col) {
+ // create a list of all the rows that tie for the lowest score in the minimum ratio test
+ List minRatioPositions = new ArrayList();
+ double minRatio = Double.MAX_VALUE;
+ for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) {
+ final double rhs = tableau.getEntry(i, tableau.getWidth() - 1);
+ final double entry = tableau.getEntry(i, col);
+
+ if (Precision.compareTo(entry, 0d, maxUlps) > 0) {
+ final double ratio = rhs / entry;
+ // check if the entry is strictly equal to the current min ratio
+ // do not use a ulp/epsilon check
+ final int cmp = Double.compare(ratio, minRatio);
+ if (cmp == 0) {
+ minRatioPositions.add(i);
+ } else if (cmp < 0) {
+ minRatio = ratio;
+ minRatioPositions = new ArrayList();
+ minRatioPositions.add(i);
+ }
+ }
+ }
+
+ if (minRatioPositions.size() == 0) {
+ return null;
+ } else if (minRatioPositions.size() > 1) {
+ // there's a degeneracy as indicated by a tie in the minimum ratio test
+
+ // 1. check if there's an artificial variable that can be forced out of the basis
+ if (tableau.getNumArtificialVariables() > 0) {
+ for (Integer row : minRatioPositions) {
+ for (int i = 0; i < tableau.getNumArtificialVariables(); i++) {
+ int column = i + tableau.getArtificialVariableOffset();
+ final double entry = tableau.getEntry(row, column);
+ if (Precision.equals(entry, 1d, maxUlps) && row.equals(tableau.getBasicRow(column))) {
+ return row;
+ }
+ }
+ }
+ }
+
+ // 2. apply Bland's rule to prevent cycling:
+ // take the row for which the corresponding basic variable has the smallest index
+ //
+ // see http://www.stanford.edu/class/msande310/blandrule.pdf
+ // see http://en.wikipedia.org/wiki/Bland%27s_rule (not equivalent to the above paper)
+ //
+ // Additional heuristic: if we did not get a solution after half of maxIterations
+ // revert to the simple case of just returning the top-most row
+ // This heuristic is based on empirical data gathered while investigating MATH-828.
+ if (getEvaluations() < getMaxEvaluations() / 2) {
+ Integer minRow = null;
+ int minIndex = tableau.getWidth();
+ final int varStart = tableau.getNumObjectiveFunctions();
+ final int varEnd = tableau.getWidth() - 1;
+ for (Integer row : minRatioPositions) {
+ for (int i = varStart; i < varEnd && !row.equals(minRow); i++) {
+ final Integer basicRow = tableau.getBasicRow(i);
+ if (basicRow != null && basicRow.equals(row)) {
+ if (i < minIndex) {
+ minIndex = i;
+ minRow = row;
+ }
+ }
+ }
+ }
+ return minRow;
+ }
+ }
+ return minRatioPositions.get(0);
+ }
+
+ /**
+ * Runs one iteration of the Simplex method on the given model.
+ *
+ * @param tableau Simple tableau for the problem.
+ * @throws TooManyIterationsException if the allowed number of iterations has been exhausted.
+ * @throws UnboundedSolutionException if the model is found not to have a bounded solution.
+ */
+ protected void doIteration(final SimplexTableau tableau)
+ throws TooManyIterationsException,
+ UnboundedSolutionException {
+
+ incrementIterationCount();
+
+ Integer pivotCol = getPivotColumn(tableau);
+ Integer pivotRow = getPivotRow(tableau, pivotCol);
+ if (pivotRow == null) {
+ throw new UnboundedSolutionException();
+ }
+
+ // set the pivot element to 1
+ double pivotVal = tableau.getEntry(pivotRow, pivotCol);
+ tableau.divideRow(pivotRow, pivotVal);
+
+ // set the rest of the pivot column to 0
+ for (int i = 0; i < tableau.getHeight(); i++) {
+ if (i != pivotRow) {
+ final double multiplier = tableau.getEntry(i, pivotCol);
+ tableau.subtractRow(i, pivotRow, multiplier);
+ }
+ }
+ }
+
+ /**
+ * Solves Phase 1 of the Simplex method.
+ *
+ * @param tableau Simple tableau for the problem.
+ * @throws TooManyIterationsException if the allowed number of iterations has been exhausted.
+ * @throws UnboundedSolutionException if the model is found not to have a bounded solution.
+ * @throws NoFeasibleSolutionException if there is no feasible solution?
+ */
+ protected void solvePhase1(final SimplexTableau tableau)
+ throws TooManyIterationsException,
+ UnboundedSolutionException,
+ NoFeasibleSolutionException {
+
+ // make sure we're in Phase 1
+ if (tableau.getNumArtificialVariables() == 0) {
+ return;
+ }
+
+ while (!tableau.isOptimal()) {
+ doIteration(tableau);
+ }
+
+ // if W is not zero then we have no feasible solution
+ if (!Precision.equals(tableau.getEntry(0, tableau.getRhsOffset()), 0d, epsilon)) {
+ throw new NoFeasibleSolutionException();
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public PointValuePair doOptimize()
+ throws TooManyIterationsException,
+ UnboundedSolutionException,
+ NoFeasibleSolutionException {
+ final SimplexTableau tableau =
+ new SimplexTableau(getFunction(),
+ getConstraints(),
+ getGoalType(),
+ isRestrictedToNonNegative(),
+ epsilon,
+ maxUlps);
+
+ solvePhase1(tableau);
+ tableau.dropPhase1Objective();
+
+ while (!tableau.isOptimal()) {
+ doIteration(tableau);
+ }
+ return tableau.getSolution();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math3/optim/linear/SimplexTableau.java
new file mode 100644
index 000000000..ec77345a8
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/SimplexTableau.java
@@ -0,0 +1,637 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.math3.linear.Array2DRowRealMatrix;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.Precision;
+
+/**
+ * A tableau for use in the Simplex method.
+ *
+ *
+ * Example:
+ *
+ * W | Z | x1 | x2 | x- | s1 | s2 | a1 | RHS
+ * ---------------------------------------------------
+ * -1 0 0 0 0 0 0 1 0 <= phase 1 objective
+ * 0 1 -15 -10 0 0 0 0 0 <= phase 2 objective
+ * 0 0 1 0 0 1 0 0 2 <= constraint 1
+ * 0 0 0 1 0 0 1 0 3 <= constraint 2
+ * 0 0 1 1 0 0 0 1 4 <= constraint 3
+ *
+ * W: Phase 1 objective function
+ * Z: Phase 2 objective function
+ * x1 & x2: Decision variables
+ * x-: Extra decision variable to allow for negative values
+ * s1 & s2: Slack/Surplus variables
+ * a1: Artificial variable
+ * RHS: Right hand side
+ *
+ * @version $Id: SimplexTableau.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+class SimplexTableau implements Serializable {
+
+ /** Column label for negative vars. */
+ private static final String NEGATIVE_VAR_COLUMN_LABEL = "x-";
+
+ /** Default amount of error to accept in floating point comparisons (as ulps). */
+ private static final int DEFAULT_ULPS = 10;
+
+ /** The cut-off threshold to zero-out entries. */
+ private static final double CUTOFF_THRESHOLD = 1e-12;
+
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = -1369660067587938365L;
+
+ /** Linear objective function. */
+ private final LinearObjectiveFunction f;
+
+ /** Linear constraints. */
+ private final List constraints;
+
+ /** Whether to restrict the variables to non-negative values. */
+ private final boolean restrictToNonNegative;
+
+ /** The variables each column represents */
+ private final List columnLabels = new ArrayList();
+
+ /** Simple tableau. */
+ private transient RealMatrix tableau;
+
+ /** Number of decision variables. */
+ private final int numDecisionVariables;
+
+ /** Number of slack variables. */
+ private final int numSlackVariables;
+
+ /** Number of artificial variables. */
+ private int numArtificialVariables;
+
+ /** Amount of error to accept when checking for optimality. */
+ private final double epsilon;
+
+ /** Amount of error to accept in floating point comparisons. */
+ private final int maxUlps;
+
+ /**
+ * Builds a tableau for a linear problem.
+ *
+ * @param f Linear objective function.
+ * @param constraints Linear constraints.
+ * @param goalType Optimization goal: either {@link GoalType#MAXIMIZE}
+ * or {@link GoalType#MINIMIZE}.
+ * @param restrictToNonNegative Whether to restrict the variables to non-negative values.
+ * @param epsilon Amount of error to accept when checking for optimality.
+ */
+ SimplexTableau(final LinearObjectiveFunction f,
+ final Collection constraints,
+ final GoalType goalType,
+ final boolean restrictToNonNegative,
+ final double epsilon) {
+ this(f, constraints, goalType, restrictToNonNegative, epsilon, DEFAULT_ULPS);
+ }
+
+ /**
+ * Build a tableau for a linear problem.
+ * @param f linear objective function
+ * @param constraints linear constraints
+ * @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}
+ * @param restrictToNonNegative whether to restrict the variables to non-negative values
+ * @param epsilon amount of error to accept when checking for optimality
+ * @param maxUlps amount of error to accept in floating point comparisons
+ */
+ SimplexTableau(final LinearObjectiveFunction f,
+ final Collection constraints,
+ final GoalType goalType,
+ final boolean restrictToNonNegative,
+ final double epsilon,
+ final int maxUlps) {
+ this.f = f;
+ this.constraints = normalizeConstraints(constraints);
+ this.restrictToNonNegative = restrictToNonNegative;
+ this.epsilon = epsilon;
+ this.maxUlps = maxUlps;
+ this.numDecisionVariables = f.getCoefficients().getDimension() +
+ (restrictToNonNegative ? 0 : 1);
+ this.numSlackVariables = getConstraintTypeCounts(Relationship.LEQ) +
+ getConstraintTypeCounts(Relationship.GEQ);
+ this.numArtificialVariables = getConstraintTypeCounts(Relationship.EQ) +
+ getConstraintTypeCounts(Relationship.GEQ);
+ this.tableau = createTableau(goalType == GoalType.MAXIMIZE);
+ initializeColumnLabels();
+ }
+
+ /**
+ * Initialize the labels for the columns.
+ */
+ protected void initializeColumnLabels() {
+ if (getNumObjectiveFunctions() == 2) {
+ columnLabels.add("W");
+ }
+ columnLabels.add("Z");
+ for (int i = 0; i < getOriginalNumDecisionVariables(); i++) {
+ columnLabels.add("x" + i);
+ }
+ if (!restrictToNonNegative) {
+ columnLabels.add(NEGATIVE_VAR_COLUMN_LABEL);
+ }
+ for (int i = 0; i < getNumSlackVariables(); i++) {
+ columnLabels.add("s" + i);
+ }
+ for (int i = 0; i < getNumArtificialVariables(); i++) {
+ columnLabels.add("a" + i);
+ }
+ columnLabels.add("RHS");
+ }
+
+ /**
+ * Create the tableau by itself.
+ * @param maximize if true, goal is to maximize the objective function
+ * @return created tableau
+ */
+ protected RealMatrix createTableau(final boolean maximize) {
+
+ // create a matrix of the correct size
+ int width = numDecisionVariables + numSlackVariables +
+ numArtificialVariables + getNumObjectiveFunctions() + 1; // + 1 is for RHS
+ int height = constraints.size() + getNumObjectiveFunctions();
+ Array2DRowRealMatrix matrix = new Array2DRowRealMatrix(height, width);
+
+ // initialize the objective function rows
+ if (getNumObjectiveFunctions() == 2) {
+ matrix.setEntry(0, 0, -1);
+ }
+ int zIndex = (getNumObjectiveFunctions() == 1) ? 0 : 1;
+ matrix.setEntry(zIndex, zIndex, maximize ? 1 : -1);
+ RealVector objectiveCoefficients =
+ maximize ? f.getCoefficients().mapMultiply(-1) : f.getCoefficients();
+ copyArray(objectiveCoefficients.toArray(), matrix.getDataRef()[zIndex]);
+ matrix.setEntry(zIndex, width - 1,
+ maximize ? f.getConstantTerm() : -1 * f.getConstantTerm());
+
+ if (!restrictToNonNegative) {
+ matrix.setEntry(zIndex, getSlackVariableOffset() - 1,
+ getInvertedCoefficientSum(objectiveCoefficients));
+ }
+
+ // initialize the constraint rows
+ int slackVar = 0;
+ int artificialVar = 0;
+ for (int i = 0; i < constraints.size(); i++) {
+ LinearConstraint constraint = constraints.get(i);
+ int row = getNumObjectiveFunctions() + i;
+
+ // decision variable coefficients
+ copyArray(constraint.getCoefficients().toArray(), matrix.getDataRef()[row]);
+
+ // x-
+ if (!restrictToNonNegative) {
+ matrix.setEntry(row, getSlackVariableOffset() - 1,
+ getInvertedCoefficientSum(constraint.getCoefficients()));
+ }
+
+ // RHS
+ matrix.setEntry(row, width - 1, constraint.getValue());
+
+ // slack variables
+ if (constraint.getRelationship() == Relationship.LEQ) {
+ matrix.setEntry(row, getSlackVariableOffset() + slackVar++, 1); // slack
+ } else if (constraint.getRelationship() == Relationship.GEQ) {
+ matrix.setEntry(row, getSlackVariableOffset() + slackVar++, -1); // excess
+ }
+
+ // artificial variables
+ if ((constraint.getRelationship() == Relationship.EQ) ||
+ (constraint.getRelationship() == Relationship.GEQ)) {
+ matrix.setEntry(0, getArtificialVariableOffset() + artificialVar, 1);
+ matrix.setEntry(row, getArtificialVariableOffset() + artificialVar++, 1);
+ matrix.setRowVector(0, matrix.getRowVector(0).subtract(matrix.getRowVector(row)));
+ }
+ }
+
+ return matrix;
+ }
+
+ /**
+ * Get new versions of the constraints which have positive right hand sides.
+ * @param originalConstraints original (not normalized) constraints
+ * @return new versions of the constraints
+ */
+ public List normalizeConstraints(Collection originalConstraints) {
+ List normalized = new ArrayList();
+ for (LinearConstraint constraint : originalConstraints) {
+ normalized.add(normalize(constraint));
+ }
+ return normalized;
+ }
+
+ /**
+ * Get a new equation equivalent to this one with a positive right hand side.
+ * @param constraint reference constraint
+ * @return new equation
+ */
+ private LinearConstraint normalize(final LinearConstraint constraint) {
+ if (constraint.getValue() < 0) {
+ return new LinearConstraint(constraint.getCoefficients().mapMultiply(-1),
+ constraint.getRelationship().oppositeRelationship(),
+ -1 * constraint.getValue());
+ }
+ return new LinearConstraint(constraint.getCoefficients(),
+ constraint.getRelationship(), constraint.getValue());
+ }
+
+ /**
+ * Get the number of objective functions in this tableau.
+ * @return 2 for Phase 1. 1 for Phase 2.
+ */
+ protected final int getNumObjectiveFunctions() {
+ return this.numArtificialVariables > 0 ? 2 : 1;
+ }
+
+ /**
+ * Get a count of constraints corresponding to a specified relationship.
+ * @param relationship relationship to count
+ * @return number of constraint with the specified relationship
+ */
+ private int getConstraintTypeCounts(final Relationship relationship) {
+ int count = 0;
+ for (final LinearConstraint constraint : constraints) {
+ if (constraint.getRelationship() == relationship) {
+ ++count;
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Get the -1 times the sum of all coefficients in the given array.
+ * @param coefficients coefficients to sum
+ * @return the -1 times the sum of all coefficients in the given array.
+ */
+ protected static double getInvertedCoefficientSum(final RealVector coefficients) {
+ double sum = 0;
+ for (double coefficient : coefficients.toArray()) {
+ sum -= coefficient;
+ }
+ return sum;
+ }
+
+ /**
+ * Checks whether the given column is basic.
+ * @param col index of the column to check
+ * @return the row that the variable is basic in. null if the column is not basic
+ */
+ protected Integer getBasicRow(final int col) {
+ Integer row = null;
+ for (int i = 0; i < getHeight(); i++) {
+ final double entry = getEntry(i, col);
+ if (Precision.equals(entry, 1d, maxUlps) && (row == null)) {
+ row = i;
+ } else if (!Precision.equals(entry, 0d, maxUlps)) {
+ return null;
+ }
+ }
+ return row;
+ }
+
+ /**
+ * Removes the phase 1 objective function, positive cost non-artificial variables,
+ * and the non-basic artificial variables from this tableau.
+ */
+ protected void dropPhase1Objective() {
+ if (getNumObjectiveFunctions() == 1) {
+ return;
+ }
+
+ Set columnsToDrop = new TreeSet();
+ columnsToDrop.add(0);
+
+ // positive cost non-artificial variables
+ for (int i = getNumObjectiveFunctions(); i < getArtificialVariableOffset(); i++) {
+ final double entry = tableau.getEntry(0, i);
+ if (Precision.compareTo(entry, 0d, epsilon) > 0) {
+ columnsToDrop.add(i);
+ }
+ }
+
+ // non-basic artificial variables
+ for (int i = 0; i < getNumArtificialVariables(); i++) {
+ int col = i + getArtificialVariableOffset();
+ if (getBasicRow(col) == null) {
+ columnsToDrop.add(col);
+ }
+ }
+
+ double[][] matrix = new double[getHeight() - 1][getWidth() - columnsToDrop.size()];
+ for (int i = 1; i < getHeight(); i++) {
+ int col = 0;
+ for (int j = 0; j < getWidth(); j++) {
+ if (!columnsToDrop.contains(j)) {
+ matrix[i - 1][col++] = tableau.getEntry(i, j);
+ }
+ }
+ }
+
+ // remove the columns in reverse order so the indices are correct
+ Integer[] drop = columnsToDrop.toArray(new Integer[columnsToDrop.size()]);
+ for (int i = drop.length - 1; i >= 0; i--) {
+ columnLabels.remove((int) drop[i]);
+ }
+
+ this.tableau = new Array2DRowRealMatrix(matrix);
+ this.numArtificialVariables = 0;
+ }
+
+ /**
+ * @param src the source array
+ * @param dest the destination array
+ */
+ private void copyArray(final double[] src, final double[] dest) {
+ System.arraycopy(src, 0, dest, getNumObjectiveFunctions(), src.length);
+ }
+
+ /**
+ * Returns whether the problem is at an optimal state.
+ * @return whether the model has been solved
+ */
+ boolean isOptimal() {
+ for (int i = getNumObjectiveFunctions(); i < getWidth() - 1; i++) {
+ final double entry = tableau.getEntry(0, i);
+ if (Precision.compareTo(entry, 0d, epsilon) < 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Get the current solution.
+ * @return current solution
+ */
+ protected PointValuePair getSolution() {
+ int negativeVarColumn = columnLabels.indexOf(NEGATIVE_VAR_COLUMN_LABEL);
+ Integer negativeVarBasicRow = negativeVarColumn > 0 ? getBasicRow(negativeVarColumn) : null;
+ double mostNegative = negativeVarBasicRow == null ? 0 : getEntry(negativeVarBasicRow, getRhsOffset());
+
+ Set basicRows = new HashSet();
+ double[] coefficients = new double[getOriginalNumDecisionVariables()];
+ for (int i = 0; i < coefficients.length; i++) {
+ int colIndex = columnLabels.indexOf("x" + i);
+ if (colIndex < 0) {
+ coefficients[i] = 0;
+ continue;
+ }
+ Integer basicRow = getBasicRow(colIndex);
+ if (basicRow != null && basicRow == 0) {
+ // if the basic row is found to be the objective function row
+ // set the coefficient to 0 -> this case handles unconstrained
+ // variables that are still part of the objective function
+ coefficients[i] = 0;
+ } else if (basicRows.contains(basicRow)) {
+ // if multiple variables can take a given value
+ // then we choose the first and set the rest equal to 0
+ coefficients[i] = 0 - (restrictToNonNegative ? 0 : mostNegative);
+ } else {
+ basicRows.add(basicRow);
+ coefficients[i] =
+ (basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
+ (restrictToNonNegative ? 0 : mostNegative);
+ }
+ }
+ return new PointValuePair(coefficients, f.value(coefficients));
+ }
+
+ /**
+ * Subtracts a multiple of one row from another.
+ *
+ * After application of this operation, the following will hold:
+ *
minuendRow = minuendRow - multiple * subtrahendRow
+ *
+ * @param dividendRow index of the row
+ * @param divisor value of the divisor
+ */
+ protected void divideRow(final int dividendRow, final double divisor) {
+ for (int j = 0; j < getWidth(); j++) {
+ tableau.setEntry(dividendRow, j, tableau.getEntry(dividendRow, j) / divisor);
+ }
+ }
+
+ /**
+ * Subtracts a multiple of one row from another.
+ *
+ * After application of this operation, the following will hold:
+ *
minuendRow = minuendRow - multiple * subtrahendRow
+ *
+ * @param minuendRow row index
+ * @param subtrahendRow row index
+ * @param multiple multiplication factor
+ */
+ protected void subtractRow(final int minuendRow, final int subtrahendRow,
+ final double multiple) {
+ for (int i = 0; i < getWidth(); i++) {
+ double result = tableau.getEntry(minuendRow, i) - tableau.getEntry(subtrahendRow, i) * multiple;
+ // cut-off values smaller than the CUTOFF_THRESHOLD, otherwise may lead to numerical instabilities
+ if (FastMath.abs(result) < CUTOFF_THRESHOLD) {
+ result = 0.0;
+ }
+ tableau.setEntry(minuendRow, i, result);
+ }
+ }
+
+ /**
+ * Get the width of the tableau.
+ * @return width of the tableau
+ */
+ protected final int getWidth() {
+ return tableau.getColumnDimension();
+ }
+
+ /**
+ * Get the height of the tableau.
+ * @return height of the tableau
+ */
+ protected final int getHeight() {
+ return tableau.getRowDimension();
+ }
+
+ /**
+ * Get an entry of the tableau.
+ * @param row row index
+ * @param column column index
+ * @return entry at (row, column)
+ */
+ protected final double getEntry(final int row, final int column) {
+ return tableau.getEntry(row, column);
+ }
+
+ /**
+ * Set an entry of the tableau.
+ * @param row row index
+ * @param column column index
+ * @param value for the entry
+ */
+ protected final void setEntry(final int row, final int column,
+ final double value) {
+ tableau.setEntry(row, column, value);
+ }
+
+ /**
+ * Get the offset of the first slack variable.
+ * @return offset of the first slack variable
+ */
+ protected final int getSlackVariableOffset() {
+ return getNumObjectiveFunctions() + numDecisionVariables;
+ }
+
+ /**
+ * Get the offset of the first artificial variable.
+ * @return offset of the first artificial variable
+ */
+ protected final int getArtificialVariableOffset() {
+ return getNumObjectiveFunctions() + numDecisionVariables + numSlackVariables;
+ }
+
+ /**
+ * Get the offset of the right hand side.
+ * @return offset of the right hand side
+ */
+ protected final int getRhsOffset() {
+ return getWidth() - 1;
+ }
+
+ /**
+ * Get the number of decision variables.
+ *
+ * If variables are not restricted to positive values, this will include 1 extra decision variable to represent
+ * the absolute value of the most negative variable.
+ *
+ * @return number of decision variables
+ * @see #getOriginalNumDecisionVariables()
+ */
+ protected final int getNumDecisionVariables() {
+ return numDecisionVariables;
+ }
+
+ /**
+ * Get the original number of decision variables.
+ * @return original number of decision variables
+ * @see #getNumDecisionVariables()
+ */
+ protected final int getOriginalNumDecisionVariables() {
+ return f.getCoefficients().getDimension();
+ }
+
+ /**
+ * Get the number of slack variables.
+ * @return number of slack variables
+ */
+ protected final int getNumSlackVariables() {
+ return numSlackVariables;
+ }
+
+ /**
+ * Get the number of artificial variables.
+ * @return number of artificial variables
+ */
+ protected final int getNumArtificialVariables() {
+ return numArtificialVariables;
+ }
+
+ /**
+ * Get the tableau data.
+ * @return tableau data
+ */
+ protected final double[][] getData() {
+ return tableau.getData();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+
+ if (this == other) {
+ return true;
+ }
+
+ if (other instanceof SimplexTableau) {
+ SimplexTableau rhs = (SimplexTableau) other;
+ return (restrictToNonNegative == rhs.restrictToNonNegative) &&
+ (numDecisionVariables == rhs.numDecisionVariables) &&
+ (numSlackVariables == rhs.numSlackVariables) &&
+ (numArtificialVariables == rhs.numArtificialVariables) &&
+ (epsilon == rhs.epsilon) &&
+ (maxUlps == rhs.maxUlps) &&
+ f.equals(rhs.f) &&
+ constraints.equals(rhs.constraints) &&
+ tableau.equals(rhs.tableau);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return Boolean.valueOf(restrictToNonNegative).hashCode() ^
+ numDecisionVariables ^
+ numSlackVariables ^
+ numArtificialVariables ^
+ Double.valueOf(epsilon).hashCode() ^
+ maxUlps ^
+ f.hashCode() ^
+ constraints.hashCode() ^
+ tableau.hashCode();
+ }
+
+ /**
+ * Serialize the instance.
+ * @param oos stream where object should be written
+ * @throws IOException if object cannot be written to stream
+ */
+ private void writeObject(ObjectOutputStream oos)
+ throws IOException {
+ oos.defaultWriteObject();
+ MatrixUtils.serializeRealMatrix(tableau, oos);
+ }
+
+ /**
+ * Deserialize the instance.
+ * @param ois stream from which the object should be read
+ * @throws ClassNotFoundException if a class in the stream cannot be found
+ * @throws IOException if object cannot be read from the stream
+ */
+ private void readObject(ObjectInputStream ois)
+ throws ClassNotFoundException, IOException {
+ ois.defaultReadObject();
+ MatrixUtils.deserializeRealMatrix(this, "tableau", ois);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/UnboundedSolutionException.java b/src/main/java/org/apache/commons/math3/optim/linear/UnboundedSolutionException.java
new file mode 100644
index 000000000..4044d5a05
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/UnboundedSolutionException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+
+/**
+ * This class represents exceptions thrown by optimizers when a solution escapes to infinity.
+ *
+ * @version $Id: UnboundedSolutionException.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class UnboundedSolutionException extends MathIllegalStateException {
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = 940539497277290619L;
+
+ /**
+ * Simple constructor using a default message.
+ */
+ public UnboundedSolutionException() {
+ super(LocalizedFormats.UNBOUNDED_SOLUTION);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/linear/package-info.java b/src/main/java/org/apache/commons/math3/optim/linear/package-info.java
new file mode 100644
index 000000000..64c46973f
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/linear/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
/**
 * Optimization algorithms for linear constrained problems.
 */
package org.apache.commons.math3.optim.linear;
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/GradientMultivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/GradientMultivariateOptimizer.java
new file mode 100644
index 000000000..345cda004
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/GradientMultivariateOptimizer.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+
+/**
+ * Base class for implementing optimizers for multivariate scalar
+ * differentiable functions.
+ * It contains boiler-plate code for dealing with gradient evaluation.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class GradientMultivariateOptimizer
+ extends MultivariateOptimizer {
+ /**
+ * Gradient of the objective function.
+ */
+ private MultivariateVectorFunction gradient;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected GradientMultivariateOptimizer(ConvergenceChecker<PointValuePair> checker) {
+ super(checker);
+ }
+
+ /**
+ * Compute the gradient vector.
+ *
+ * @param params Point at which the gradient must be evaluated.
+ * @return the gradient at the specified point.
+ */
+ protected double[] computeObjectiveGradient(final double[] params) {
+ return gradient.value(params);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ * <ul>
+ * <li>{@link org.apache.commons.math3.optim.MaxEval}</li>
+ * <li>{@link org.apache.commons.math3.optim.InitialGuess}</li>
+ * <li>{@link org.apache.commons.math3.optim.SimpleBounds}</li>
+ * <li>{@link org.apache.commons.math3.optim.GoalType}</li>
+ * <li>{@link org.apache.commons.math3.optim.ObjectiveFunction}</li>
+ * <li>{@link ObjectiveFunctionGradient}</li>
+ * </ul>
+ * @return {@inheritDoc}
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations (of the objective function) is exceeded.
+ */
+ @Override
+ public PointValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ * <ul>
+ * <li>{@link ObjectiveFunctionGradient}</li>
+ * </ul>
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof ObjectiveFunctionGradient) {
+ gradient = ((ObjectiveFunctionGradient) data).getObjectiveFunctionGradient();
+ // If more data must be parsed, this statement _must_ be
+ // changed to "continue".
+ break;
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/LeastSquaresConverter.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/LeastSquaresConverter.java
new file mode 100644
index 000000000..c9694ae6c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/LeastSquaresConverter.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.linear.RealMatrix;
+
+/**
+ * This class converts
+ * {@link MultivariateVectorFunction vectorial objective functions} to
+ * {@link MultivariateFunction scalar objective functions}
+ * when the goal is to minimize them.
+ *
+ * This class is mostly used when the vectorial objective function represents
+ * a theoretical result computed from a point set applied to a model and
+ * the model's parameters must be adjusted to fit the theoretical result to some
+ * reference observations. The observations may be obtained for example from
+ * physical measurements, whereas the model is built from theoretical
+ * considerations.
+ *
+ * This class computes a possibly weighted squared sum of the residuals, which is
+ * a scalar value. The residuals are the difference between the theoretical model
+ * (i.e. the output of the vectorial objective function) and the observations. The
+ * class implements the {@link MultivariateFunction} interface and can therefore be
+ * minimized by any optimizer supporting scalar objectives functions.This is one way
+ * to perform a least square estimation. There are other ways to do this without using
+ * this converter, as some optimization algorithms directly support vectorial objective
+ * functions.
+ *
+ * This class support combination of residuals with or without weights and correlations.
+ *
+ * @see MultivariateFunction
+ * @see MultivariateVectorFunction
+ * @version $Id: LeastSquaresConverter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+
+public class LeastSquaresConverter implements MultivariateFunction {
+ /** Underlying vectorial function. */
+ private final MultivariateVectorFunction function;
+ /** Observations to be compared to objective function to compute residuals. */
+ private final double[] observations;
+ /** Optional weights for the residuals. */
+ private final double[] weights;
+ /** Optional scaling matrix (weight and correlations) for the residuals. */
+ private final RealMatrix scale;
+
+ /**
+ * Builds a simple converter for uncorrelated residuals with identical
+ * weights.
+ *
+ * @param function vectorial residuals function to wrap
+ * @param observations observations to be compared to objective function to compute residuals
+ */
+ public LeastSquaresConverter(final MultivariateVectorFunction function,
+ final double[] observations) {
+ this.function = function;
+ this.observations = observations.clone();
+ this.weights = null;
+ this.scale = null;
+ }
+
+ /**
+ * Builds a simple converter for uncorrelated residuals with the
+ * specified weights.
+ *
+ * The scalar objective function value is computed as:
+ *
+ * objective = sum over i of [weight[i] * (observation[i] - objective[i])^2]
+ *
+ *
+ *
+ * Weights can be used for example to combine residuals with different standard
+ * deviations. As an example, consider a residuals array in which even elements
+ * are angular measurements in degrees with a 0.01° standard deviation and
+ * odd elements are distance measurements in meters with a 15m standard deviation.
+ * In this case, the weights array should be initialized with value
+ * 1.0/(0.01^2) in the even elements and 1.0/(15.0^2) in the
+ * odd elements (i.e. reciprocals of variances).
+ *
+ *
+ * The array computed by the objective function, the observations array and the
+ * weights array must have consistent sizes or a {@link DimensionMismatchException}
+ * will be triggered while computing the scalar objective.
+ *
+ *
+ * @param function vectorial residuals function to wrap
+ * @param observations observations to be compared to objective function to compute residuals
+ * @param weights weights to apply to the residuals
+ * @throws DimensionMismatchException if the observations vector and the weights
+ * vector dimensions do not match (objective function dimension is checked only when
+ * the {@link #value(double[])} method is called)
+ */
+ public LeastSquaresConverter(final MultivariateVectorFunction function,
+ final double[] observations,
+ final double[] weights) {
+ if (observations.length != weights.length) {
+ throw new DimensionMismatchException(observations.length, weights.length);
+ }
+ this.function = function;
+ this.observations = observations.clone();
+ this.weights = weights.clone();
+ this.scale = null;
+ }
+
+ /**
+ * Builds a simple converter for correlated residuals with the
+ * specified weights.
+ *
+ * The scalar objective function value is computed as:
+ *
+ * objective = y^T . y with y = scale × (observation - objective)
+ *
+ *
+ *
+ * The array computed by the objective function, the observations array and the
+ * scaling matrix must have consistent sizes or a {@link DimensionMismatchException}
+ * will be triggered while computing the scalar objective.
+ *
+ *
+ * @param function vectorial residuals function to wrap
+ * @param observations observations to be compared to objective function to compute residuals
+ * @param scale scaling matrix
+ * @throws DimensionMismatchException if the observations vector and the scale
+ * matrix dimensions do not match (objective function dimension is checked only when
+ * the {@link #value(double[])} method is called)
+ */
+ public LeastSquaresConverter(final MultivariateVectorFunction function,
+ final double[] observations,
+ final RealMatrix scale) {
+ if (observations.length != scale.getColumnDimension()) {
+ throw new DimensionMismatchException(observations.length, scale.getColumnDimension());
+ }
+ this.function = function;
+ this.observations = observations.clone();
+ this.weights = null;
+ this.scale = scale.copy();
+ }
+
+ /** {@inheritDoc} */
+ public double value(final double[] point) {
+ // compute residuals (theoretical values minus observations)
+ final double[] residuals = function.value(point);
+ if (residuals.length != observations.length) {
+ throw new DimensionMismatchException(residuals.length, observations.length);
+ }
+ for (int i = 0; i < residuals.length; ++i) {
+ residuals[i] -= observations[i];
+ }
+
+ // compute sum of squares (weighted, scaled, or plain, per constructor used)
+ double sumSquares = 0;
+ if (weights != null) {
+ for (int i = 0; i < residuals.length; ++i) {
+ final double ri = residuals[i];
+ sumSquares += weights[i] * ri * ri;
+ }
+ } else if (scale != null) {
+ for (final double yi : scale.operate(residuals)) {
+ sumSquares += yi * yi;
+ }
+ } else {
+ for (final double ri : residuals) {
+ sumSquares += ri * ri;
+ }
+ }
+
+ return sumSquares;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizer.java
new file mode 100644
index 000000000..169d642d6
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizer.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Comparator;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.random.RandomVectorGenerator;
+import org.apache.commons.math3.optim.BaseMultiStartMultivariateOptimizer;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.GoalType;
+
+/**
+ * Multi-start optimizer.
+ *
+ * This class wraps an optimizer in order to use it several times in
+ * turn with different starting points (trying to avoid being trapped
+ * in a local extremum when looking for a global one).
+ *
+ * @version $Id$
+ * @since 3.0
+ */
+public class MultiStartMultivariateOptimizer
+ extends BaseMultiStartMultivariateOptimizer {
+ /** Underlying optimizer. */
+ private final MultivariateOptimizer optimizer;
+ /** Found optima. */
+ private final List optima = new ArrayList();
+
+ /**
+ * Create a multi-start optimizer from a single-start optimizer.
+ *
+ * @param optimizer Single-start optimizer to wrap.
+ * @param starts Number of starts to perform.
+ * If {@code starts == 1}, the result will be same as if {@code optimizer}
+ * is called directly.
+ * @param generator Random vector generator to use for restarts.
+ * @throws NullArgumentException if {@code optimizer} or {@code generator}
+ * is {@code null}.
+ * @throws NotStrictlyPositiveException if {@code starts < 1}.
+ */
+ public MultiStartMultivariateOptimizer(final MultivariateOptimizer optimizer,
+ final int starts,
+ final RandomVectorGenerator generator)
+ throws NullArgumentException,
+ NotStrictlyPositiveException {
+ super(optimizer, starts, generator);
+ this.optimizer = optimizer;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public PointValuePair[] getOptima() {
+ Collections.sort(optima, getPairComparator());
+ return optima.toArray(new PointValuePair[0]);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void store(PointValuePair optimum) {
+ optima.add(optimum);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void clear() {
+ optima.clear();
+ }
+
+ /**
+ * @return a comparator for sorting the optima.
+ */
+ private Comparator getPairComparator() {
+ return new Comparator() {
+ public int compare(final PointValuePair o1,
+ final PointValuePair o2) {
+ if (o1 == null) {
+ return (o2 == null) ? 0 : 1;
+ } else if (o2 == null) {
+ return -1;
+ }
+ final double v1 = o1.getValue();
+ final double v2 = o2.getValue();
+ return (optimizer.getGoalType() == GoalType.MINIMIZE) ?
+ Double.compare(v1, v2) : Double.compare(v2, v1);
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapter.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapter.java
new file mode 100644
index 000000000..96174bf79
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapter.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.analysis.function.Logit;
+import org.apache.commons.math3.analysis.function.Sigmoid;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.MathUtils;
+
+/**
+ * Adapter for mapping bounded {@link MultivariateFunction} to unbounded ones.
+ *
+ *
+ * This adapter can be used to wrap functions subject to simple bounds on
+ * parameters so they can be used by optimizers that do not directly
+ * support simple bounds.
+ *
+ *
+ * The principle is that the user function that will be wrapped will see its
+ * parameters bounded as required, i.e when its {@code value} method is called
+ * with argument array {@code point}, the elements array will fulfill requirement
+ * {@code lower[i] <= point[i] <= upper[i]} for all i. Some of the components
+ * may be unbounded or bounded only on one side if the corresponding bound is
+ * set to an infinite value. The optimizer will not manage the user function by
+ * itself, but it will handle this adapter and it is this adapter that will take
+ * care the bounds are fulfilled. The adapter {@link #value(double[])} method will
+ * be called by the optimizer with unbound parameters, and the adapter will map
+ * the unbounded value to the bounded range using appropriate functions like
+ * {@link Sigmoid} for double bounded elements for example.
+ *
+ *
+ * As the optimizer sees only unbounded parameters, it should be noted that the
+ * start point or simplex expected by the optimizer should be unbounded, so the
+ * user is responsible for converting his bounded point to unbounded by calling
+ * {@link #boundedToUnbounded(double[])} before providing them to the optimizer.
+ * For the same reason, the point returned by the {@link
+ * org.apache.commons.math3.optim.BaseMultivariateOptimizer#optimize(
+ * org.apache.commons.math3.optim.OptimizationData...)}
+ * method is unbounded. So to convert this point to bounded, users must call
+ * {@link #unboundedToBounded(double[])} by themselves!
+ *
+ * This adapter is only a poor-man's solution to simple bounds optimization constraints
+ * that can be used with simple optimizers like
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer
+ * SimplexOptimizer}.
+ * A better solution is to use an optimizer that directly supports simple bounds like
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer
+ * CMAESOptimizer} or
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer
+ * BOBYQAOptimizer}.
+ * One caveat of this poor-man's solution is that behavior near the bounds may be
+ * numerically unstable as bounds are mapped from infinite values.
+ * Another caveat is that convergence values are evaluated by the optimizer with
+ * respect to unbounded variables, so there will be scales differences when
+ * converted to bounded variables.
+ *
+ *
+ * @see MultivariateFunctionPenaltyAdapter
+ *
+ * @version $Id: MultivariateFunctionMappingAdapter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class MultivariateFunctionMappingAdapter
+ implements MultivariateFunction {
+ /** Underlying bounded function. */
+ private final MultivariateFunction bounded;
+ /** Mapping functions (one per component of the input array). */
+ private final Mapper[] mappers;
+
+ /** Simple constructor.
+ * @param bounded bounded function
+ * @param lower lower bounds for each element of the input parameters array
+ * (some elements may be set to {@code Double.NEGATIVE_INFINITY} for
+ * unbounded values)
+ * @param upper upper bounds for each element of the input parameters array
+ * (some elements may be set to {@code Double.POSITIVE_INFINITY} for
+ * unbounded values)
+ * @exception DimensionMismatchException if lower and upper bounds are not
+ * consistent, either according to dimension or to values
+ */
+ public MultivariateFunctionMappingAdapter(final MultivariateFunction bounded,
+ final double[] lower, final double[] upper) {
+ // safety checks
+ MathUtils.checkNotNull(lower);
+ MathUtils.checkNotNull(upper);
+ if (lower.length != upper.length) {
+ throw new DimensionMismatchException(lower.length, upper.length);
+ }
+ for (int i = 0; i < lower.length; ++i) {
+ // note the following test is written in such a way it also fails for NaN
+ if (!(upper[i] >= lower[i])) {
+ throw new NumberIsTooSmallException(upper[i], lower[i], true);
+ }
+ }
+
+ this.bounded = bounded;
+ this.mappers = new Mapper[lower.length];
+ for (int i = 0; i < mappers.length; ++i) {
+ if (Double.isInfinite(lower[i])) {
+ if (Double.isInfinite(upper[i])) {
+ // element is unbounded, no transformation is needed
+ mappers[i] = new NoBoundsMapper();
+ } else {
+ // element is simple-bounded on the upper side
+ mappers[i] = new UpperBoundMapper(upper[i]);
+ }
+ } else {
+ if (Double.isInfinite(upper[i])) {
+ // element is simple-bounded on the lower side
+ mappers[i] = new LowerBoundMapper(lower[i]);
+ } else {
+ // element is double-bounded
+ mappers[i] = new LowerUpperBoundMapper(lower[i], upper[i]);
+ }
+ }
+ }
+ }
+
+ /**
+ * Maps an array from unbounded to bounded.
+ *
+ * @param point Unbounded values.
+ * @return the bounded values.
+ */
+ public double[] unboundedToBounded(double[] point) {
+ // Map unbounded input point to bounded point, component by component.
+ final double[] mapped = new double[mappers.length];
+ for (int i = 0; i < mappers.length; ++i) {
+ mapped[i] = mappers[i].unboundedToBounded(point[i]);
+ }
+
+ return mapped;
+ }
+
+ /**
+ * Maps an array from bounded to unbounded.
+ *
+ * @param point Bounded values.
+ * @return the unbounded values.
+ */
+ public double[] boundedToUnbounded(double[] point) {
+ // Map bounded input point to unbounded point, component by component.
+ final double[] mapped = new double[mappers.length];
+ for (int i = 0; i < mappers.length; ++i) {
+ mapped[i] = mappers[i].boundedToUnbounded(point[i]);
+ }
+
+ return mapped;
+ }
+
+ /**
+ * Compute the underlying function value from an unbounded point.
+ *
+ * This method simply bounds the unbounded point using the mappings
+ * set up at construction and calls the underlying function using
+ * the bounded point.
+ *
+ * @param point unbounded value
+ * @return underlying function value
+ * @see #unboundedToBounded(double[])
+ */
+ public double value(double[] point) {
+ return bounded.value(unboundedToBounded(point));
+ }
+
+ /** Mapping interface. */
+ private interface Mapper {
+ /**
+ * Maps a value from unbounded to bounded.
+ *
+ * @param y Unbounded value.
+ * @return the bounded value.
+ */
+ double unboundedToBounded(double y);
+
+ /**
+ * Maps a value from bounded to unbounded.
+ *
+ * @param x Bounded value.
+ * @return the unbounded value.
+ */
+ double boundedToUnbounded(double x);
+ }
+
+ /** Local class for no bounds mapping (identity in both directions). */
+ private static class NoBoundsMapper implements Mapper {
+ /** {@inheritDoc} */
+ public double unboundedToBounded(final double y) {
+ return y;
+ }
+
+ /** {@inheritDoc} */
+ public double boundedToUnbounded(final double x) {
+ return x;
+ }
+ }
+
+ /** Local class for lower bounds mapping (uses exp/log). */
+ private static class LowerBoundMapper implements Mapper {
+ /** Low bound. */
+ private final double lower;
+
+ /**
+ * Simple constructor.
+ *
+ * @param lower lower bound
+ */
+ public LowerBoundMapper(final double lower) {
+ this.lower = lower;
+ }
+
+ /** {@inheritDoc} */
+ public double unboundedToBounded(final double y) {
+ return lower + FastMath.exp(y);
+ }
+
+ /** {@inheritDoc} */
+ public double boundedToUnbounded(final double x) {
+ return FastMath.log(x - lower);
+ }
+
+ }
+
+ /** Local class for upper bounds mapping (uses exp/log). */
+ private static class UpperBoundMapper implements Mapper {
+
+ /** Upper bound. */
+ private final double upper;
+
+ /** Simple constructor.
+ * @param upper upper bound
+ */
+ public UpperBoundMapper(final double upper) {
+ this.upper = upper;
+ }
+
+ /** {@inheritDoc} */
+ public double unboundedToBounded(final double y) {
+ return upper - FastMath.exp(-y);
+ }
+
+ /** {@inheritDoc} */
+ public double boundedToUnbounded(final double x) {
+ return -FastMath.log(upper - x);
+ }
+
+ }
+
+ /** Local class for lower and upper bounds mapping (uses sigmoid/logit). */
+ private static class LowerUpperBoundMapper implements Mapper {
+ /** Function from unbounded to bounded. */
+ private final UnivariateFunction boundingFunction;
+ /** Function from bounded to unbounded. */
+ private final UnivariateFunction unboundingFunction;
+
+ /**
+ * Simple constructor.
+ *
+ * @param lower lower bound
+ * @param upper upper bound
+ */
+ public LowerUpperBoundMapper(final double lower, final double upper) {
+ boundingFunction = new Sigmoid(lower, upper);
+ unboundingFunction = new Logit(lower, upper);
+ }
+
+ /** {@inheritDoc} */
+ public double unboundedToBounded(final double y) {
+ return boundingFunction.value(y);
+ }
+
+ /** {@inheritDoc} */
+ public double boundedToUnbounded(final double x) {
+ return unboundingFunction.value(x);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapter.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapter.java
new file mode 100644
index 000000000..1454a2cfa
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapter.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.MathUtils;
+
+/**
+ * Adapter extending bounded {@link MultivariateFunction} to an unbounded
+ * domain using a penalty function.
+ *
+ *
+ * This adapter can be used to wrap functions subject to simple bounds on
+ * parameters so they can be used by optimizers that do not directly
+ * support simple bounds.
+ *
+ *
+ * The principle is that the user function that will be wrapped will see its
+ * parameters bounded as required, i.e when its {@code value} method is called
+ * with argument array {@code point}, the elements array will fulfill requirement
+ * {@code lower[i] <= point[i] <= upper[i]} for all i. Some of the components
+ * may be unbounded or bounded only on one side if the corresponding bound is
+ * set to an infinite value. The optimizer will not manage the user function by
+ * itself, but it will handle this adapter and it is this adapter that will take
+ * care the bounds are fulfilled. The adapter {@link #value(double[])} method will
+ * be called by the optimizer with unbound parameters, and the adapter will check
+ * if the parameters is within range or not. If it is in range, then the underlying
+ * user function will be called, and if it is not the value of a penalty function
+ * will be returned instead.
+ *
+ *
+ * This adapter is only a poor-man's solution to simple bounds optimization
+ * constraints that can be used with simple optimizers like
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer
+ * SimplexOptimizer}.
+ * A better solution is to use an optimizer that directly supports simple bounds like
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer
+ * CMAESOptimizer} or
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer
+ * BOBYQAOptimizer}.
+ * One caveat of this poor-man's solution is that if start point or start simplex
+ * is completely outside of the allowed range, only the penalty function is used,
+ * and the optimizer may converge without ever entering the range.
+ *
+ *
+ * @see MultivariateFunctionMappingAdapter
+ *
+ * @version $Id: MultivariateFunctionPenaltyAdapter.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.0
+ */
+public class MultivariateFunctionPenaltyAdapter
+ implements MultivariateFunction {
+ /** Underlying bounded function. */
+ private final MultivariateFunction bounded;
+ /** Lower bounds. */
+ private final double[] lower;
+ /** Upper bounds. */
+ private final double[] upper;
+ /** Penalty offset. */
+ private final double offset;
+ /** Penalty scales. */
+ private final double[] scale;
+
+ /**
+ * Simple constructor.
+ *
+ * When the optimizer provided points are out of range, the value of the
+ * penalty function will be used instead of the value of the underlying
+ * function. In order for this penalty to be effective in rejecting this
+ * point during the optimization process, the penalty function value should
+ * be defined with care. This value is computed as:
+ *
+ * penalty(point) = offset + sum over i of [scale[i] * sqrt(|point[i] - boundary[i]|)]
+ *
+ * where indices i correspond to all the components that violate their boundaries.
+ *
+ *
+ * So when attempting a function minimization, offset should be larger than
+ * the maximum expected value of the underlying function and scale components
+ * should all be positive. When attempting a function maximization, offset
+ * should be lesser than the minimum expected value of the underlying function
+ * and scale components should all be negative.
+ * In short, the penalty must always yield a value that is worse than the
+ * best achievable value of the underlying function.
+ *
+ *
+ * These choices for the penalty function have two properties. First, all out
+ * of range points will return a function value that is worse than the value
+ * returned by any in range point. Second, the penalty is worse for large
+ * boundaries violation than for small violations, so the optimizer has an hint
+ * about the direction in which it should search for acceptable points.
+ *
+ * @param bounded bounded function
+ * @param lower lower bounds for each element of the input parameters array
+ * (some elements may be set to {@code Double.NEGATIVE_INFINITY} for
+ * unbounded values)
+ * @param upper upper bounds for each element of the input parameters array
+ * (some elements may be set to {@code Double.POSITIVE_INFINITY} for
+ * unbounded values)
+ * @param offset base offset of the penalty function
+ * @param scale scale of the penalty function
+ * @exception DimensionMismatchException if lower bounds, upper bounds and
+ * scales are not consistent, either according to dimension or to boundary
+ * values
+ */
+ public MultivariateFunctionPenaltyAdapter(final MultivariateFunction bounded,
+ final double[] lower, final double[] upper,
+ final double offset, final double[] scale) {
+
+ // safety checks
+ MathUtils.checkNotNull(lower);
+ MathUtils.checkNotNull(upper);
+ MathUtils.checkNotNull(scale);
+ if (lower.length != upper.length) {
+ throw new DimensionMismatchException(lower.length, upper.length);
+ }
+ if (lower.length != scale.length) {
+ throw new DimensionMismatchException(lower.length, scale.length);
+ }
+ for (int i = 0; i < lower.length; ++i) {
+ // note the following test is written in such a way it also fails for NaN
+ if (!(upper[i] >= lower[i])) {
+ throw new NumberIsTooSmallException(upper[i], lower[i], true);
+ }
+ }
+
+ this.bounded = bounded;
+ this.lower = lower.clone();
+ this.upper = upper.clone();
+ this.offset = offset;
+ this.scale = scale.clone();
+ }
+
+ /**
+ * Computes the underlying function value from an unbounded point.
+ *
+ * This method simply returns the value of the underlying function
+ * if the unbounded point already fulfills the bounds, and compute
+ * a replacement value using the offset and scale if bounds are
+ * violated, without calling the function at all.
+ *
+ * @param point unbounded point
+ * @return either underlying function value or penalty function value
+ */
+ public double value(double[] point) {
+
+ for (int i = 0; i < scale.length; ++i) {
+ if ((point[i] < lower[i]) || (point[i] > upper[i])) {
+ // bound violation starting at this component; NOTE(review): the code sums sqrt(scale[j] * |overshoot|), not scale[j] * sqrt(|overshoot|) as the class javadoc formula states -- confirm which is intended
+ double sum = 0;
+ for (int j = i; j < scale.length; ++j) {
+ final double overshoot;
+ if (point[j] < lower[j]) {
+ overshoot = scale[j] * (lower[j] - point[j]);
+ } else if (point[j] > upper[j]) {
+ overshoot = scale[j] * (point[j] - upper[j]);
+ } else {
+ overshoot = 0;
+ }
+ sum += FastMath.sqrt(overshoot);
+ }
+ return offset + sum;
+ }
+ }
+
+ // all boundaries are fulfilled, we are in the expected
+ // domain of the underlying function
+ return bounded.value(point);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateOptimizer.java
new file mode 100644
index 000000000..3298b62a3
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateOptimizer.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.optim.BaseMultivariateOptimizer;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+
+/**
+ * Base class for a multivariate scalar function optimizer.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class MultivariateOptimizer
+ extends BaseMultivariateOptimizer<PointValuePair> {
+ /** Objective function. */
+ private MultivariateFunction function;
+ /** Type of optimization. */
+ private GoalType goal;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected MultivariateOptimizer(ConvergenceChecker<PointValuePair> checker) {
+ super(checker);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link org.apache.commons.math3.optim.MaxEval}
+ * - {@link org.apache.commons.math3.optim.InitialGuess}
+ * - {@link org.apache.commons.math3.optim.SimpleBounds}
+ * - {@link ObjectiveFunction}
+ * - {@link GoalType}
+ *
+ * @return {@inheritDoc}
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ */
+ @Override
+ public PointValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link ObjectiveFunction}
+ * - {@link GoalType}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof GoalType) {
+ goal = (GoalType) data;
+ continue;
+ }
+ if (data instanceof ObjectiveFunction) {
+ function = ((ObjectiveFunction) data).getObjectiveFunction();
+ continue;
+ }
+ }
+ }
+
+ /**
+ * @return the optimization type.
+ */
+ public GoalType getGoalType() {
+ return goal;
+ }
+
+ /**
+ * Computes the objective function value.
+ * This method must be called by subclasses to enforce the
+ * evaluation counter limit.
+ *
+ * @param params Point at which the objective function must be evaluated.
+ * @return the objective function value at the specified point.
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ */
+ protected double computeObjectiveValue(double[] params) {
+ super.incrementEvaluationCount();
+ return function.value(params);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/ObjectiveFunctionGradient.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/ObjectiveFunctionGradient.java
new file mode 100644
index 000000000..8e56c9c2b
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/ObjectiveFunctionGradient.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Gradient of the scalar function to be optimized.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class ObjectiveFunctionGradient implements OptimizationData {
+ /** Function to be optimized. */
+ private final MultivariateVectorFunction gradient;
+
+ /**
+ * @param g Gradient of the function to be optimized.
+ */
+ public ObjectiveFunctionGradient(MultivariateVectorFunction g) {
+ gradient = g;
+ }
+
+ /**
+ * Gets the gradient of the function to be optimized.
+ *
+ * @return the objective function gradient.
+ */
+ public MultivariateVectorFunction getObjectiveFunctionGradient() {
+ return gradient;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java
new file mode 100644
index 000000000..f0720cdba
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.gradient;
+
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.analysis.solvers.BrentSolver;
+import org.apache.commons.math3.analysis.solvers.UnivariateSolver;
+import org.apache.commons.math3.exception.MathInternalError;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.nonlinear.scalar.GradientMultivariateOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Non-linear conjugate gradient optimizer.
+ *
+ * This class supports both the Fletcher-Reeves and the Polak-Ribière
+ * update formulas for the conjugate search directions.
+ * It also supports optional preconditioning.
+ *
+ *
+ * @version $Id: NonLinearConjugateGradientOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class NonLinearConjugateGradientOptimizer
+ extends GradientMultivariateOptimizer {
+ /** Update formula for the beta parameter. */
+ private final Formula updateFormula;
+ /** Preconditioner (may be null). */
+ private final Preconditioner preconditioner;
+ /** solver to use in the line search (may be null). */
+ private final UnivariateSolver solver;
+ /** Initial step used to bracket the optimum in line search. */
+ private double initialStep = 1;
+
+ /**
+ * Constructor with default {@link BrentSolver line search solver} and
+ * {@link IdentityPreconditioner preconditioner}.
+ *
+ * @param updateFormula formula to use for updating the β parameter,
+ * must be one of {@link Formula#FLETCHER_REEVES} or
+ * {@link Formula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ */
+ public NonLinearConjugateGradientOptimizer(final Formula updateFormula,
+ ConvergenceChecker<PointValuePair> checker) {
+ this(updateFormula,
+ checker,
+ new BrentSolver(),
+ new IdentityPreconditioner());
+ }
+
+ /**
+ * Available choices of update formulas for updating the parameter
+ * that is used to compute the successive conjugate search directions.
+ * For non-linear conjugate gradients, there are
+ * two formulas:
+ *
+ * - Fletcher-Reeves formula
+ * - Polak-Ribière formula
+ *
+ *
+ * On the one hand, the Fletcher-Reeves formula is guaranteed to converge
+ * if the start point is close enough to the optimum, whereas the
+ * Polak-Ribière formula may not converge in rare cases. On the
+ * other hand, the Polak-Ribière formula is often faster when it
+ * does converge. Polak-Ribière is often used.
+ *
+ * @since 2.0
+ */
+ public static enum Formula {
+ /** Fletcher-Reeves formula. */
+ FLETCHER_REEVES,
+ /** Polak-Ribière formula. */
+ POLAK_RIBIERE
+ }
+
+ /**
+ * The initial step is a factor with respect to the search direction
+ * (which itself is roughly related to the gradient of the function).
+ *
+ * It is used to find an interval that brackets the optimum in line
+ * search.
+ *
+ * @since 3.1
+ */
+ public static class BracketingStep implements OptimizationData {
+ /** Initial step. */
+ private final double initialStep;
+
+ /**
+ * @param step Initial step for the bracket search.
+ */
+ public BracketingStep(double step) {
+ initialStep = step;
+ }
+
+ /**
+ * Gets the initial step.
+ *
+ * @return the initial step.
+ */
+ public double getBracketingStep() {
+ return initialStep;
+ }
+ }
+
+ /**
+ * Constructor with default {@link IdentityPreconditioner preconditioner}.
+ *
+ * @param updateFormula formula to use for updating the β parameter,
+ * must be one of {@link Formula#FLETCHER_REEVES} or
+ * {@link Formula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ * @param lineSearchSolver Solver to use during line search.
+ */
+ public NonLinearConjugateGradientOptimizer(final Formula updateFormula,
+ ConvergenceChecker<PointValuePair> checker,
+ final UnivariateSolver lineSearchSolver) {
+ this(updateFormula,
+ checker,
+ lineSearchSolver,
+ new IdentityPreconditioner());
+ }
+
+ /**
+ * @param updateFormula formula to use for updating the β parameter,
+ * must be one of {@link Formula#FLETCHER_REEVES} or
+ * {@link Formula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ * @param lineSearchSolver Solver to use during line search.
+ * @param preconditioner Preconditioner.
+ */
+ public NonLinearConjugateGradientOptimizer(final Formula updateFormula,
+ ConvergenceChecker<PointValuePair> checker,
+ final UnivariateSolver lineSearchSolver,
+ final Preconditioner preconditioner) {
+ super(checker);
+
+ this.updateFormula = updateFormula;
+ solver = lineSearchSolver;
+ this.preconditioner = preconditioner;
+ initialStep = 1;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link org.apache.commons.math3.optim.MaxEval}
+ * - {@link org.apache.commons.math3.optim.InitialGuess}
+ * - {@link org.apache.commons.math3.optim.SimpleBounds}
+ * - {@link org.apache.commons.math3.optim.GoalType}
+ * - {@link org.apache.commons.math3.optim.ObjectiveFunction}
+ * - {@link org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient}
+ * - {@link BracketingStep}
+ *
+ * @return {@inheritDoc}
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations (of the objective function) is exceeded.
+ */
+ @Override
+ public PointValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointValuePair doOptimize() {
+ final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
+ final double[] point = getStartPoint();
+ final GoalType goal = getGoalType();
+ final int n = point.length;
+ double[] r = computeObjectiveGradient(point);
+ if (goal == GoalType.MINIMIZE) {
+ for (int i = 0; i < n; i++) {
+ r[i] = -r[i];
+ }
+ }
+
+ // Initial search direction.
+ double[] steepestDescent = preconditioner.precondition(point, r);
+ double[] searchDirection = steepestDescent.clone();
+
+ double delta = 0;
+ for (int i = 0; i < n; ++i) {
+ delta += r[i] * searchDirection[i];
+ }
+
+ PointValuePair current = null;
+ int iter = 0;
+ int maxEval = getMaxEvaluations();
+ while (true) {
+ ++iter;
+
+ final double objective = computeObjectiveValue(point);
+ PointValuePair previous = current;
+ current = new PointValuePair(point, objective);
+ if (previous != null) {
+ if (checker.converged(iter, previous, current)) {
+ // We have found an optimum.
+ return current;
+ }
+ }
+
+ // Find the optimal step in the search direction.
+ final UnivariateFunction lsf = new LineSearchFunction(point, searchDirection);
+ final double uB = findUpperBound(lsf, 0, initialStep);
+ // XXX Last parameters is set to a value close to zero in order to
+ // work around the divergence problem in the "testCircleFitting"
+ // unit test (see MATH-439).
+ final double step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
+ maxEval -= solver.getEvaluations(); // Subtract used up evaluations.
+
+ // Validate new point.
+ for (int i = 0; i < point.length; ++i) {
+ point[i] += step * searchDirection[i];
+ }
+
+ r = computeObjectiveGradient(point);
+ if (goal == GoalType.MINIMIZE) {
+ for (int i = 0; i < n; ++i) {
+ r[i] = -r[i];
+ }
+ }
+
+ // Compute beta.
+ final double deltaOld = delta;
+ final double[] newSteepestDescent = preconditioner.precondition(point, r);
+ delta = 0;
+ for (int i = 0; i < n; ++i) {
+ delta += r[i] * newSteepestDescent[i];
+ }
+
+ final double beta;
+ switch (updateFormula) {
+ case FLETCHER_REEVES:
+ beta = delta / deltaOld;
+ break;
+ case POLAK_RIBIERE:
+ double deltaMid = 0;
+ for (int i = 0; i < r.length; ++i) {
+ deltaMid += r[i] * steepestDescent[i];
+ }
+ beta = (delta - deltaMid) / deltaOld;
+ break;
+ default:
+ // Should never happen.
+ throw new MathInternalError();
+ }
+ steepestDescent = newSteepestDescent;
+
+ // Compute conjugate search direction.
+ if (iter % n == 0 ||
+ beta < 0) {
+ // Break conjugation: reset search direction.
+ searchDirection = steepestDescent.clone();
+ } else {
+ // Compute new conjugate search direction.
+ for (int i = 0; i < n; ++i) {
+ searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
+ }
+ }
+ }
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link BracketingStep}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof BracketingStep) {
+ initialStep = ((BracketingStep) data).getBracketingStep();
+ // If more data must be parsed, this statement _must_ be
+ // changed to "continue".
+ break;
+ }
+ }
+ }
+
+ /**
+ * Finds the upper bound b ensuring bracketing of a root between a and b.
+ *
+ * @param f function whose root must be bracketed.
+ * @param a lower bound of the interval.
+ * @param h initial step to try.
+ * @return b such that f(a) and f(b) have opposite signs.
+ * @throws MathIllegalStateException if no bracket can be found.
+ */
+ private double findUpperBound(final UnivariateFunction f,
+ final double a, final double h) {
+ final double yA = f.value(a);
+ double yB = yA;
+ for (double step = h; step < Double.MAX_VALUE; step *= FastMath.max(2, yA / yB)) {
+ final double b = a + step;
+ yB = f.value(b);
+ if (yA * yB <= 0) {
+ return b;
+ }
+ }
+ throw new MathIllegalStateException(LocalizedFormats.UNABLE_TO_BRACKET_OPTIMUM_IN_LINE_SEARCH);
+ }
+
+ /** Default identity preconditioner. */
+ public static class IdentityPreconditioner implements Preconditioner {
+ /** {@inheritDoc} */
+ public double[] precondition(double[] variables, double[] r) {
+ return r.clone();
+ }
+ }
+
+ /**
+ * Internal class for line search.
+ *
+ * The function represented by this class is the dot product of
+ * the objective function gradient and the search direction. Its
+ * value is zero when the gradient is orthogonal to the search
+ * direction, i.e. when the objective function value is a local
+ * extremum along the search direction.
+ *
+ */
+ private class LineSearchFunction implements UnivariateFunction {
+ /** Current point. */
+ private final double[] currentPoint;
+ /** Search direction. */
+ private final double[] searchDirection;
+
+ /**
+ * @param point Current point.
+ * @param direction Search direction.
+ */
+ public LineSearchFunction(double[] point,
+ double[] direction) {
+ currentPoint = point.clone();
+ searchDirection = direction.clone();
+ }
+
+ /** {@inheritDoc} */
+ public double value(double x) {
+ // current point in the search direction
+ final double[] shiftedPoint = currentPoint.clone();
+ for (int i = 0; i < shiftedPoint.length; ++i) {
+ shiftedPoint[i] += x * searchDirection[i];
+ }
+
+ // gradient of the objective function
+ final double[] gradient = computeObjectiveGradient(shiftedPoint);
+
+ // dot product with the search direction
+ double dotProduct = 0;
+ for (int i = 0; i < gradient.length; ++i) {
+ dotProduct += gradient[i] * searchDirection[i];
+ }
+
+ return dotProduct;
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/Preconditioner.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/Preconditioner.java
new file mode 100644
index 000000000..cd4c79886
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/Preconditioner.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.gradient;
+
+/**
+ * This interface represents a preconditioner for differentiable scalar
+ * objective function optimizers.
+ * @version $Id: Preconditioner.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public interface Preconditioner {
+ /**
+ * Precondition a search direction.
+ *
+ * The returned preconditioned search direction must be computed fast or
+ * the algorithm performance will drop drastically. A classical approach
+ * is to compute only the diagonal elements of the hessian and to divide
+ * the raw search direction by these elements if they are all positive.
+ * If at least one of them is negative, it is safer to return a clone of
+ * the raw search direction as if the hessian was the identity matrix. The
+ * rationale for this simplified choice is that a negative diagonal element
+ * means the current point is far from the optimum and preconditioning will
+ * not be efficient anyway in this case.
+ *
+ * @param point current point at which the search direction was computed
+ * @param r raw search direction (i.e. opposite of the gradient)
+ * @return approximation of H^-1 r where H is the objective function's Hessian
+ */
+ double[] precondition(double[] point, double[] r);
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/package-info.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/package-info.java
new file mode 100644
index 000000000..e516c7d8e
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.gradient;
+
+/**
+ * This package provides optimization algorithms that require derivatives.
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/AbstractSimplex.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/AbstractSimplex.java
new file mode 100644
index 000000000..67eed181d
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/AbstractSimplex.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Arrays;
+import java.util.Comparator;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.MathIllegalArgumentException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * This class implements the simplex concept.
+ * It is intended to be used in conjunction with {@link SimplexOptimizer}.
+ *
+ * The initial configuration of the simplex is set by the constructors
+ * {@link #AbstractSimplex(double[])} or {@link #AbstractSimplex(double[][])}.
+ * The other {@link #AbstractSimplex(int) constructor} will set all steps
+ * to 1, thus building a default configuration from a unit hypercube.
+ *
+ * Users must call the {@link #build(double[]) build} method in order
+ * to create the data structure that will be acted on by the other methods of
+ * this class.
+ *
+ * @see SimplexOptimizer
+ * @version $Id: AbstractSimplex.java 1397759 2012-10-13 01:12:58Z erans $
+ * @since 3.0
+ */
+public abstract class AbstractSimplex implements OptimizationData {
+ /** Simplex. */
+ private PointValuePair[] simplex;
+ /** Start simplex configuration. */
+ private double[][] startConfiguration;
+ /** Simplex dimension (must be equal to {@code simplex.length - 1}). */
+ private final int dimension;
+
+ /**
+ * Build a unit hypercube simplex.
+ *
+ * @param n Dimension of the simplex.
+ */
+ protected AbstractSimplex(int n) {
+ this(n, 1d);
+ }
+
+ /**
+ * Build a hypercube simplex with the given side length.
+ *
+ * @param n Dimension of the simplex.
+ * @param sideLength Length of the sides of the hypercube.
+ */
+ protected AbstractSimplex(int n,
+ double sideLength) {
+ this(createHypercubeSteps(n, sideLength));
+ }
+
+ /**
+ * The start configuration for simplex is built from a box parallel to
+ * the canonical axes of the space. The simplex is the subset of vertices
+ * of a box parallel to the canonical axes. It is built as the path followed
+ * while traveling from one vertex of the box to the diagonally opposite
+ * vertex moving only along the box edges. The first vertex of the box will
+ * be located at the start point of the optimization.
+ * As an example, in dimension 3 a simplex has 4 vertices. Setting the
+ * steps to (1, 10, 2) and the start point to (1, 1, 1) would imply the
+ * start simplex would be: { (1, 1, 1), (2, 1, 1), (2, 11, 1), (2, 11, 3) }.
+ * The first vertex would be set to the start point at (1, 1, 1) and the
+ * last vertex would be set to the diagonally opposite vertex at (2, 11, 3).
+ *
+ * @param steps Steps along the canonical axes representing box edges. They
+ * may be negative but not zero.
+ * @throws NullArgumentException if {@code steps} is {@code null}.
+ * @throws ZeroException if one of the steps is zero.
+ */
+ protected AbstractSimplex(final double[] steps) {
+ if (steps == null) {
+ throw new NullArgumentException();
+ }
+ if (steps.length == 0) {
+ throw new ZeroException();
+ }
+ dimension = steps.length;
+
+ // Only the relative position of the n final vertices with respect
+ // to the first one are stored.
+ startConfiguration = new double[dimension][dimension];
+ for (int i = 0; i < dimension; i++) {
+ final double[] vertexI = startConfiguration[i];
+ for (int j = 0; j < i + 1; j++) {
+ if (steps[j] == 0) {
+ throw new ZeroException(LocalizedFormats.EQUAL_VERTICES_IN_SIMPLEX);
+ }
+ System.arraycopy(steps, 0, vertexI, 0, j + 1);
+ }
+ }
+ }
+
+ /**
+ * The real initial simplex will be set up by moving the reference
+ * simplex such that its first point is located at the start point of the
+ * optimization.
+ *
+ * @param referenceSimplex Reference simplex.
+ * @throws NotStrictlyPositiveException if the reference simplex does not
+ * contain at least one point.
+ * @throws DimensionMismatchException if there is a dimension mismatch
+ * in the reference simplex.
+ * @throws IllegalArgumentException if one of its vertices is duplicated.
+ */
+ protected AbstractSimplex(final double[][] referenceSimplex) {
+ if (referenceSimplex.length <= 0) {
+ throw new NotStrictlyPositiveException(LocalizedFormats.SIMPLEX_NEED_ONE_POINT,
+ referenceSimplex.length);
+ }
+ dimension = referenceSimplex.length - 1;
+
+ // Only the relative position of the n final vertices with respect
+ // to the first one are stored.
+ startConfiguration = new double[dimension][dimension];
+ final double[] ref0 = referenceSimplex[0];
+
+ // Loop over vertices.
+ for (int i = 0; i < referenceSimplex.length; i++) {
+ final double[] refI = referenceSimplex[i];
+
+ // Safety checks.
+ if (refI.length != dimension) {
+ throw new DimensionMismatchException(refI.length, dimension);
+ }
+ for (int j = 0; j < i; j++) {
+ final double[] refJ = referenceSimplex[j];
+ boolean allEquals = true;
+ for (int k = 0; k < dimension; k++) {
+ if (refI[k] != refJ[k]) {
+ allEquals = false;
+ break;
+ }
+ }
+ if (allEquals) {
+ throw new MathIllegalArgumentException(LocalizedFormats.EQUAL_VERTICES_IN_SIMPLEX,
+ i, j);
+ }
+ }
+
+ // Store vertex i position relative to vertex 0 position.
+ if (i > 0) {
+ final double[] confI = startConfiguration[i - 1];
+ for (int k = 0; k < dimension; k++) {
+ confI[k] = refI[k] - ref0[k];
+ }
+ }
+ }
+ }
+
+ /**
+ * Get simplex dimension.
+ *
+ * @return the dimension of the simplex.
+ */
+ public int getDimension() {
+ return dimension;
+ }
+
+ /**
+ * Get simplex size.
+ * After calling the {@link #build(double[]) build} method, this method
+ * will return a value equivalent to {@code getDimension() + 1}.
+ *
+ * @return the size of the simplex.
+ */
+ public int getSize() {
+ return simplex.length;
+ }
+
+ /**
+ * Compute the next simplex of the algorithm.
+ *
+ * @param evaluationFunction Evaluation function.
+ * @param comparator Comparator to use to sort simplex vertices from best
+ * to worst.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the algorithm fails to converge.
+ */
+ public abstract void iterate(final MultivariateFunction evaluationFunction,
+ final Comparator<PointValuePair> comparator);
+
+ /**
+ * Build an initial simplex.
+ *
+ * @param startPoint First point of the simplex.
+ * @throws DimensionMismatchException if the start point does not match
+ * simplex dimension.
+ */
+ public void build(final double[] startPoint) {
+ if (dimension != startPoint.length) {
+ throw new DimensionMismatchException(dimension, startPoint.length);
+ }
+
+ // Set first vertex.
+ simplex = new PointValuePair[dimension + 1];
+ simplex[0] = new PointValuePair(startPoint, Double.NaN);
+
+ // Set remaining vertices.
+ for (int i = 0; i < dimension; i++) {
+ final double[] confI = startConfiguration[i];
+ final double[] vertexI = new double[dimension];
+ for (int k = 0; k < dimension; k++) {
+ vertexI[k] = startPoint[k] + confI[k];
+ }
+ simplex[i + 1] = new PointValuePair(vertexI, Double.NaN);
+ }
+ }
+
+ /**
+ * Evaluate all the non-evaluated points of the simplex.
+ *
+ * @param evaluationFunction Evaluation function.
+ * @param comparator Comparator to use to sort simplex vertices from best to worst.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the maximal number of evaluations is exceeded.
+ */
+ public void evaluate(final MultivariateFunction evaluationFunction,
+ final Comparator<PointValuePair> comparator) {
+ // Evaluate the objective function at all non-evaluated simplex points.
+ for (int i = 0; i < simplex.length; i++) {
+ final PointValuePair vertex = simplex[i];
+ final double[] point = vertex.getPointRef();
+ if (Double.isNaN(vertex.getValue())) {
+ simplex[i] = new PointValuePair(point, evaluationFunction.value(point), false);
+ }
+ }
+
+ // Sort the simplex from best to worst.
+ Arrays.sort(simplex, comparator);
+ }
+
+ /**
+ * Replace the worst point of the simplex by a new point.
+ *
+ * @param pointValuePair Point to insert.
+ * @param comparator Comparator to use for sorting the simplex vertices
+ * from best to worst.
+ */
+ protected void replaceWorstPoint(PointValuePair pointValuePair,
+ final Comparator<PointValuePair> comparator) {
+ for (int i = 0; i < dimension; i++) {
+ if (comparator.compare(simplex[i], pointValuePair) > 0) {
+ PointValuePair tmp = simplex[i];
+ simplex[i] = pointValuePair;
+ pointValuePair = tmp;
+ }
+ }
+ simplex[dimension] = pointValuePair;
+ }
+
+ /**
+ * Get the points of the simplex.
+ *
+ * @return all the simplex points.
+ */
+ public PointValuePair[] getPoints() {
+ final PointValuePair[] copy = new PointValuePair[simplex.length];
+ System.arraycopy(simplex, 0, copy, 0, simplex.length);
+ return copy;
+ }
+
+ /**
+ * Get the simplex point stored at the requested {@code index}.
+ *
+ * @param index Location.
+ * @return the point at location {@code index}.
+ */
+ public PointValuePair getPoint(int index) {
+ if (index < 0 ||
+ index >= simplex.length) {
+ throw new OutOfRangeException(index, 0, simplex.length - 1);
+ }
+ return simplex[index];
+ }
+
+ /**
+ * Store a new point at location {@code index}.
+ * Note that no deep-copy of {@code point} is performed.
+ *
+ * @param index Location.
+ * @param point New value.
+ */
+ protected void setPoint(int index, PointValuePair point) {
+ if (index < 0 ||
+ index >= simplex.length) {
+ throw new OutOfRangeException(index, 0, simplex.length - 1);
+ }
+ simplex[index] = point;
+ }
+
+ /**
+ * Replace all points.
+ * Note that no deep-copy of {@code points} is performed.
+ *
+ * @param points New Points.
+ */
+ protected void setPoints(PointValuePair[] points) {
+ if (points.length != simplex.length) {
+ throw new DimensionMismatchException(points.length, simplex.length);
+ }
+ simplex = points;
+ }
+
+ /**
+ * Create steps for a unit hypercube.
+ *
+ * @param n Dimension of the hypercube.
+ * @param sideLength Length of the sides of the hypercube.
+ * @return the steps.
+ */
+ private static double[] createHypercubeSteps(int n,
+ double sideLength) {
+ final double[] steps = new double[n];
+ for (int i = 0; i < n; i++) {
+ steps[i] = sideLength;
+ }
+ return steps;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizer.java
new file mode 100644
index 000000000..3b68693c8
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizer.java
@@ -0,0 +1,2482 @@
+// CHECKSTYLE: stop all
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Arrays;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.linear.Array2DRowRealMatrix;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
+
+/**
+ * Powell's BOBYQA algorithm. This implementation is translated and
+ * adapted from the Fortran version available
+ * <a href="http://plato.asu.edu/ftp/other_software/bobyqa.zip">here</a>.
+ * See <a href="http://www.optimization-online.org/DB_HTML/2010/05/2616.html">this
+ * paper</a> for an introduction.
+ *
+ * BOBYQA is particularly well suited for high dimensional problems
+ * where derivatives are not available. In most cases it outperforms the
+ * {@link PowellOptimizer} significantly. Stochastic algorithms like
+ * {@link CMAESOptimizer} succeed more often than BOBYQA, but are more
+ * expensive. BOBYQA could also be considered as a replacement of any
+ * derivative-based optimizer when the derivatives are approximated by
+ * finite differences.
+ *
+ * @version $Id: BOBYQAOptimizer.java 1413131 2012-11-24 04:44:02Z psteitz $
+ * @since 3.0
+ */
+public class BOBYQAOptimizer
+ extends MultivariateOptimizer {
+ /** Minimum dimension of the problem: {@value} */
+ public static final int MINIMUM_PROBLEM_DIMENSION = 2;
+ /** Default value for {@link #initialTrustRegionRadius}: {@value} . */
+ public static final double DEFAULT_INITIAL_RADIUS = 10.0;
+ /** Default value for {@link #stoppingTrustRegionRadius}: {@value} . */
+ public static final double DEFAULT_STOPPING_RADIUS = 1E-8;
+
+ private static final double ZERO = 0d;
+ private static final double ONE = 1d;
+ private static final double TWO = 2d;
+ private static final double TEN = 10d;
+ private static final double SIXTEEN = 16d;
+ private static final double TWO_HUNDRED_FIFTY = 250d;
+ private static final double MINUS_ONE = -ONE;
+ private static final double HALF = ONE / 2;
+ private static final double ONE_OVER_FOUR = ONE / 4;
+ private static final double ONE_OVER_EIGHT = ONE / 8;
+ private static final double ONE_OVER_TEN = ONE / 10;
+ private static final double ONE_OVER_A_THOUSAND = ONE / 1000;
+
+ /**
+ * Number of interpolation conditions.
+ */
+ private final int numberOfInterpolationPoints;
+ /**
+ * Initial trust region radius.
+ */
+ private double initialTrustRegionRadius;
+ /**
+ * Stopping trust region radius.
+ */
+ private final double stoppingTrustRegionRadius;
+ /** Goal type (minimize or maximize). */
+ private boolean isMinimize;
+ /**
+ * Current best values for the variables to be optimized.
+ * The vector will be changed in-place to contain the values of the least
+ * calculated objective function values.
+ */
+ private ArrayRealVector currentBest;
+ /** Differences between the upper and lower bounds. */
+ private double[] boundDifference;
+ /**
+ * Index of the interpolation point at the trust region center.
+ */
+ private int trustRegionCenterInterpolationPointIndex;
+ /**
+ * Last n columns of matrix H (where n is the dimension
+ * of the problem).
+ * XXX "bmat" in the original code.
+ */
+ private Array2DRowRealMatrix bMatrix;
+ /**
+ * Factorization of the leading npt square submatrix of H, this
+ * factorization being Z Z<sup>T</sup>, which provides both the correct
+ * rank and positive semi-definiteness.
+ * XXX "zmat" in the original code.
+ */
+ private Array2DRowRealMatrix zMatrix;
+ /**
+ * Coordinates of the interpolation points relative to {@link #originShift}.
+ * XXX "xpt" in the original code.
+ */
+ private Array2DRowRealMatrix interpolationPoints;
+ /**
+ * Shift of origin that should reduce the contributions from rounding
+ * errors to values of the model and Lagrange functions.
+ * XXX "xbase" in the original code.
+ */
+ private ArrayRealVector originShift;
+ /**
+ * Values of the objective function at the interpolation points.
+ * XXX "fval" in the original code.
+ */
+ private ArrayRealVector fAtInterpolationPoints;
+ /**
+ * Displacement from {@link #originShift} of the trust region center.
+ * XXX "xopt" in the original code.
+ */
+ private ArrayRealVector trustRegionCenterOffset;
+ /**
+ * Gradient of the quadratic model at {@link #originShift} +
+ * {@link #trustRegionCenterOffset}.
+ * XXX "gopt" in the original code.
+ */
+ private ArrayRealVector gradientAtTrustRegionCenter;
+ /**
+ * Differences {@link #getLowerBound()} - {@link #originShift}.
+ * All the components of every {@link #trustRegionCenterOffset} are going
+ * to satisfy the bounds
+ * {@link #getLowerBound() lowerBound}<sub>i</sub> &le;
+ * {@link #trustRegionCenterOffset}<sub>i</sub>,
+ * with appropriate equalities when {@link #trustRegionCenterOffset} is
+ * on a constraint boundary.
+ * XXX "sl" in the original code.
+ */
+ private ArrayRealVector lowerDifference;
+ /**
+ * Differences {@link #getUpperBound()} - {@link #originShift}
+ * All the components of every {@link #trustRegionCenterOffset} are going
+ * to satisfy the bounds
+ * {@link #trustRegionCenterOffset}<sub>i</sub> &le;
+ * {@link #getUpperBound() upperBound}<sub>i</sub>,
+ * with appropriate equalities when {@link #trustRegionCenterOffset} is
+ * on a constraint boundary.
+ * XXX "su" in the original code.
+ */
+ private ArrayRealVector upperDifference;
+ /**
+ * Parameters of the implicit second derivatives of the quadratic model.
+ * XXX "pq" in the original code.
+ */
+ private ArrayRealVector modelSecondDerivativesParameters;
+ /**
+ * Point chosen by function {@link #trsbox(double,ArrayRealVector,
+ * ArrayRealVector, ArrayRealVector,ArrayRealVector,ArrayRealVector) trsbox}
+ * or {@link #altmov(int,double) altmov}.
+ * Usually {@link #originShift} + {@link #newPoint} is the vector of
+ * variables for the next evaluation of the objective function.
+ * It also satisfies the constraints indicated in {@link #lowerDifference}
+ * and {@link #upperDifference}.
+ * XXX "xnew" in the original code.
+ */
+ private ArrayRealVector newPoint;
+ /**
+ * Alternative to {@link #newPoint}, chosen by
+ * {@link #altmov(int,double) altmov}.
+ * It may replace {@link #newPoint} in order to increase the denominator
+ * in the {@link #update(double, double, int) updating procedure}.
+ * XXX "xalt" in the original code.
+ */
+ private ArrayRealVector alternativeNewPoint;
+ /**
+ * Trial step from {@link #trustRegionCenterOffset} which is usually
+ * {@link #newPoint} - {@link #trustRegionCenterOffset}.
+ * XXX "d__" in the original code.
+ */
+ private ArrayRealVector trialStepPoint;
+ /**
+ * Values of the Lagrange functions at a new point.
+ * XXX "vlag" in the original code.
+ */
+ private ArrayRealVector lagrangeValuesAtNewPoint;
+ /**
+ * Explicit second derivatives of the quadratic model.
+ * XXX "hq" in the original code.
+ */
+ private ArrayRealVector modelSecondDerivativesValues;
+
+ /**
+ * @param numberOfInterpolationPoints Number of interpolation conditions.
+ * For a problem of dimension {@code n}, its value must be in the interval
+ * {@code [n+2, (n+1)(n+2)/2]}.
+ * Choices that exceed {@code 2n+1} are not recommended.
+ */
+ public BOBYQAOptimizer(int numberOfInterpolationPoints) {
+ this(numberOfInterpolationPoints,
+ DEFAULT_INITIAL_RADIUS,
+ DEFAULT_STOPPING_RADIUS);
+ }
+
+ /**
+ * @param numberOfInterpolationPoints Number of interpolation conditions.
+ * For a problem of dimension {@code n}, its value must be in the interval
+ * {@code [n+2, (n+1)(n+2)/2]}.
+ * Choices that exceed {@code 2n+1} are not recommended.
+ * @param initialTrustRegionRadius Initial trust region radius.
+ * @param stoppingTrustRegionRadius Stopping trust region radius.
+ */
+ public BOBYQAOptimizer(int numberOfInterpolationPoints,
+ double initialTrustRegionRadius,
+ double stoppingTrustRegionRadius) {
+ super(null); // No custom convergence criterion.
+ this.numberOfInterpolationPoints = numberOfInterpolationPoints;
+ this.initialTrustRegionRadius = initialTrustRegionRadius;
+ this.stoppingTrustRegionRadius = stoppingTrustRegionRadius;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointValuePair doOptimize() {
+ final double[] lowerBound = getLowerBound();
+ final double[] upperBound = getUpperBound();
+
+ // Validity checks.
+ setup(lowerBound, upperBound);
+
+ isMinimize = (getGoalType() == GoalType.MINIMIZE);
+ currentBest = new ArrayRealVector(getStartPoint());
+
+ final double value = bobyqa(lowerBound, upperBound);
+
+ return new PointValuePair(currentBest.getDataRef(),
+ isMinimize ? value : -value);
+ }
+
+ /**
+ * This subroutine seeks the least value of a function of many variables,
+ * by applying a trust region method that forms quadratic models by
+ * interpolation. There is usually some freedom in the interpolation
+ * conditions, which is taken up by minimizing the Frobenius norm of
+ * the change to the second derivative of the model, beginning with the
+ * zero matrix. The values of the variables are constrained by upper and
+ * lower bounds. The arguments of the subroutine are as follows.
+ *
+ * N must be set to the number of variables and must be at least two.
+ * NPT is the number of interpolation conditions. Its value must be in
+ * the interval [N+2,(N+1)(N+2)/2]. Choices that exceed 2*N+1 are not
+ * recommended.
+ * Initial values of the variables must be set in X(1),X(2),...,X(N). They
+ * will be changed to the values that give the least calculated F.
+ * For I=1,2,...,N, XL(I) and XU(I) must provide the lower and upper
+ * bounds, respectively, on X(I). The construction of quadratic models
+ * requires XL(I) to be strictly less than XU(I) for each I. Further,
+ * the contribution to a model from changes to the I-th variable is
+ * damaged severely by rounding errors if XU(I)-XL(I) is too small.
+ * RHOBEG and RHOEND must be set to the initial and final values of a trust
+ * region radius, so both must be positive with RHOEND no greater than
+ * RHOBEG. Typically, RHOBEG should be about one tenth of the greatest
+ * expected change to a variable, while RHOEND should indicate the
+ * accuracy that is required in the final values of the variables. An
+ * error return occurs if any of the differences XU(I)-XL(I), I=1,...,N,
+ * is less than 2*RHOBEG.
+ * MAXFUN must be set to an upper bound on the number of calls of CALFUN.
+ * The array W will be used for working space. Its length must be at least
+ * (NPT+5)*(NPT+N)+3*N*(N+5)/2.
+ *
+ * @param lowerBound Lower bounds.
+ * @param upperBound Upper bounds.
+ * @return the value of the objective at the optimum.
+ */
+ private double bobyqa(double[] lowerBound,
+ double[] upperBound) {
+ printMethod(); // XXX
+
+ final int n = currentBest.getDimension();
+
+ // Return if there is insufficient space between the bounds. Modify the
+ // initial X if necessary in order to avoid conflicts between the bounds
+ // and the construction of the first quadratic model. The lower and upper
+ // bounds on moves from the updated X are set now, in the ISL and ISU
+ // partitions of W, in order to provide useful and exact information about
+ // components of X that become within distance RHOBEG from their bounds.
+
+ for (int j = 0; j < n; j++) {
+ final double boundDiff = boundDifference[j];
+ lowerDifference.setEntry(j, lowerBound[j] - currentBest.getEntry(j));
+ upperDifference.setEntry(j, upperBound[j] - currentBest.getEntry(j));
+ if (lowerDifference.getEntry(j) >= -initialTrustRegionRadius) {
+ if (lowerDifference.getEntry(j) >= ZERO) {
+ currentBest.setEntry(j, lowerBound[j]);
+ lowerDifference.setEntry(j, ZERO);
+ upperDifference.setEntry(j, boundDiff);
+ } else {
+ currentBest.setEntry(j, lowerBound[j] + initialTrustRegionRadius);
+ lowerDifference.setEntry(j, -initialTrustRegionRadius);
+ // Computing MAX
+ final double deltaOne = upperBound[j] - currentBest.getEntry(j);
+ upperDifference.setEntry(j, Math.max(deltaOne, initialTrustRegionRadius));
+ }
+ } else if (upperDifference.getEntry(j) <= initialTrustRegionRadius) {
+ if (upperDifference.getEntry(j) <= ZERO) {
+ currentBest.setEntry(j, upperBound[j]);
+ lowerDifference.setEntry(j, -boundDiff);
+ upperDifference.setEntry(j, ZERO);
+ } else {
+ currentBest.setEntry(j, upperBound[j] - initialTrustRegionRadius);
+ // Computing MIN
+ final double deltaOne = lowerBound[j] - currentBest.getEntry(j);
+ final double deltaTwo = -initialTrustRegionRadius;
+ lowerDifference.setEntry(j, Math.min(deltaOne, deltaTwo));
+ upperDifference.setEntry(j, initialTrustRegionRadius);
+ }
+ }
+ }
+
+ // Make the call of BOBYQB.
+
+ return bobyqb(lowerBound, upperBound);
+ } // bobyqa
+
+ // ----------------------------------------------------------------------------------------
+
+ /**
+ * The arguments N, NPT, X, XL, XU, RHOBEG, RHOEND, IPRINT and MAXFUN
+ * are identical to the corresponding arguments in SUBROUTINE BOBYQA.
+ * XBASE holds a shift of origin that should reduce the contributions
+ * from rounding errors to values of the model and Lagrange functions.
+ * XPT is a two-dimensional array that holds the coordinates of the
+ * interpolation points relative to XBASE.
+ * FVAL holds the values of F at the interpolation points.
+ * XOPT is set to the displacement from XBASE of the trust region centre.
+ * GOPT holds the gradient of the quadratic model at XBASE+XOPT.
+ * HQ holds the explicit second derivatives of the quadratic model.
+ * PQ contains the parameters of the implicit second derivatives of the
+ * quadratic model.
+ * BMAT holds the last N columns of H.
+ * ZMAT holds the factorization of the leading NPT by NPT submatrix of H,
+ * this factorization being ZMAT times ZMAT^T, which provides both the
+ * correct rank and positive semi-definiteness.
+ * NDIM is the first dimension of BMAT and has the value NPT+N.
+ * SL and SU hold the differences XL-XBASE and XU-XBASE, respectively.
+ * All the components of every XOPT are going to satisfy the bounds
+ * SL(I) .LEQ. XOPT(I) .LEQ. SU(I), with appropriate equalities when
+ * XOPT is on a constraint boundary.
+ * XNEW is chosen by SUBROUTINE TRSBOX or ALTMOV. Usually XBASE+XNEW is the
+ * vector of variables for the next call of CALFUN. XNEW also satisfies
+ * the SL and SU constraints in the way that has just been mentioned.
+ * XALT is an alternative to XNEW, chosen by ALTMOV, that may replace XNEW
+ * in order to increase the denominator in the updating of UPDATE.
+ * D is reserved for a trial step from XOPT, which is usually XNEW-XOPT.
+ * VLAG contains the values of the Lagrange functions at a new point X.
+ * They are part of a product that requires VLAG to be of length NDIM.
+ * W is a one-dimensional array that is used for working space. Its length
+ * must be at least 3*NDIM = 3*(NPT+N).
+ *
+ * @param lowerBound Lower bounds.
+ * @param upperBound Upper bounds.
+ * @return the value of the objective at the optimum.
+ */
+ private double bobyqb(double[] lowerBound,
+ double[] upperBound) {
+ printMethod(); // XXX
+
+ final int n = currentBest.getDimension();
+ final int npt = numberOfInterpolationPoints;
+ final int np = n + 1;
+ final int nptm = npt - np;
+ final int nh = n * np / 2;
+
+ final ArrayRealVector work1 = new ArrayRealVector(n);
+ final ArrayRealVector work2 = new ArrayRealVector(npt);
+ final ArrayRealVector work3 = new ArrayRealVector(npt);
+
+ double cauchy = Double.NaN;
+ double alpha = Double.NaN;
+ double dsq = Double.NaN;
+ double crvmin = Double.NaN;
+
+ // Set some constants.
+ // Parameter adjustments
+
+ // Function Body
+
+ // The call of PRELIM sets the elements of XBASE, XPT, FVAL, GOPT, HQ, PQ,
+ // BMAT and ZMAT for the first iteration, with the corresponding values of
+ // of NF and KOPT, which are the number of calls of CALFUN so far and the
+ // index of the interpolation point at the trust region centre. Then the
+ // initial XOPT is set too. The branch to label 720 occurs if MAXFUN is
+ // less than NPT. GOPT will be updated if KOPT is different from KBASE.
+
+ trustRegionCenterInterpolationPointIndex = 0;
+
+ prelim(lowerBound, upperBound);
+ double xoptsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ trustRegionCenterOffset.setEntry(i, interpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex, i));
+ // Computing 2nd power
+ final double deltaOne = trustRegionCenterOffset.getEntry(i);
+ xoptsq += deltaOne * deltaOne;
+ }
+ double fsave = fAtInterpolationPoints.getEntry(0);
+ final int kbase = 0;
+
+ // Complete the settings that are required for the iterative procedure.
+
+ int ntrits = 0;
+ int itest = 0;
+ int knew = 0;
+ int nfsav = getEvaluations();
+ double rho = initialTrustRegionRadius;
+ double delta = rho;
+ double diffa = ZERO;
+ double diffb = ZERO;
+ double diffc = ZERO;
+ double f = ZERO;
+ double beta = ZERO;
+ double adelt = ZERO;
+ double denom = ZERO;
+ double ratio = ZERO;
+ double dnorm = ZERO;
+ double scaden = ZERO;
+ double biglsq = ZERO;
+ double distsq = ZERO;
+
+ // Update GOPT if necessary before the first iteration and after each
+ // call of RESCUE that makes a call of CALFUN.
+
+ int state = 20;
+ for(;;) switch (state) {
+ case 20: {
+ printState(20); // XXX
+ if (trustRegionCenterInterpolationPointIndex != kbase) {
+ int ih = 0;
+ for (int j = 0; j < n; j++) {
+ for (int i = 0; i <= j; i++) {
+ if (i < j) {
+ gradientAtTrustRegionCenter.setEntry(j, gradientAtTrustRegionCenter.getEntry(j) + modelSecondDerivativesValues.getEntry(ih) * trustRegionCenterOffset.getEntry(i));
+ }
+ gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + modelSecondDerivativesValues.getEntry(ih) * trustRegionCenterOffset.getEntry(j));
+ ih++;
+ }
+ }
+ if (getEvaluations() > npt) {
+ for (int k = 0; k < npt; k++) {
+ double temp = ZERO;
+ for (int j = 0; j < n; j++) {
+ temp += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
+ }
+ temp *= modelSecondDerivativesParameters.getEntry(k);
+ for (int i = 0; i < n; i++) {
+ gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
+ }
+ }
+ // throw new PathIsExploredException(); // XXX
+ }
+ }
+
+ // Generate the next point in the trust region that provides a small value
+ // of the quadratic model subject to the constraints on the variables.
+ // The int NTRITS is set to the number "trust region" iterations that
+ // have occurred since the last "alternative" iteration. If the length
+ // of XNEW-XOPT is less than HALF*RHO, however, then there is a branch to
+ // label 650 or 680 with NTRITS=-1, instead of calculating F at XNEW.
+
+ }
+ case 60: {
+ printState(60); // XXX
+ final ArrayRealVector gnew = new ArrayRealVector(n);
+ final ArrayRealVector xbdi = new ArrayRealVector(n);
+ final ArrayRealVector s = new ArrayRealVector(n);
+ final ArrayRealVector hs = new ArrayRealVector(n);
+ final ArrayRealVector hred = new ArrayRealVector(n);
+
+ final double[] dsqCrvmin = trsbox(delta, gnew, xbdi, s,
+ hs, hred);
+ dsq = dsqCrvmin[0];
+ crvmin = dsqCrvmin[1];
+
+ // Computing MIN
+ double deltaOne = delta;
+ double deltaTwo = Math.sqrt(dsq);
+ dnorm = Math.min(deltaOne, deltaTwo);
+ if (dnorm < HALF * rho) {
+ ntrits = -1;
+ // Computing 2nd power
+ deltaOne = TEN * rho;
+ distsq = deltaOne * deltaOne;
+ if (getEvaluations() <= nfsav + 2) {
+ state = 650; break;
+ }
+
+ // The following choice between labels 650 and 680 depends on whether or
+ // not our work with the current RHO seems to be complete. Either RHO is
+ // decreased or termination occurs if the errors in the quadratic model at
+ // the last three interpolation points compare favourably with predictions
+ // of likely improvements to the model within distance HALF*RHO of XOPT.
+
+ // Computing MAX
+ deltaOne = Math.max(diffa, diffb);
+ final double errbig = Math.max(deltaOne, diffc);
+ final double frhosq = rho * ONE_OVER_EIGHT * rho;
+ if (crvmin > ZERO &&
+ errbig > frhosq * crvmin) {
+ state = 650; break;
+ }
+ final double bdtol = errbig / rho;
+ for (int j = 0; j < n; j++) {
+ double bdtest = bdtol;
+ if (newPoint.getEntry(j) == lowerDifference.getEntry(j)) {
+ bdtest = work1.getEntry(j);
+ }
+ if (newPoint.getEntry(j) == upperDifference.getEntry(j)) {
+ bdtest = -work1.getEntry(j);
+ }
+ if (bdtest < bdtol) {
+ double curv = modelSecondDerivativesValues.getEntry((j + j * j) / 2);
+ for (int k = 0; k < npt; k++) {
+ // Computing 2nd power
+ final double d1 = interpolationPoints.getEntry(k, j);
+ curv += modelSecondDerivativesParameters.getEntry(k) * (d1 * d1);
+ }
+ bdtest += HALF * curv * rho;
+ if (bdtest < bdtol) {
+ state = 650; break;
+ }
+ // throw new PathIsExploredException(); // XXX
+ }
+ }
+ state = 680; break;
+ }
+ ++ntrits;
+
+ // Severe cancellation is likely to occur if XOPT is too far from XBASE.
+ // If the following test holds, then XBASE is shifted so that XOPT becomes
+ // zero. The appropriate changes are made to BMAT and to the second
+ // derivatives of the current model, beginning with the changes to BMAT
+ // that do not depend on ZMAT. VLAG is used temporarily for working space.
+
+ }
+ case 90: {
+ printState(90); // XXX
+ if (dsq <= xoptsq * ONE_OVER_A_THOUSAND) {
+ final double fracsq = xoptsq * ONE_OVER_FOUR;
+ double sumpq = ZERO;
+ // final RealVector sumVector
+ // = new ArrayRealVector(npt, -HALF * xoptsq).add(interpolationPoints.operate(trustRegionCenter));
+ for (int k = 0; k < npt; k++) {
+ sumpq += modelSecondDerivativesParameters.getEntry(k);
+ double sum = -HALF * xoptsq;
+ for (int i = 0; i < n; i++) {
+ sum += interpolationPoints.getEntry(k, i) * trustRegionCenterOffset.getEntry(i);
+ }
+ // sum = sumVector.getEntry(k); // XXX "testAckley" and "testDiffPow" fail.
+ work2.setEntry(k, sum);
+ final double temp = fracsq - HALF * sum;
+ for (int i = 0; i < n; i++) {
+ work1.setEntry(i, bMatrix.getEntry(k, i));
+ lagrangeValuesAtNewPoint.setEntry(i, sum * interpolationPoints.getEntry(k, i) + temp * trustRegionCenterOffset.getEntry(i));
+ final int ip = npt + i;
+ for (int j = 0; j <= i; j++) {
+ bMatrix.setEntry(ip, j,
+ bMatrix.getEntry(ip, j)
+ + work1.getEntry(i) * lagrangeValuesAtNewPoint.getEntry(j)
+ + lagrangeValuesAtNewPoint.getEntry(i) * work1.getEntry(j));
+ }
+ }
+ }
+
+ // Then the revisions of BMAT that depend on ZMAT are calculated.
+
+ for (int m = 0; m < nptm; m++) {
+ double sumz = ZERO;
+ double sumw = ZERO;
+ for (int k = 0; k < npt; k++) {
+ sumz += zMatrix.getEntry(k, m);
+ lagrangeValuesAtNewPoint.setEntry(k, work2.getEntry(k) * zMatrix.getEntry(k, m));
+ sumw += lagrangeValuesAtNewPoint.getEntry(k);
+ }
+ for (int j = 0; j < n; j++) {
+ double sum = (fracsq * sumz - HALF * sumw) * trustRegionCenterOffset.getEntry(j);
+ for (int k = 0; k < npt; k++) {
+ sum += lagrangeValuesAtNewPoint.getEntry(k) * interpolationPoints.getEntry(k, j);
+ }
+ work1.setEntry(j, sum);
+ for (int k = 0; k < npt; k++) {
+ bMatrix.setEntry(k, j,
+ bMatrix.getEntry(k, j)
+ + sum * zMatrix.getEntry(k, m));
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ final int ip = i + npt;
+ final double temp = work1.getEntry(i);
+ for (int j = 0; j <= i; j++) {
+ bMatrix.setEntry(ip, j,
+ bMatrix.getEntry(ip, j)
+ + temp * work1.getEntry(j));
+ }
+ }
+ }
+
+ // The following instructions complete the shift, including the changes
+ // to the second derivative parameters of the quadratic model.
+
+ int ih = 0;
+ for (int j = 0; j < n; j++) {
+ work1.setEntry(j, -HALF * sumpq * trustRegionCenterOffset.getEntry(j));
+ for (int k = 0; k < npt; k++) {
+ work1.setEntry(j, work1.getEntry(j) + modelSecondDerivativesParameters.getEntry(k) * interpolationPoints.getEntry(k, j));
+ interpolationPoints.setEntry(k, j, interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j));
+ }
+ for (int i = 0; i <= j; i++) {
+ modelSecondDerivativesValues.setEntry(ih,
+ modelSecondDerivativesValues.getEntry(ih)
+ + work1.getEntry(i) * trustRegionCenterOffset.getEntry(j)
+ + trustRegionCenterOffset.getEntry(i) * work1.getEntry(j));
+ bMatrix.setEntry(npt + i, j, bMatrix.getEntry(npt + j, i));
+ ih++;
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ originShift.setEntry(i, originShift.getEntry(i) + trustRegionCenterOffset.getEntry(i));
+ newPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ lowerDifference.setEntry(i, lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ upperDifference.setEntry(i, upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ trustRegionCenterOffset.setEntry(i, ZERO);
+ }
+ xoptsq = ZERO;
+ }
+ if (ntrits == 0) {
+ state = 210; break;
+ }
+ state = 230; break;
+
+ // XBASE is also moved to XOPT by a call of RESCUE. This calculation is
+ // more expensive than the previous shift, because new matrices BMAT and
+ // ZMAT are generated from scratch, which may include the replacement of
+ // interpolation points whose positions seem to be causing near linear
+ // dependence in the interpolation conditions. Therefore RESCUE is called
+ // only if rounding errors have reduced by at least a factor of two the
+ // denominator of the formula for updating the H matrix. It provides a
+ // useful safeguard, but is not invoked in most applications of BOBYQA.
+
+ }
+ case 210: {
+ printState(210); // XXX
+ // Pick two alternative vectors of variables, relative to XBASE, that
+ // are suitable as new positions of the KNEW-th interpolation point.
+ // Firstly, XNEW is set to the point on a line through XOPT and another
+ // interpolation point that minimizes the predicted value of the next
+ // denominator, subject to ||XNEW - XOPT|| .LEQ. ADELT and to the SL
+ // and SU bounds. Secondly, XALT is set to the best feasible point on
+ // a constrained version of the Cauchy step of the KNEW-th Lagrange
+ // function, the corresponding value of the square of this function
+ // being returned in CAUCHY. The choice between these alternatives is
+ // going to be made when the denominator is calculated.
+
+ final double[] alphaCauchy = altmov(knew, adelt);
+ alpha = alphaCauchy[0];
+ cauchy = alphaCauchy[1];
+
+ for (int i = 0; i < n; i++) {
+ trialStepPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ }
+
+ // Calculate VLAG and BETA for the current choice of D. The scalar
+ // product of D with XPT(K,.) is going to be held in W(NPT+K) for
+ // use when VQUAD is calculated.
+
+ }
+ case 230: {
+ printState(230); // XXX
+ for (int k = 0; k < npt; k++) {
+ double suma = ZERO;
+ double sumb = ZERO;
+ double sum = ZERO;
+ for (int j = 0; j < n; j++) {
+ suma += interpolationPoints.getEntry(k, j) * trialStepPoint.getEntry(j);
+ sumb += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
+ sum += bMatrix.getEntry(k, j) * trialStepPoint.getEntry(j);
+ }
+ work3.setEntry(k, suma * (HALF * suma + sumb));
+ lagrangeValuesAtNewPoint.setEntry(k, sum);
+ work2.setEntry(k, suma);
+ }
+ beta = ZERO;
+ for (int m = 0; m < nptm; m++) {
+ double sum = ZERO;
+ for (int k = 0; k < npt; k++) {
+ sum += zMatrix.getEntry(k, m) * work3.getEntry(k);
+ }
+ beta -= sum * sum;
+ for (int k = 0; k < npt; k++) {
+ lagrangeValuesAtNewPoint.setEntry(k, lagrangeValuesAtNewPoint.getEntry(k) + sum * zMatrix.getEntry(k, m));
+ }
+ }
+ dsq = ZERO;
+ double bsum = ZERO;
+ double dx = ZERO;
+ for (int j = 0; j < n; j++) {
+ // Computing 2nd power
+ final double d1 = trialStepPoint.getEntry(j);
+ dsq += d1 * d1;
+ double sum = ZERO;
+ for (int k = 0; k < npt; k++) {
+ sum += work3.getEntry(k) * bMatrix.getEntry(k, j);
+ }
+ bsum += sum * trialStepPoint.getEntry(j);
+ final int jp = npt + j;
+ for (int i = 0; i < n; i++) {
+ sum += bMatrix.getEntry(jp, i) * trialStepPoint.getEntry(i);
+ }
+ lagrangeValuesAtNewPoint.setEntry(jp, sum);
+ bsum += sum * trialStepPoint.getEntry(j);
+ dx += trialStepPoint.getEntry(j) * trustRegionCenterOffset.getEntry(j);
+ }
+
+ beta = dx * dx + dsq * (xoptsq + dx + dx + HALF * dsq) + beta - bsum; // Original
+ // beta += dx * dx + dsq * (xoptsq + dx + dx + HALF * dsq) - bsum; // XXX "testAckley" and "testDiffPow" fail.
+ // beta = dx * dx + dsq * (xoptsq + 2 * dx + HALF * dsq) + beta - bsum; // XXX "testDiffPow" fails.
+
+ lagrangeValuesAtNewPoint.setEntry(trustRegionCenterInterpolationPointIndex,
+ lagrangeValuesAtNewPoint.getEntry(trustRegionCenterInterpolationPointIndex) + ONE);
+
+ // If NTRITS is zero, the denominator may be increased by replacing
+ // the step D of ALTMOV by a Cauchy step. Then RESCUE may be called if
+ // rounding errors have damaged the chosen denominator.
+
+ if (ntrits == 0) {
+ // Computing 2nd power
+ final double d1 = lagrangeValuesAtNewPoint.getEntry(knew);
+ denom = d1 * d1 + alpha * beta;
+ if (denom < cauchy && cauchy > ZERO) {
+ for (int i = 0; i < n; i++) {
+ newPoint.setEntry(i, alternativeNewPoint.getEntry(i));
+ trialStepPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ }
+ cauchy = ZERO; // XXX Useful statement?
+ state = 230; break;
+ }
+ // Alternatively, if NTRITS is positive, then set KNEW to the index of
+ // the next interpolation point to be deleted to make room for a trust
+ // region step. Again RESCUE may be called if rounding errors have damaged
+ // the chosen denominator, which is the reason for attempting to select
+ // KNEW before calculating the next value of the objective function.
+
+ } else {
+ final double delsq = delta * delta;
+ scaden = ZERO;
+ biglsq = ZERO;
+ knew = 0;
+ for (int k = 0; k < npt; k++) {
+ if (k == trustRegionCenterInterpolationPointIndex) {
+ continue;
+ }
+ double hdiag = ZERO;
+ for (int m = 0; m < nptm; m++) {
+ // Computing 2nd power
+ final double d1 = zMatrix.getEntry(k, m);
+ hdiag += d1 * d1;
+ }
+ // Computing 2nd power
+ final double d2 = lagrangeValuesAtNewPoint.getEntry(k);
+ final double den = beta * hdiag + d2 * d2;
+ distsq = ZERO;
+ for (int j = 0; j < n; j++) {
+ // Computing 2nd power
+ final double d3 = interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j);
+ distsq += d3 * d3;
+ }
+ // Computing MAX
+ // Computing 2nd power
+ final double d4 = distsq / delsq;
+ final double temp = Math.max(ONE, d4 * d4);
+ if (temp * den > scaden) {
+ scaden = temp * den;
+ knew = k;
+ denom = den;
+ }
+ // Computing MAX
+ // Computing 2nd power
+ final double d5 = lagrangeValuesAtNewPoint.getEntry(k);
+ biglsq = Math.max(biglsq, temp * (d5 * d5));
+ }
+ }
+
+ // Put the variables for the next calculation of the objective function
+ // in XNEW, with any adjustments for the bounds.
+
+ // Calculate the value of the objective function at XBASE+XNEW, unless
+ // the limit on the number of calculations of F has been reached.
+
+ }
+ case 360: {
+ printState(360); // XXX
+ for (int i = 0; i < n; i++) {
+ // Computing MIN
+ // Computing MAX
+ final double d3 = lowerBound[i];
+ final double d4 = originShift.getEntry(i) + newPoint.getEntry(i);
+ final double d1 = Math.max(d3, d4);
+ final double d2 = upperBound[i];
+ currentBest.setEntry(i, Math.min(d1, d2));
+ if (newPoint.getEntry(i) == lowerDifference.getEntry(i)) {
+ currentBest.setEntry(i, lowerBound[i]);
+ }
+ if (newPoint.getEntry(i) == upperDifference.getEntry(i)) {
+ currentBest.setEntry(i, upperBound[i]);
+ }
+ }
+
+ f = computeObjectiveValue(currentBest.toArray());
+
+ if (!isMinimize)
+ f = -f;
+ if (ntrits == -1) {
+ fsave = f;
+ state = 720; break;
+ }
+
+ // Use the quadratic model to predict the change in F due to the step D,
+ // and set DIFF to the error of this prediction.
+
+ final double fopt = fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex);
+ double vquad = ZERO;
+ int ih = 0;
+ for (int j = 0; j < n; j++) {
+ vquad += trialStepPoint.getEntry(j) * gradientAtTrustRegionCenter.getEntry(j);
+ for (int i = 0; i <= j; i++) {
+ double temp = trialStepPoint.getEntry(i) * trialStepPoint.getEntry(j);
+ if (i == j) {
+ temp *= HALF;
+ }
+ vquad += modelSecondDerivativesValues.getEntry(ih) * temp;
+ ih++;
+ }
+ }
+ for (int k = 0; k < npt; k++) {
+ // Computing 2nd power
+ final double d1 = work2.getEntry(k);
+ final double d2 = d1 * d1; // "d1" must be squared first to prevent test failures.
+ vquad += HALF * modelSecondDerivativesParameters.getEntry(k) * d2;
+ }
+ final double diff = f - fopt - vquad;
+ diffc = diffb;
+ diffb = diffa;
+ diffa = Math.abs(diff);
+ if (dnorm > rho) {
+ nfsav = getEvaluations();
+ }
+
+ // Pick the next value of DELTA after a trust region step.
+
+ if (ntrits > 0) {
+ if (vquad >= ZERO) {
+ throw new MathIllegalStateException(LocalizedFormats.TRUST_REGION_STEP_FAILED, vquad);
+ }
+ ratio = (f - fopt) / vquad;
+ final double hDelta = HALF * delta;
+ if (ratio <= ONE_OVER_TEN) {
+ // Computing MIN
+ delta = Math.min(hDelta, dnorm);
+ } else if (ratio <= .7) {
+ // Computing MAX
+ delta = Math.max(hDelta, dnorm);
+ } else {
+ // Computing MAX
+ delta = Math.max(hDelta, 2 * dnorm);
+ }
+ if (delta <= rho * 1.5) {
+ delta = rho;
+ }
+
+ // Recalculate KNEW and DENOM if the new F is less than FOPT.
+
+ if (f < fopt) {
+ final int ksav = knew;
+ final double densav = denom;
+ final double delsq = delta * delta;
+ scaden = ZERO;
+ biglsq = ZERO;
+ knew = 0;
+ for (int k = 0; k < npt; k++) {
+ double hdiag = ZERO;
+ for (int m = 0; m < nptm; m++) {
+ // Computing 2nd power
+ final double d1 = zMatrix.getEntry(k, m);
+ hdiag += d1 * d1;
+ }
+ // Computing 2nd power
+ final double d1 = lagrangeValuesAtNewPoint.getEntry(k);
+ final double den = beta * hdiag + d1 * d1;
+ distsq = ZERO;
+ for (int j = 0; j < n; j++) {
+ // Computing 2nd power
+ final double d2 = interpolationPoints.getEntry(k, j) - newPoint.getEntry(j);
+ distsq += d2 * d2;
+ }
+ // Computing MAX
+ // Computing 2nd power
+ final double d3 = distsq / delsq;
+ final double temp = Math.max(ONE, d3 * d3);
+ if (temp * den > scaden) {
+ scaden = temp * den;
+ knew = k;
+ denom = den;
+ }
+ // Computing MAX
+ // Computing 2nd power
+ final double d4 = lagrangeValuesAtNewPoint.getEntry(k);
+ final double d5 = temp * (d4 * d4);
+ biglsq = Math.max(biglsq, d5);
+ }
+ if (scaden <= HALF * biglsq) {
+ knew = ksav;
+ denom = densav;
+ }
+ }
+ }
+
+ // Update BMAT and ZMAT, so that the KNEW-th interpolation point can be
+ // moved. Also update the second derivative terms of the model.
+
+ update(beta, denom, knew);
+
+ ih = 0;
+ final double pqold = modelSecondDerivativesParameters.getEntry(knew);
+ modelSecondDerivativesParameters.setEntry(knew, ZERO);
+ for (int i = 0; i < n; i++) {
+ final double temp = pqold * interpolationPoints.getEntry(knew, i);
+ for (int j = 0; j <= i; j++) {
+ modelSecondDerivativesValues.setEntry(ih, modelSecondDerivativesValues.getEntry(ih) + temp * interpolationPoints.getEntry(knew, j));
+ ih++;
+ }
+ }
+ for (int m = 0; m < nptm; m++) {
+ final double temp = diff * zMatrix.getEntry(knew, m);
+ for (int k = 0; k < npt; k++) {
+ modelSecondDerivativesParameters.setEntry(k, modelSecondDerivativesParameters.getEntry(k) + temp * zMatrix.getEntry(k, m));
+ }
+ }
+
+ // Include the new interpolation point, and make the changes to GOPT at
+ // the old XOPT that are caused by the updating of the quadratic model.
+
+ fAtInterpolationPoints.setEntry(knew, f);
+ for (int i = 0; i < n; i++) {
+ interpolationPoints.setEntry(knew, i, newPoint.getEntry(i));
+ work1.setEntry(i, bMatrix.getEntry(knew, i));
+ }
+ for (int k = 0; k < npt; k++) {
+ double suma = ZERO;
+ for (int m = 0; m < nptm; m++) {
+ suma += zMatrix.getEntry(knew, m) * zMatrix.getEntry(k, m);
+ }
+ double sumb = ZERO;
+ for (int j = 0; j < n; j++) {
+ sumb += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
+ }
+ final double temp = suma * sumb;
+ for (int i = 0; i < n; i++) {
+ work1.setEntry(i, work1.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + diff * work1.getEntry(i));
+ }
+
+ // Update XOPT, GOPT and KOPT if the new calculated F is less than FOPT.
+
+ if (f < fopt) {
+ trustRegionCenterInterpolationPointIndex = knew;
+ xoptsq = ZERO;
+ ih = 0;
+ for (int j = 0; j < n; j++) {
+ trustRegionCenterOffset.setEntry(j, newPoint.getEntry(j));
+ // Computing 2nd power
+ final double d1 = trustRegionCenterOffset.getEntry(j);
+ xoptsq += d1 * d1;
+ for (int i = 0; i <= j; i++) {
+ if (i < j) {
+ gradientAtTrustRegionCenter.setEntry(j, gradientAtTrustRegionCenter.getEntry(j) + modelSecondDerivativesValues.getEntry(ih) * trialStepPoint.getEntry(i));
+ }
+ gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + modelSecondDerivativesValues.getEntry(ih) * trialStepPoint.getEntry(j));
+ ih++;
+ }
+ }
+ for (int k = 0; k < npt; k++) {
+ double temp = ZERO;
+ for (int j = 0; j < n; j++) {
+ temp += interpolationPoints.getEntry(k, j) * trialStepPoint.getEntry(j);
+ }
+ temp *= modelSecondDerivativesParameters.getEntry(k);
+ for (int i = 0; i < n; i++) {
+ gradientAtTrustRegionCenter.setEntry(i, gradientAtTrustRegionCenter.getEntry(i) + temp * interpolationPoints.getEntry(k, i));
+ }
+ }
+ }
+
+ // Calculate the parameters of the least Frobenius norm interpolant to
+ // the current data, the gradient of this interpolant at XOPT being put
+ // into VLAG(NPT+I), I=1,2,...,N.
+
+ if (ntrits > 0) {
+ for (int k = 0; k < npt; k++) {
+ lagrangeValuesAtNewPoint.setEntry(k, fAtInterpolationPoints.getEntry(k) - fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex));
+ work3.setEntry(k, ZERO);
+ }
+ for (int j = 0; j < nptm; j++) {
+ double sum = ZERO;
+ for (int k = 0; k < npt; k++) {
+ sum += zMatrix.getEntry(k, j) * lagrangeValuesAtNewPoint.getEntry(k);
+ }
+ for (int k = 0; k < npt; k++) {
+ work3.setEntry(k, work3.getEntry(k) + sum * zMatrix.getEntry(k, j));
+ }
+ }
+ for (int k = 0; k < npt; k++) {
+ double sum = ZERO;
+ for (int j = 0; j < n; j++) {
+ sum += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
+ }
+ work2.setEntry(k, work3.getEntry(k));
+ work3.setEntry(k, sum * work3.getEntry(k));
+ }
+ double gqsq = ZERO;
+ double gisq = ZERO;
+ for (int i = 0; i < n; i++) {
+ double sum = ZERO;
+ for (int k = 0; k < npt; k++) {
+ sum += bMatrix.getEntry(k, i) *
+ lagrangeValuesAtNewPoint.getEntry(k) + interpolationPoints.getEntry(k, i) * work3.getEntry(k);
+ }
+ if (trustRegionCenterOffset.getEntry(i) == lowerDifference.getEntry(i)) {
+ // Computing MIN
+ // Computing 2nd power
+ final double d1 = Math.min(ZERO, gradientAtTrustRegionCenter.getEntry(i));
+ gqsq += d1 * d1;
+ // Computing 2nd power
+ final double d2 = Math.min(ZERO, sum);
+ gisq += d2 * d2;
+ } else if (trustRegionCenterOffset.getEntry(i) == upperDifference.getEntry(i)) {
+ // Computing MAX
+ // Computing 2nd power
+ final double d1 = Math.max(ZERO, gradientAtTrustRegionCenter.getEntry(i));
+ gqsq += d1 * d1;
+ // Computing 2nd power
+ final double d2 = Math.max(ZERO, sum);
+ gisq += d2 * d2;
+ } else {
+ // Computing 2nd power
+ final double d1 = gradientAtTrustRegionCenter.getEntry(i);
+ gqsq += d1 * d1;
+ gisq += sum * sum;
+ }
+ lagrangeValuesAtNewPoint.setEntry(npt + i, sum);
+ }
+
+ // Test whether to replace the new quadratic model by the least Frobenius
+ // norm interpolant, making the replacement if the test is satisfied.
+
+ ++itest;
+ if (gqsq < TEN * gisq) {
+ itest = 0;
+ }
+ if (itest >= 3) {
+ for (int i = 0, max = Math.max(npt, nh); i < max; i++) {
+ if (i < n) {
+ gradientAtTrustRegionCenter.setEntry(i, lagrangeValuesAtNewPoint.getEntry(npt + i));
+ }
+ if (i < npt) {
+ modelSecondDerivativesParameters.setEntry(i, work2.getEntry(i));
+ }
+ if (i < nh) {
+ modelSecondDerivativesValues.setEntry(i, ZERO);
+ }
+ itest = 0;
+ }
+ }
+ }
+
+ // If a trust region step has provided a sufficient decrease in F, then
+ // branch for another trust region calculation. The case NTRITS=0 occurs
+ // when the new interpolation point was reached by an alternative step.
+
+ if (ntrits == 0) {
+ state = 60; break;
+ }
+ if (f <= fopt + ONE_OVER_TEN * vquad) {
+ state = 60; break;
+ }
+
+ // Alternatively, find out if the interpolation points are close enough
+ // to the best point so far.
+
+ // Computing MAX
+ // Computing 2nd power
+ final double d1 = TWO * delta;
+ // Computing 2nd power
+ final double d2 = TEN * rho;
+ distsq = Math.max(d1 * d1, d2 * d2);
+ }
+ case 650: {
+ printState(650); // XXX
+ knew = -1;
+ for (int k = 0; k < npt; k++) {
+ double sum = ZERO;
+ for (int j = 0; j < n; j++) {
+ // Computing 2nd power
+ final double d1 = interpolationPoints.getEntry(k, j) - trustRegionCenterOffset.getEntry(j);
+ sum += d1 * d1;
+ }
+ if (sum > distsq) {
+ knew = k;
+ distsq = sum;
+ }
+ }
+
+ // If KNEW is positive, then ALTMOV finds alternative new positions for
+ // the KNEW-th interpolation point within distance ADELT of XOPT. It is
+ // reached via label 90. Otherwise, there is a branch to label 60 for
+ // another trust region iteration, unless the calculations with the
+ // current RHO are complete.
+
+ if (knew >= 0) {
+ final double dist = Math.sqrt(distsq);
+ if (ntrits == -1) {
+ // Computing MIN
+ delta = Math.min(ONE_OVER_TEN * delta, HALF * dist);
+ if (delta <= rho * 1.5) {
+ delta = rho;
+ }
+ }
+ ntrits = 0;
+ // Computing MAX
+ // Computing MIN
+ final double d1 = Math.min(ONE_OVER_TEN * dist, delta);
+ adelt = Math.max(d1, rho);
+ dsq = adelt * adelt;
+ state = 90; break;
+ }
+ if (ntrits == -1) {
+ state = 680; break;
+ }
+ if (ratio > ZERO) {
+ state = 60; break;
+ }
+ if (Math.max(delta, dnorm) > rho) {
+ state = 60; break;
+ }
+
+ // The calculations with the current value of RHO are complete. Pick the
+ // next values of RHO and DELTA.
+ }
+ case 680: {
+ printState(680); // XXX
+ if (rho > stoppingTrustRegionRadius) {
+ delta = HALF * rho;
+ ratio = rho / stoppingTrustRegionRadius;
+ if (ratio <= SIXTEEN) {
+ rho = stoppingTrustRegionRadius;
+ } else if (ratio <= TWO_HUNDRED_FIFTY) {
+ rho = Math.sqrt(ratio) * stoppingTrustRegionRadius;
+ } else {
+ rho *= ONE_OVER_TEN;
+ }
+ delta = Math.max(delta, rho);
+ ntrits = 0;
+ nfsav = getEvaluations();
+ state = 60; break;
+ }
+
+ // Return from the calculation, after another Newton-Raphson step, if
+ // it is too short to have been tried before.
+
+ if (ntrits == -1) {
+ state = 360; break;
+ }
+ }
+ case 720: {
+ printState(720); // XXX
+ if (fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex) <= fsave) {
+ for (int i = 0; i < n; i++) {
+ // Computing MIN
+ // Computing MAX
+ final double d3 = lowerBound[i];
+ final double d4 = originShift.getEntry(i) + trustRegionCenterOffset.getEntry(i);
+ final double d1 = Math.max(d3, d4);
+ final double d2 = upperBound[i];
+ currentBest.setEntry(i, Math.min(d1, d2));
+ if (trustRegionCenterOffset.getEntry(i) == lowerDifference.getEntry(i)) {
+ currentBest.setEntry(i, lowerBound[i]);
+ }
+ if (trustRegionCenterOffset.getEntry(i) == upperDifference.getEntry(i)) {
+ currentBest.setEntry(i, upperBound[i]);
+ }
+ }
+ f = fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex);
+ }
+ return f;
+ }
+ default: {
+ throw new MathIllegalStateException(LocalizedFormats.SIMPLE_MESSAGE, "bobyqb");
+ }}
+ } // bobyqb
+
+ // ----------------------------------------------------------------------------------------
+
    /**
     * ALTMOV: computes an alternative new position for the {@code knew}-th
     * interpolation point, used when the trust-region step would produce a
     * nearly singular interpolation system.
     *
     * The arguments N, NPT, XPT, XOPT, BMAT, ZMAT, NDIM, SL and SU all have
     * the same meanings as the corresponding arguments of BOBYQB.
     * KOPT is the index of the optimal interpolation point.
     * KNEW is the index of the interpolation point that is going to be moved.
     * ADELT is the current trust region bound.
     * XNEW will be set to a suitable new position for the interpolation point
     *   XPT(KNEW,.). Specifically, it satisfies the SL, SU and trust region
     *   bounds and it should provide a large denominator in the next call of
     *   UPDATE. The step XNEW-XOPT from XOPT is restricted to moves along the
     *   straight lines through XOPT and another interpolation point.
     * XALT also provides a large value of the modulus of the KNEW-th Lagrange
     *   function subject to the constraints that have been mentioned, its main
     *   difference from XNEW being that XALT-XOPT is a constrained version of
     *   the Cauchy step within the trust region. An exception is that XALT is
     *   not calculated if all components of GLAG (see below) are zero.
     * ALPHA will be set to the KNEW-th diagonal element of the H matrix.
     * CAUCHY will be set to the square of the KNEW-th Lagrange function at
     *   the step XALT-XOPT from XOPT for the vector XALT that is returned,
     *   except that CAUCHY is set to zero if XALT is not calculated.
     * GLAG is a working space vector of length N for the gradient of the
     *   KNEW-th Lagrange function at XOPT.
     * HCOL is a working space vector of length NPT for the second derivative
     *   coefficients of the KNEW-th Lagrange function.
     * W is a working space vector of length 2N that is going to hold the
     *   constrained Cauchy step from XOPT of the Lagrange function, followed
     *   by the downhill version of XALT when the uphill step is calculated.
     *
     * Set the first NPT components of W to the leading elements of the
     * KNEW-th column of the H matrix.
     *
     * NOTE(review): this is a line-for-line translation of Powell's Fortran;
     * the exact order of floating-point operations is significant (see the
     * commented-out alternatives elsewhere in this file), so the statement
     * order must not be rearranged.
     *
     * @param knew index of the interpolation point that is going to be moved
     *   (side effect: {@code newPoint} and usually {@code alternativeNewPoint}
     *   are overwritten).
     * @param adelt current trust region bound.
     * @return a two-element array {@code { alpha, cauchy }}; {@code cauchy} is
     *   zero when the Cauchy step could not be calculated (all components of
     *   GLAG "free" set is empty).
     */
    private double[] altmov(
        int knew,
        double adelt
    ) {
        printMethod(); // XXX

        final int n = currentBest.getDimension();
        final int npt = numberOfInterpolationPoints;

        // Gradient of the KNEW-th Lagrange function at XOPT, and its second
        // derivative coefficients (leading NPT elements of the KNEW-th column
        // of the H matrix).
        final ArrayRealVector glag = new ArrayRealVector(n);
        final ArrayRealVector hcol = new ArrayRealVector(npt);

        // work1: constrained Cauchy step; work2: saved downhill XALT candidate.
        final ArrayRealVector work1 = new ArrayRealVector(n);
        final ArrayRealVector work2 = new ArrayRealVector(n);

        for (int k = 0; k < npt; k++) {
            hcol.setEntry(k, ZERO);
        }
        // HCOL = ZMAT * ZMAT(KNEW,.)^T, i.e. the KNEW-th column of the
        // leading NPT x NPT submatrix of H.
        for (int j = 0, max = npt - n - 1; j < max; j++) {
            final double tmp = zMatrix.getEntry(knew, j);
            for (int k = 0; k < npt; k++) {
                hcol.setEntry(k, hcol.getEntry(k) + tmp * zMatrix.getEntry(k, j));
            }
        }
        final double alpha = hcol.getEntry(knew);
        final double ha = HALF * alpha;

        // Calculate the gradient of the KNEW-th Lagrange function at XOPT.

        for (int i = 0; i < n; i++) {
            glag.setEntry(i, bMatrix.getEntry(knew, i));
        }
        for (int k = 0; k < npt; k++) {
            double tmp = ZERO;
            for (int j = 0; j < n; j++) {
                tmp += interpolationPoints.getEntry(k, j) * trustRegionCenterOffset.getEntry(j);
            }
            tmp *= hcol.getEntry(k);
            for (int i = 0; i < n; i++) {
                glag.setEntry(i, glag.getEntry(i) + tmp * interpolationPoints.getEntry(k, i));
            }
        }

        // Search for a large denominator along the straight lines through XOPT
        // and another interpolation point. SLBD and SUBD will be lower and upper
        // bounds on the step along each of these lines in turn. PREDSQ will be
        // set to the square of the predicted denominator for each line. PRESAV
        // will be set to the largest admissible value of PREDSQ that occurs.

        double presav = ZERO;
        double step = Double.NaN;
        int ksav = 0;
        int ibdsav = 0;
        double stpsav = 0;
        for (int k = 0; k < npt; k++) {
            if (k == trustRegionCenterInterpolationPointIndex) {
                continue;
            }
            // dderiv: directional derivative of the Lagrange function along the
            // line from XOPT to point k; distsq: squared length of that line.
            double dderiv = ZERO;
            double distsq = ZERO;
            for (int i = 0; i < n; i++) {
                final double tmp = interpolationPoints.getEntry(k, i) - trustRegionCenterOffset.getEntry(i);
                dderiv += glag.getEntry(i) * tmp;
                distsq += tmp * tmp;
            }
            double subd = adelt / Math.sqrt(distsq);
            double slbd = -subd;
            // ilbd/iubd record which bound (if any) is active at the lower/upper
            // end of the step interval; sign encodes lower (-) vs upper (+).
            int ilbd = 0;
            int iubd = 0;
            final double sumin = Math.min(ONE, subd);

            // Revise SLBD and SUBD if necessary because of the bounds in SL and SU.

            for (int i = 0; i < n; i++) {
                final double tmp = interpolationPoints.getEntry(k, i) - trustRegionCenterOffset.getEntry(i);
                if (tmp > ZERO) {
                    if (slbd * tmp < lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
                        slbd = (lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp;
                        ilbd = -i - 1;
                    }
                    if (subd * tmp > upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
                        // Computing MAX
                        subd = Math.max(sumin,
                                        (upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp);
                        iubd = i + 1;
                    }
                } else if (tmp < ZERO) {
                    if (slbd * tmp > upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
                        slbd = (upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp;
                        ilbd = i + 1;
                    }
                    if (subd * tmp < lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) {
                        // Computing MAX
                        subd = Math.max(sumin,
                                        (lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i)) / tmp);
                        iubd = -i - 1;
                    }
                }
            }

            // Seek a large modulus of the KNEW-th Lagrange function when the index
            // of the other interpolation point on the line through XOPT is KNEW.

            step = slbd;
            int isbd = ilbd;
            double vlag = Double.NaN;
            if (k == knew) {
                // Along this line the Lagrange function is the quadratic
                // s*(dderiv - s*(dderiv-1)); compare its value at both interval
                // ends and at the interior stationary point (if it lies inside).
                final double diff = dderiv - ONE;
                vlag = slbd * (dderiv - slbd * diff);
                final double d1 = subd * (dderiv - subd * diff);
                if (Math.abs(d1) > Math.abs(vlag)) {
                    step = subd;
                    vlag = d1;
                    isbd = iubd;
                }
                final double d2 = HALF * dderiv;
                final double d3 = d2 - diff * slbd;
                final double d4 = d2 - diff * subd;
                if (d3 * d4 < ZERO) {
                    final double d5 = d2 * d2 / diff;
                    if (Math.abs(d5) > Math.abs(vlag)) {
                        step = d2 / diff;
                        vlag = d5;
                        isbd = 0;
                    }
                }

                // Search along each of the other lines through XOPT and another point.

            } else {
                vlag = slbd * (ONE - slbd);
                final double tmp = subd * (ONE - subd);
                if (Math.abs(tmp) > Math.abs(vlag)) {
                    step = subd;
                    vlag = tmp;
                    isbd = iubd;
                }
                if (subd > HALF) {
                    if (Math.abs(vlag) < ONE_OVER_FOUR) {
                        // The interior maximum of s*(1-s) at s = 1/2 is usable.
                        step = HALF;
                        vlag = ONE_OVER_FOUR;
                        isbd = 0;
                    }
                }
                vlag *= dderiv;
            }

            // Calculate PREDSQ for the current line search and maintain PRESAV.

            final double tmp = step * (ONE - step) * distsq;
            final double predsq = vlag * vlag * (vlag * vlag + ha * tmp * tmp);
            if (predsq > presav) {
                presav = predsq;
                ksav = k;
                stpsav = step;
                ibdsav = isbd;
            }
        }

        // Construct XNEW in a way that satisfies the bound constraints exactly.

        for (int i = 0; i < n; i++) {
            final double tmp = trustRegionCenterOffset.getEntry(i) + stpsav * (interpolationPoints.getEntry(ksav, i) - trustRegionCenterOffset.getEntry(i));
            newPoint.setEntry(i, Math.max(lowerDifference.getEntry(i),
                                          Math.min(upperDifference.getEntry(i), tmp)));
        }
        // Snap the component recorded in ibdsav exactly onto its bound to avoid
        // rounding drift (sign of ibdsav encodes lower vs upper bound).
        if (ibdsav < 0) {
            newPoint.setEntry(-ibdsav - 1, lowerDifference.getEntry(-ibdsav - 1));
        }
        if (ibdsav > 0) {
            newPoint.setEntry(ibdsav - 1, upperDifference.getEntry(ibdsav - 1));
        }

        // Prepare for the iterative method that assembles the constrained Cauchy
        // step in W. The sum of squares of the fixed components of W is formed in
        // WFIXSQ, and the free components of W are set to BIGSTP.

        final double bigstp = adelt + adelt; // sentinel marking "free" components
        int iflag = 0; // 0: downhill pass; 1: uphill pass (sign of GLAG reversed)
        double cauchy = Double.NaN;
        double csave = ZERO;
        while (true) {
            double wfixsq = ZERO;
            double ggfree = ZERO;
            for (int i = 0; i < n; i++) {
                final double glagValue = glag.getEntry(i);
                work1.setEntry(i, ZERO);
                // A component is free if moving downhill does not immediately
                // violate its active bound.
                if (Math.min(trustRegionCenterOffset.getEntry(i) - lowerDifference.getEntry(i), glagValue) > ZERO ||
                    Math.max(trustRegionCenterOffset.getEntry(i) - upperDifference.getEntry(i), glagValue) < ZERO) {
                    work1.setEntry(i, bigstp);
                    // Computing 2nd power
                    ggfree += glagValue * glagValue;
                }
            }
            if (ggfree == ZERO) {
                // No free components: XALT is not calculated and CAUCHY is zero.
                return new double[] { alpha, ZERO };
            }

            // Investigate whether more components of W can be fixed.
            final double tmp1 = adelt * adelt - wfixsq;
            if (tmp1 > ZERO) {
                step = Math.sqrt(tmp1 / ggfree);
                ggfree = ZERO;
                for (int i = 0; i < n; i++) {
                    if (work1.getEntry(i) == bigstp) {
                        final double tmp2 = trustRegionCenterOffset.getEntry(i) - step * glag.getEntry(i);
                        if (tmp2 <= lowerDifference.getEntry(i)) {
                            work1.setEntry(i, lowerDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
                            // Computing 2nd power
                            final double d1 = work1.getEntry(i);
                            wfixsq += d1 * d1;
                        } else if (tmp2 >= upperDifference.getEntry(i)) {
                            work1.setEntry(i, upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i));
                            // Computing 2nd power
                            final double d1 = work1.getEntry(i);
                            wfixsq += d1 * d1;
                        } else {
                            // Computing 2nd power
                            final double d1 = glag.getEntry(i);
                            ggfree += d1 * d1;
                        }
                    }
                }
            }

            // Set the remaining free components of W and all components of XALT,
            // except that W may be scaled later.

            double gw = ZERO;
            for (int i = 0; i < n; i++) {
                final double glagValue = glag.getEntry(i);
                if (work1.getEntry(i) == bigstp) {
                    work1.setEntry(i, -step * glagValue);
                    final double min = Math.min(upperDifference.getEntry(i),
                                                trustRegionCenterOffset.getEntry(i) + work1.getEntry(i));
                    alternativeNewPoint.setEntry(i, Math.max(lowerDifference.getEntry(i), min));
                } else if (work1.getEntry(i) == ZERO) {
                    alternativeNewPoint.setEntry(i, trustRegionCenterOffset.getEntry(i));
                } else if (glagValue > ZERO) {
                    alternativeNewPoint.setEntry(i, lowerDifference.getEntry(i));
                } else {
                    alternativeNewPoint.setEntry(i, upperDifference.getEntry(i));
                }
                gw += glagValue * work1.getEntry(i);
            }

            // Set CURV to the curvature of the KNEW-th Lagrange function along W.
            // Scale W by a factor less than one if that can reduce the modulus of
            // the Lagrange function at XOPT+W. Set CAUCHY to the final value of
            // the square of this function.

            double curv = ZERO;
            for (int k = 0; k < npt; k++) {
                double tmp = ZERO;
                for (int j = 0; j < n; j++) {
                    tmp += interpolationPoints.getEntry(k, j) * work1.getEntry(j);
                }
                curv += hcol.getEntry(k) * tmp * tmp;
            }
            if (iflag == 1) {
                curv = -curv;
            }
            if (curv > -gw &&
                curv < -gw * (ONE + Math.sqrt(TWO))) {
                final double scale = -gw / curv;
                for (int i = 0; i < n; i++) {
                    final double tmp = trustRegionCenterOffset.getEntry(i) + scale * work1.getEntry(i);
                    alternativeNewPoint.setEntry(i, Math.max(lowerDifference.getEntry(i),
                                                             Math.min(upperDifference.getEntry(i), tmp)));
                }
                // Computing 2nd power
                final double d1 = HALF * gw * scale;
                cauchy = d1 * d1;
            } else {
                // Computing 2nd power
                final double d1 = gw + HALF * curv;
                cauchy = d1 * d1;
            }

            // If IFLAG is zero, then XALT is calculated as before after reversing
            // the sign of GLAG. Thus two XALT vectors become available. The one that
            // is chosen is the one that gives the larger value of CAUCHY.

            if (iflag == 0) {
                for (int i = 0; i < n; i++) {
                    glag.setEntry(i, -glag.getEntry(i));
                    work2.setEntry(i, alternativeNewPoint.getEntry(i));
                }
                csave = cauchy;
                iflag = 1;
            } else {
                break;
            }
        }
        // Keep whichever of the two candidate XALT vectors gave the larger CAUCHY.
        if (csave > cauchy) {
            for (int i = 0; i < n; i++) {
                alternativeNewPoint.setEntry(i, work2.getEntry(i));
            }
            cauchy = csave;
        }

        return new double[] { alpha, cauchy };
    } // altmov
+
+ // ----------------------------------------------------------------------------------------
+
    /**
     * SUBROUTINE PRELIM sets the elements of XBASE, XPT, FVAL, GOPT, HQ, PQ,
     * BMAT and ZMAT for the first iteration, and it maintains the values of
     * NF and KOPT. The vector X is also changed by PRELIM.
     *
     * The arguments N, NPT, X, XL, XU, RHOBEG, IPRINT and MAXFUN are the
     * same as the corresponding arguments in SUBROUTINE BOBYQA.
     * The arguments XBASE, XPT, FVAL, HQ, PQ, BMAT, ZMAT, NDIM, SL and SU
     * are the same as the corresponding arguments in BOBYQB, the elements
     * of SL and SU being set in BOBYQA.
     * GOPT is usually the gradient of the quadratic model at XOPT+XBASE, but
     * it is set by PRELIM to the gradient of the quadratic model at XBASE.
     * If XOPT is nonzero, BOBYQB will change it to its usual value later.
     * NF is maintained as the number of calls of CALFUN so far.
     * KOPT will be such that the least calculated value of F so far is at
     * the point XPT(KOPT,.)+XBASE in the space of the variables.
     *
     * Side effects: evaluates the objective function NPT times (via
     * {@code computeObjectiveValue}), and overwrites {@code currentBest},
     * {@code originShift}, {@code interpolationPoints},
     * {@code fAtInterpolationPoints}, {@code gradientAtTrustRegionCenter},
     * {@code modelSecondDerivativesValues},
     * {@code modelSecondDerivativesParameters}, {@code bMatrix},
     * {@code zMatrix} and {@code trustRegionCenterInterpolationPointIndex}.
     *
     * NOTE(review): direct translation of Powell's Fortran; floating-point
     * expression order is significant (see the commented-out "recip" variant
     * below that makes "testAckley" and "testDiffPow" fail), so do not
     * rearrange statements.
     *
     * @param lowerBound Lower bounds.
     * @param upperBound Upper bounds.
     */
    private void prelim(double[] lowerBound,
                        double[] upperBound) {
        printMethod(); // XXX

        final int n = currentBest.getDimension();
        final int npt = numberOfInterpolationPoints;
        final int ndim = bMatrix.getRowDimension();

        final double rhosq = initialTrustRegionRadius * initialTrustRegionRadius;
        final double recip = 1d / rhosq;
        final int np = n + 1;

        // Set XBASE to the initial vector of variables, and set the initial
        // elements of XPT, BMAT, HQ, PQ and ZMAT to zero.

        for (int j = 0; j < n; j++) {
            originShift.setEntry(j, currentBest.getEntry(j));
            for (int k = 0; k < npt; k++) {
                interpolationPoints.setEntry(k, j, ZERO);
            }
            for (int i = 0; i < ndim; i++) {
                bMatrix.setEntry(i, j, ZERO);
            }
        }
        // HQ stores the upper triangle of the Hessian in packed form: n*(n+1)/2 entries.
        for (int i = 0, max = n * np / 2; i < max; i++) {
            modelSecondDerivativesValues.setEntry(i, ZERO);
        }
        for (int k = 0; k < npt; k++) {
            modelSecondDerivativesParameters.setEntry(k, ZERO);
            for (int j = 0, max = npt - np; j < max; j++) {
                zMatrix.setEntry(k, j, ZERO);
            }
        }

        // Begin the initialization procedure. NF becomes one more than the number
        // of function values so far. The coordinates of the displacement of the
        // next initial interpolation point from XBASE are set in XPT(NF+1,.).

        int ipt = 0;
        int jpt = 0;
        double fbeg = Double.NaN; // objective value at the very first point (XBASE)
        do {
            final int nfm = getEvaluations();
            final int nfx = nfm - n;
            final int nfmm = nfm - 1;
            final int nfxm = nfx - 1;
            double stepa = 0;
            double stepb = 0;
            if (nfm <= 2 * n) {
                // The first 2*N points after XBASE step along each coordinate
                // direction, first positively then negatively (adjusted for bounds).
                if (nfm >= 1 &&
                    nfm <= n) {
                    stepa = initialTrustRegionRadius;
                    if (upperDifference.getEntry(nfmm) == ZERO) {
                        // Variable is at its upper bound: step downwards instead.
                        stepa = -stepa;
                        // throw new PathIsExploredException(); // XXX
                    }
                    interpolationPoints.setEntry(nfm, nfmm, stepa);
                } else if (nfm > n) {
                    stepa = interpolationPoints.getEntry(nfx, nfxm);
                    stepb = -initialTrustRegionRadius;
                    if (lowerDifference.getEntry(nfxm) == ZERO) {
                        stepb = Math.min(TWO * initialTrustRegionRadius, upperDifference.getEntry(nfxm));
                        // throw new PathIsExploredException(); // XXX
                    }
                    if (upperDifference.getEntry(nfxm) == ZERO) {
                        stepb = Math.max(-TWO * initialTrustRegionRadius, lowerDifference.getEntry(nfxm));
                        // throw new PathIsExploredException(); // XXX
                    }
                    interpolationPoints.setEntry(nfm, nfxm, stepb);
                }
            } else {
                // Remaining points move along two coordinate directions (IPT, JPT)
                // simultaneously, to supply off-diagonal second derivatives.
                final int tmp1 = (nfm - np) / n;
                jpt = nfm - tmp1 * n - n;
                ipt = jpt + tmp1;
                if (ipt > n) {
                    final int tmp2 = jpt;
                    jpt = ipt - n;
                    ipt = tmp2;
                    // throw new PathIsExploredException(); // XXX
                }
                final int iptMinus1 = ipt - 1;
                final int jptMinus1 = jpt - 1;
                interpolationPoints.setEntry(nfm, iptMinus1, interpolationPoints.getEntry(ipt, iptMinus1));
                interpolationPoints.setEntry(nfm, jptMinus1, interpolationPoints.getEntry(jpt, jptMinus1));
            }

            // Calculate the next value of F. The least function value so far and
            // its index are required.

            for (int j = 0; j < n; j++) {
                // Clamp XBASE + XPT(NF,.) into [lowerBound, upperBound] ...
                currentBest.setEntry(j, Math.min(Math.max(lowerBound[j],
                                                          originShift.getEntry(j) + interpolationPoints.getEntry(nfm, j)),
                                                 upperBound[j]));
                // ... and snap exactly onto a bound when the offset equals SL/SU,
                // avoiding rounding drift.
                if (interpolationPoints.getEntry(nfm, j) == lowerDifference.getEntry(j)) {
                    currentBest.setEntry(j, lowerBound[j]);
                }
                if (interpolationPoints.getEntry(nfm, j) == upperDifference.getEntry(j)) {
                    currentBest.setEntry(j, upperBound[j]);
                }
            }

            final double objectiveValue = computeObjectiveValue(currentBest.toArray());
            // Internally the algorithm always minimizes; negate for maximization.
            final double f = isMinimize ? objectiveValue : -objectiveValue;
            final int numEval = getEvaluations(); // nfm + 1
            fAtInterpolationPoints.setEntry(nfm, f);

            if (numEval == 1) {
                fbeg = f;
                trustRegionCenterInterpolationPointIndex = 0;
            } else if (f < fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex)) {
                trustRegionCenterInterpolationPointIndex = nfm;
            }

            // Set the nonzero initial elements of BMAT and the quadratic model in the
            // cases when NF is at most 2*N+1. If NF exceeds N+1, then the positions
            // of the NF-th and (NF-N)-th interpolation points may be switched, in
            // order that the function value at the first of them contributes to the
            // off-diagonal second derivative terms of the initial quadratic model.

            if (numEval <= 2 * n + 1) {
                if (numEval >= 2 &&
                    numEval <= n + 1) {
                    // Forward-difference estimate of the gradient component.
                    gradientAtTrustRegionCenter.setEntry(nfmm, (f - fbeg) / stepa);
                    if (npt < numEval + n) {
                        final double oneOverStepA = ONE / stepa;
                        bMatrix.setEntry(0, nfmm, -oneOverStepA);
                        bMatrix.setEntry(nfm, nfmm, oneOverStepA);
                        bMatrix.setEntry(npt + nfmm, nfmm, -HALF * rhosq);
                        // throw new PathIsExploredException(); // XXX
                    }
                } else if (numEval >= n + 2) {
                    // Central-difference estimates of the diagonal second
                    // derivative and the gradient component.
                    final int ih = nfx * (nfx + 1) / 2 - 1;
                    final double tmp = (f - fbeg) / stepb;
                    final double diff = stepb - stepa;
                    modelSecondDerivativesValues.setEntry(ih, TWO * (tmp - gradientAtTrustRegionCenter.getEntry(nfxm)) / diff);
                    gradientAtTrustRegionCenter.setEntry(nfxm, (gradientAtTrustRegionCenter.getEntry(nfxm) * stepb - tmp * stepa) / diff);
                    if (stepa * stepb < ZERO) {
                        // Steps straddle XBASE: ensure the lower function value is
                        // stored at the earlier index (may move KOPT accordingly).
                        if (f < fAtInterpolationPoints.getEntry(nfm - n)) {
                            fAtInterpolationPoints.setEntry(nfm, fAtInterpolationPoints.getEntry(nfm - n));
                            fAtInterpolationPoints.setEntry(nfm - n, f);
                            if (trustRegionCenterInterpolationPointIndex == nfm) {
                                trustRegionCenterInterpolationPointIndex = nfm - n;
                            }
                            interpolationPoints.setEntry(nfm - n, nfxm, stepb);
                            interpolationPoints.setEntry(nfm, nfxm, stepa);
                        }
                    }
                    bMatrix.setEntry(0, nfxm, -(stepa + stepb) / (stepa * stepb));
                    bMatrix.setEntry(nfm, nfxm, -HALF / interpolationPoints.getEntry(nfm - n, nfxm));
                    bMatrix.setEntry(nfm - n, nfxm,
                                     -bMatrix.getEntry(0, nfxm) - bMatrix.getEntry(nfm, nfxm));
                    zMatrix.setEntry(0, nfxm, Math.sqrt(TWO) / (stepa * stepb));
                    zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) / rhosq);
                    // zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) * recip); // XXX "testAckley" and "testDiffPow" fail.
                    zMatrix.setEntry(nfm - n, nfxm,
                                     -zMatrix.getEntry(0, nfxm) - zMatrix.getEntry(nfm, nfxm));
                }

                // Set the off-diagonal second derivatives of the Lagrange functions and
                // the initial quadratic model.

            } else {
                zMatrix.setEntry(0, nfxm, recip);
                zMatrix.setEntry(nfm, nfxm, recip);
                zMatrix.setEntry(ipt, nfxm, -recip);
                zMatrix.setEntry(jpt, nfxm, -recip);

                final int ih = ipt * (ipt - 1) / 2 + jpt - 1;
                final double tmp = interpolationPoints.getEntry(nfm, ipt - 1) * interpolationPoints.getEntry(nfm, jpt - 1);
                modelSecondDerivativesValues.setEntry(ih, (fbeg - fAtInterpolationPoints.getEntry(ipt) - fAtInterpolationPoints.getEntry(jpt) + f) / tmp);
                // throw new PathIsExploredException(); // XXX
            }
        } while (getEvaluations() < npt);
    } // prelim
+
+
+ // ----------------------------------------------------------------------------------------
+
+ /**
+ * A version of the truncated conjugate gradient is applied. If a line
+ * search is restricted by a constraint, then the procedure is restarted,
+ * the values of the variables that are at their bounds being fixed. If
+ * the trust region boundary is reached, then further changes may be made
+ * to D, each one being in the two dimensional space that is spanned
+ * by the current D and the gradient of Q at XOPT+D, staying on the trust
+ * region boundary. Termination occurs when the reduction in Q seems to
+ * be close to the greatest reduction that can be achieved.
+ * The arguments N, NPT, XPT, XOPT, GOPT, HQ, PQ, SL and SU have the same
+ * meanings as the corresponding arguments of BOBYQB.
+ * DELTA is the trust region radius for the present calculation, which
+ * seeks a small value of the quadratic model within distance DELTA of
+ * XOPT subject to the bounds on the variables.
+ * XNEW will be set to a new vector of variables that is approximately
+ * the one that minimizes the quadratic model within the trust region
+ * subject to the SL and SU constraints on the variables. It satisfies
+ * as equations the bounds that become active during the calculation.
+ * D is the calculated trial step from XOPT, generated iteratively from an
+ * initial value of zero. Thus XNEW is XOPT+D after the final iteration.
+ * GNEW holds the gradient of the quadratic model at XOPT+D. It is updated
+ * when D is updated.
+ * XBDI is a working space vector. For I=1,2,...,N, the element XBDI(I) is
+ * set to -1.0, 0.0, or 1.0, the value being nonzero if and only if the
+ * I-th variable has become fixed at a bound, the bound being SL(I) or
+ * SU(I) in the case XBDI(I)=-1.0 or XBDI(I)=1.0, respectively. This
+ * information is accumulated during the construction of XNEW.
+ * The arrays S, HS and HRED are also used for working space. They hold the
+ * current search direction, and the changes in the gradient of Q along S
+ * and the reduced D, respectively, where the reduced D is the same as D,
+ * except that the components of the fixed variables are zero.
+ * DSQ will be set to the square of the length of XNEW-XOPT.
+ * CRVMIN is set to zero if D reaches the trust region boundary. Otherwise
+ * it is set to the least curvature of H that occurs in the conjugate
+ * gradient searches that are not restricted by any constraints. The
+ * value CRVMIN=-1.0D0 is set, however, if all of these searches are
+ * constrained.
+ * @param delta Trust region radius.
+ * @param gnew Working space: gradient of the quadratic model at XOPT+D, updated in place.
+ * @param xbdi Working space: per-variable bound-status indicator (-1, 0 or +1).
+ * @param s Working space: current search direction.
+ * @param hs Working space: product of the model Hessian with S.
+ * @param hred Working space: product of the model Hessian with the reduced D.
+ */
+ private double[] trsbox(
+ double delta,
+ ArrayRealVector gnew,
+ ArrayRealVector xbdi,
+ ArrayRealVector s,
+ ArrayRealVector hs,
+ ArrayRealVector hred
+ ) {
+ printMethod(); // XXX
+
+ final int n = currentBest.getDimension();
+ final int npt = numberOfInterpolationPoints;
+
+ double dsq = Double.NaN;
+ double crvmin = Double.NaN;
+
+ // Local variables
+ double ds;
+ int iu;
+ double dhd, dhs, cth, shs, sth, ssq, beta=0, sdec, blen;
+ int iact = -1;
+ int nact = 0;
+ double angt = 0, qred;
+ int isav;
+ double temp = 0, xsav = 0, xsum = 0, angbd = 0, dredg = 0, sredg = 0;
+ int iterc;
+ double resid = 0, delsq = 0, ggsav = 0, tempa = 0, tempb = 0,
+ redmax = 0, dredsq = 0, redsav = 0, gredsq = 0, rednew = 0;
+ int itcsav = 0;
+ double rdprev = 0, rdnext = 0, stplen = 0, stepsq = 0;
+ int itermax = 0;
+
+ // Set some constants.
+
+ // Function Body
+
+ // The sign of GOPT(I) gives the sign of the change to the I-th variable
+ // that will reduce Q from its value at XOPT. Thus xbdi.get((I) shows whether
+ // or not to fix the I-th variable at one of its bounds initially, with
+ // NACT being set to the number of fixed variables. D and GNEW are also
+ // set for the first iteration. DELSQ is the upper bound on the sum of
+ // squares of the free variables. QRED is the reduction in Q so far.
+
+ iterc = 0;
+ nact = 0;
+ for (int i = 0; i < n; i++) {
+ xbdi.setEntry(i, ZERO);
+ if (trustRegionCenterOffset.getEntry(i) <= lowerDifference.getEntry(i)) {
+ if (gradientAtTrustRegionCenter.getEntry(i) >= ZERO) {
+ xbdi.setEntry(i, MINUS_ONE);
+ }
+ } else if (trustRegionCenterOffset.getEntry(i) >= upperDifference.getEntry(i)) {
+ if (gradientAtTrustRegionCenter.getEntry(i) <= ZERO) {
+ xbdi.setEntry(i, ONE);
+ }
+ }
+ if (xbdi.getEntry(i) != ZERO) {
+ ++nact;
+ }
+ trialStepPoint.setEntry(i, ZERO);
+ gnew.setEntry(i, gradientAtTrustRegionCenter.getEntry(i));
+ }
+ delsq = delta * delta;
+ qred = ZERO;
+ crvmin = MINUS_ONE;
+
+ // Set the next search direction of the conjugate gradient method. It is
+ // the steepest descent direction initially and when the iterations are
+ // restarted because a variable has just been fixed by a bound, and of
+ // course the components of the fixed variables are zero. ITERMAX is an
+ // upper bound on the indices of the conjugate gradient iterations.
+
+ int state = 20;
+ for(;;) {
+ switch (state) {
+ case 20: {
+ printState(20); // XXX
+ beta = ZERO;
+ }
+ case 30: {
+ printState(30); // XXX
+ stepsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) != ZERO) {
+ s.setEntry(i, ZERO);
+ } else if (beta == ZERO) {
+ s.setEntry(i, -gnew.getEntry(i));
+ } else {
+ s.setEntry(i, beta * s.getEntry(i) - gnew.getEntry(i));
+ }
+ // Computing 2nd power
+ final double d1 = s.getEntry(i);
+ stepsq += d1 * d1;
+ }
+ if (stepsq == ZERO) {
+ state = 190; break;
+ }
+ if (beta == ZERO) {
+ gredsq = stepsq;
+ itermax = iterc + n - nact;
+ }
+ if (gredsq * delsq <= qred * 1e-4 * qred) {
+ state = 190; break;
+ }
+
+ // Multiply the search direction by the second derivative matrix of Q and
+ // calculate some scalars for the choice of steplength. Then set BLEN to
+ // the length of the the step to the trust region boundary and STPLEN to
+ // the steplength, ignoring the simple bounds.
+
+ state = 210; break;
+ }
+ case 50: {
+ printState(50); // XXX
+ resid = delsq;
+ ds = ZERO;
+ shs = ZERO;
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) == ZERO) {
+ // Computing 2nd power
+ final double d1 = trialStepPoint.getEntry(i);
+ resid -= d1 * d1;
+ ds += s.getEntry(i) * trialStepPoint.getEntry(i);
+ shs += s.getEntry(i) * hs.getEntry(i);
+ }
+ }
+ if (resid <= ZERO) {
+ state = 90; break;
+ }
+ temp = Math.sqrt(stepsq * resid + ds * ds);
+ if (ds < ZERO) {
+ blen = (temp - ds) / stepsq;
+ } else {
+ blen = resid / (temp + ds);
+ }
+ stplen = blen;
+ if (shs > ZERO) {
+ // Computing MIN
+ stplen = Math.min(blen, gredsq / shs);
+ }
+
+ // Reduce STPLEN if necessary in order to preserve the simple bounds,
+ // letting IACT be the index of the new constrained variable.
+
+ iact = -1;
+ for (int i = 0; i < n; i++) {
+ if (s.getEntry(i) != ZERO) {
+ xsum = trustRegionCenterOffset.getEntry(i) + trialStepPoint.getEntry(i);
+ if (s.getEntry(i) > ZERO) {
+ temp = (upperDifference.getEntry(i) - xsum) / s.getEntry(i);
+ } else {
+ temp = (lowerDifference.getEntry(i) - xsum) / s.getEntry(i);
+ }
+ if (temp < stplen) {
+ stplen = temp;
+ iact = i;
+ }
+ }
+ }
+
+ // Update CRVMIN, GNEW and D. Set SDEC to the decrease that occurs in Q.
+
+ sdec = ZERO;
+ if (stplen > ZERO) {
+ ++iterc;
+ temp = shs / stepsq;
+ if (iact == -1 && temp > ZERO) {
+ crvmin = Math.min(crvmin,temp);
+ if (crvmin == MINUS_ONE) {
+ crvmin = temp;
+ }
+ }
+ ggsav = gredsq;
+ gredsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ gnew.setEntry(i, gnew.getEntry(i) + stplen * hs.getEntry(i));
+ if (xbdi.getEntry(i) == ZERO) {
+ // Computing 2nd power
+ final double d1 = gnew.getEntry(i);
+ gredsq += d1 * d1;
+ }
+ trialStepPoint.setEntry(i, trialStepPoint.getEntry(i) + stplen * s.getEntry(i));
+ }
+ // Computing MAX
+ final double d1 = stplen * (ggsav - HALF * stplen * shs);
+ sdec = Math.max(d1, ZERO);
+ qred += sdec;
+ }
+
+ // Restart the conjugate gradient method if it has hit a new bound.
+
+ if (iact >= 0) {
+ ++nact;
+ xbdi.setEntry(iact, ONE);
+ if (s.getEntry(iact) < ZERO) {
+ xbdi.setEntry(iact, MINUS_ONE);
+ }
+ // Computing 2nd power
+ final double d1 = trialStepPoint.getEntry(iact);
+ delsq -= d1 * d1;
+ if (delsq <= ZERO) {
+ state = 190; break;
+ }
+ state = 20; break;
+ }
+
+ // If STPLEN is less than BLEN, then either apply another conjugate
+ // gradient iteration or RETURN.
+
+ if (stplen < blen) {
+ if (iterc == itermax) {
+ state = 190; break;
+ }
+ if (sdec <= qred * .01) {
+ state = 190; break;
+ }
+ beta = gredsq / ggsav;
+ state = 30; break;
+ }
+ }
+ case 90: {
+ printState(90); // XXX
+ crvmin = ZERO;
+
+ // Prepare for the alternative iteration by calculating some scalars
+ // and by multiplying the reduced D by the second derivative matrix of
+ // Q, where S holds the reduced D in the call of GGMULT.
+
+ }
+ case 100: {
+ printState(100); // XXX
+ if (nact >= n - 1) {
+ state = 190; break;
+ }
+ dredsq = ZERO;
+ dredg = ZERO;
+ gredsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) == ZERO) {
+ // Computing 2nd power
+ double d1 = trialStepPoint.getEntry(i);
+ dredsq += d1 * d1;
+ dredg += trialStepPoint.getEntry(i) * gnew.getEntry(i);
+ // Computing 2nd power
+ d1 = gnew.getEntry(i);
+ gredsq += d1 * d1;
+ s.setEntry(i, trialStepPoint.getEntry(i));
+ } else {
+ s.setEntry(i, ZERO);
+ }
+ }
+ itcsav = iterc;
+ state = 210; break;
+ // Let the search direction S be a linear combination of the reduced D
+ // and the reduced G that is orthogonal to the reduced D.
+ }
+ case 120: {
+ printState(120); // XXX
+ ++iterc;
+ temp = gredsq * dredsq - dredg * dredg;
+ if (temp <= qred * 1e-4 * qred) {
+ state = 190; break;
+ }
+ temp = Math.sqrt(temp);
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) == ZERO) {
+ s.setEntry(i, (dredg * trialStepPoint.getEntry(i) - dredsq * gnew.getEntry(i)) / temp);
+ } else {
+ s.setEntry(i, ZERO);
+ }
+ }
+ sredg = -temp;
+
+ // By considering the simple bounds on the variables, calculate an upper
+ // bound on the tangent of half the angle of the alternative iteration,
+ // namely ANGBD, except that, if already a free variable has reached a
+ // bound, there is a branch back to label 100 after fixing that variable.
+
+ angbd = ONE;
+ iact = -1;
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) == ZERO) {
+ tempa = trustRegionCenterOffset.getEntry(i) + trialStepPoint.getEntry(i) - lowerDifference.getEntry(i);
+ tempb = upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i) - trialStepPoint.getEntry(i);
+ if (tempa <= ZERO) {
+ ++nact;
+ xbdi.setEntry(i, MINUS_ONE);
+ state = 100; break;
+ } else if (tempb <= ZERO) {
+ ++nact;
+ xbdi.setEntry(i, ONE);
+ state = 100; break;
+ }
+ // Computing 2nd power
+ double d1 = trialStepPoint.getEntry(i);
+ // Computing 2nd power
+ double d2 = s.getEntry(i);
+ ssq = d1 * d1 + d2 * d2;
+ // Computing 2nd power
+ d1 = trustRegionCenterOffset.getEntry(i) - lowerDifference.getEntry(i);
+ temp = ssq - d1 * d1;
+ if (temp > ZERO) {
+ temp = Math.sqrt(temp) - s.getEntry(i);
+ if (angbd * temp > tempa) {
+ angbd = tempa / temp;
+ iact = i;
+ xsav = MINUS_ONE;
+ }
+ }
+ // Computing 2nd power
+ d1 = upperDifference.getEntry(i) - trustRegionCenterOffset.getEntry(i);
+ temp = ssq - d1 * d1;
+ if (temp > ZERO) {
+ temp = Math.sqrt(temp) + s.getEntry(i);
+ if (angbd * temp > tempb) {
+ angbd = tempb / temp;
+ iact = i;
+ xsav = ONE;
+ }
+ }
+ }
+ }
+
+ // Calculate HHD and some curvatures for the alternative iteration.
+
+ state = 210; break;
+ }
+ case 150: {
+ printState(150); // XXX
+ shs = ZERO;
+ dhs = ZERO;
+ dhd = ZERO;
+ for (int i = 0; i < n; i++) {
+ if (xbdi.getEntry(i) == ZERO) {
+ shs += s.getEntry(i) * hs.getEntry(i);
+ dhs += trialStepPoint.getEntry(i) * hs.getEntry(i);
+ dhd += trialStepPoint.getEntry(i) * hred.getEntry(i);
+ }
+ }
+
+ // Seek the greatest reduction in Q for a range of equally spaced values
+ // of ANGT in [0,ANGBD], where ANGT is the tangent of half the angle of
+ // the alternative iteration.
+
+ redmax = ZERO;
+ isav = -1;
+ redsav = ZERO;
+ iu = (int) (angbd * 17. + 3.1);
+ for (int i = 0; i < iu; i++) {
+ angt = angbd * i / iu;
+ sth = (angt + angt) / (ONE + angt * angt);
+ temp = shs + angt * (angt * dhd - dhs - dhs);
+ rednew = sth * (angt * dredg - sredg - HALF * sth * temp);
+ if (rednew > redmax) {
+ redmax = rednew;
+ isav = i;
+ rdprev = redsav;
+ } else if (i == isav + 1) {
+ rdnext = rednew;
+ }
+ redsav = rednew;
+ }
+
+ // Return if the reduction is zero. Otherwise, set the sine and cosine
+ // of the angle of the alternative iteration, and calculate SDEC.
+
+ if (isav < 0) {
+ state = 190; break;
+ }
+ if (isav < iu) {
+ temp = (rdnext - rdprev) / (redmax + redmax - rdprev - rdnext);
+ angt = angbd * (isav + HALF * temp) / iu;
+ }
+ cth = (ONE - angt * angt) / (ONE + angt * angt);
+ sth = (angt + angt) / (ONE + angt * angt);
+ temp = shs + angt * (angt * dhd - dhs - dhs);
+ sdec = sth * (angt * dredg - sredg - HALF * sth * temp);
+ if (sdec <= ZERO) {
+ state = 190; break;
+ }
+
+ // Update GNEW, D and HRED. If the angle of the alternative iteration
+ // is restricted by a bound on a free variable, that variable is fixed
+ // at the bound.
+
+ dredg = ZERO;
+ gredsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ gnew.setEntry(i, gnew.getEntry(i) + (cth - ONE) * hred.getEntry(i) + sth * hs.getEntry(i));
+ if (xbdi.getEntry(i) == ZERO) {
+ trialStepPoint.setEntry(i, cth * trialStepPoint.getEntry(i) + sth * s.getEntry(i));
+ dredg += trialStepPoint.getEntry(i) * gnew.getEntry(i);
+ // Computing 2nd power
+ final double d1 = gnew.getEntry(i);
+ gredsq += d1 * d1;
+ }
+ hred.setEntry(i, cth * hred.getEntry(i) + sth * hs.getEntry(i));
+ }
+ qred += sdec;
+ if (iact >= 0 && isav == iu) {
+ ++nact;
+ xbdi.setEntry(iact, xsav);
+ state = 100; break;
+ }
+
+ // If SDEC is sufficiently small, then RETURN after setting XNEW to
+ // XOPT+D, giving careful attention to the bounds.
+
+ if (sdec > qred * .01) {
+ state = 120; break;
+ }
+ }
+ case 190: {
+ printState(190); // XXX
+ dsq = ZERO;
+ for (int i = 0; i < n; i++) {
+ // Computing MAX
+ // Computing MIN
+ final double min = Math.min(trustRegionCenterOffset.getEntry(i) + trialStepPoint.getEntry(i),
+ upperDifference.getEntry(i));
+ newPoint.setEntry(i, Math.max(min, lowerDifference.getEntry(i)));
+ if (xbdi.getEntry(i) == MINUS_ONE) {
+ newPoint.setEntry(i, lowerDifference.getEntry(i));
+ }
+ if (xbdi.getEntry(i) == ONE) {
+ newPoint.setEntry(i, upperDifference.getEntry(i));
+ }
+ trialStepPoint.setEntry(i, newPoint.getEntry(i) - trustRegionCenterOffset.getEntry(i));
+ // Computing 2nd power
+ final double d1 = trialStepPoint.getEntry(i);
+ dsq += d1 * d1;
+ }
+ return new double[] { dsq, crvmin };
+ // The following instructions multiply the current S-vector by the second
+ // derivative matrix of the quadratic model, putting the product in HS.
+ // They are reached from three different parts of the software above and
+ // they can be regarded as an external subroutine.
+ }
+ case 210: {
+ printState(210); // XXX
+ int ih = 0;
+ for (int j = 0; j < n; j++) {
+ hs.setEntry(j, ZERO);
+ for (int i = 0; i <= j; i++) {
+ if (i < j) {
+ hs.setEntry(j, hs.getEntry(j) + modelSecondDerivativesValues.getEntry(ih) * s.getEntry(i));
+ }
+ hs.setEntry(i, hs.getEntry(i) + modelSecondDerivativesValues.getEntry(ih) * s.getEntry(j));
+ ih++;
+ }
+ }
+ final RealVector tmp = interpolationPoints.operate(s).ebeMultiply(modelSecondDerivativesParameters);
+ for (int k = 0; k < npt; k++) {
+ if (modelSecondDerivativesParameters.getEntry(k) != ZERO) {
+ for (int i = 0; i < n; i++) {
+ hs.setEntry(i, hs.getEntry(i) + tmp.getEntry(k) * interpolationPoints.getEntry(k, i));
+ }
+ }
+ }
+ if (crvmin != ZERO) {
+ state = 50; break;
+ }
+ if (iterc > itcsav) {
+ state = 150; break;
+ }
+ for (int i = 0; i < n; i++) {
+ hred.setEntry(i, hs.getEntry(i));
+ }
+ state = 120; break;
+ }
+ default: {
+ throw new MathIllegalStateException(LocalizedFormats.SIMPLE_MESSAGE, "trsbox");
+ }}
+ }
+ } // trsbox
+
+ // ----------------------------------------------------------------------------------------
+
+ /**
+ * The arrays BMAT and ZMAT are updated, as required by the new position
+ * of the interpolation point that has the index KNEW. The vector VLAG has
+ * N+NPT components, set on entry to the first NPT and last N components
+ * of the product Hw in equation (4.11) of the Powell (2006) paper on
+ * NEWUOA. Further, BETA is set on entry to the value of the parameter
+ * with that name, and DENOM is set to the denominator of the updating
+ * formula. Elements of ZMAT may be treated as zero if their moduli are
+ * at most ZTEST. The first NDIM elements of W are used for working space.
+ * @param beta
+ * @param denom
+ * @param knew
+ */
+ private void update(
+ double beta,
+ double denom,
+ int knew
+ ) {
+ printMethod(); // XXX
+
+ final int n = currentBest.getDimension();
+ final int npt = numberOfInterpolationPoints;
+ final int nptm = npt - n - 1;
+
+ // XXX Should probably be split into two arrays.
+ final ArrayRealVector work = new ArrayRealVector(npt + n);
+
+ double ztest = ZERO;
+ for (int k = 0; k < npt; k++) {
+ for (int j = 0; j < nptm; j++) {
+ // Computing MAX
+ ztest = Math.max(ztest, Math.abs(zMatrix.getEntry(k, j)));
+ }
+ }
+ ztest *= 1e-20;
+
+ // Apply the rotations that put zeros in the KNEW-th row of ZMAT.
+
+ for (int j = 1; j < nptm; j++) {
+ final double d1 = zMatrix.getEntry(knew, j);
+ if (Math.abs(d1) > ztest) {
+ // Computing 2nd power
+ final double d2 = zMatrix.getEntry(knew, 0);
+ // Computing 2nd power
+ final double d3 = zMatrix.getEntry(knew, j);
+ final double d4 = Math.sqrt(d2 * d2 + d3 * d3);
+ final double d5 = zMatrix.getEntry(knew, 0) / d4;
+ final double d6 = zMatrix.getEntry(knew, j) / d4;
+ for (int i = 0; i < npt; i++) {
+ final double d7 = d5 * zMatrix.getEntry(i, 0) + d6 * zMatrix.getEntry(i, j);
+ zMatrix.setEntry(i, j, d5 * zMatrix.getEntry(i, j) - d6 * zMatrix.getEntry(i, 0));
+ zMatrix.setEntry(i, 0, d7);
+ }
+ }
+ zMatrix.setEntry(knew, j, ZERO);
+ }
+
+ // Put the first NPT components of the KNEW-th column of HLAG into W,
+ // and calculate the parameters of the updating formula.
+
+ for (int i = 0; i < npt; i++) {
+ work.setEntry(i, zMatrix.getEntry(knew, 0) * zMatrix.getEntry(i, 0));
+ }
+ final double alpha = work.getEntry(knew);
+ final double tau = lagrangeValuesAtNewPoint.getEntry(knew);
+ lagrangeValuesAtNewPoint.setEntry(knew, lagrangeValuesAtNewPoint.getEntry(knew) - ONE);
+
+ // Complete the updating of ZMAT.
+
+ final double sqrtDenom = Math.sqrt(denom);
+ final double d1 = tau / sqrtDenom;
+ final double d2 = zMatrix.getEntry(knew, 0) / sqrtDenom;
+ for (int i = 0; i < npt; i++) {
+ zMatrix.setEntry(i, 0,
+ d1 * zMatrix.getEntry(i, 0) - d2 * lagrangeValuesAtNewPoint.getEntry(i));
+ }
+
+ // Finally, update the matrix BMAT.
+
+ for (int j = 0; j < n; j++) {
+ final int jp = npt + j;
+ work.setEntry(jp, bMatrix.getEntry(knew, j));
+ final double d3 = (alpha * lagrangeValuesAtNewPoint.getEntry(jp) - tau * work.getEntry(jp)) / denom;
+ final double d4 = (-beta * work.getEntry(jp) - tau * lagrangeValuesAtNewPoint.getEntry(jp)) / denom;
+ for (int i = 0; i <= jp; i++) {
+ bMatrix.setEntry(i, j,
+ bMatrix.getEntry(i, j) + d3 * lagrangeValuesAtNewPoint.getEntry(i) + d4 * work.getEntry(i));
+ if (i >= npt) {
+ bMatrix.setEntry(jp, (i - npt), bMatrix.getEntry(i, j));
+ }
+ }
+ }
+ } // update
+
+ /**
+ * Performs validity checks.
+ *
+ * @param lowerBound Lower bounds (constraints) of the objective variables.
+ * @param upperBound Upperer bounds (constraints) of the objective variables.
+ */
+ private void setup(double[] lowerBound,
+ double[] upperBound) {
+ printMethod(); // XXX
+
+ double[] init = getStartPoint();
+ final int dimension = init.length;
+
+ // Check problem dimension.
+ if (dimension < MINIMUM_PROBLEM_DIMENSION) {
+ throw new NumberIsTooSmallException(dimension, MINIMUM_PROBLEM_DIMENSION, true);
+ }
+ // Check number of interpolation points.
+ final int[] nPointsInterval = { dimension + 2, (dimension + 2) * (dimension + 1) / 2 };
+ if (numberOfInterpolationPoints < nPointsInterval[0] ||
+ numberOfInterpolationPoints > nPointsInterval[1]) {
+ throw new OutOfRangeException(LocalizedFormats.NUMBER_OF_INTERPOLATION_POINTS,
+ numberOfInterpolationPoints,
+ nPointsInterval[0],
+ nPointsInterval[1]);
+ }
+
+ // Initialize bound differences.
+ boundDifference = new double[dimension];
+
+ double requiredMinDiff = 2 * initialTrustRegionRadius;
+ double minDiff = Double.POSITIVE_INFINITY;
+ for (int i = 0; i < dimension; i++) {
+ boundDifference[i] = upperBound[i] - lowerBound[i];
+ minDiff = Math.min(minDiff, boundDifference[i]);
+ }
+ if (minDiff < requiredMinDiff) {
+ initialTrustRegionRadius = minDiff / 3.0;
+ }
+
+ // Initialize the data structures used by the "bobyqa" method.
+ bMatrix = new Array2DRowRealMatrix(dimension + numberOfInterpolationPoints,
+ dimension);
+ zMatrix = new Array2DRowRealMatrix(numberOfInterpolationPoints,
+ numberOfInterpolationPoints - dimension - 1);
+ interpolationPoints = new Array2DRowRealMatrix(numberOfInterpolationPoints,
+ dimension);
+ originShift = new ArrayRealVector(dimension);
+ fAtInterpolationPoints = new ArrayRealVector(numberOfInterpolationPoints);
+ trustRegionCenterOffset = new ArrayRealVector(dimension);
+ gradientAtTrustRegionCenter = new ArrayRealVector(dimension);
+ lowerDifference = new ArrayRealVector(dimension);
+ upperDifference = new ArrayRealVector(dimension);
+ modelSecondDerivativesParameters = new ArrayRealVector(numberOfInterpolationPoints);
+ newPoint = new ArrayRealVector(dimension);
+ alternativeNewPoint = new ArrayRealVector(dimension);
+ trialStepPoint = new ArrayRealVector(dimension);
+ lagrangeValuesAtNewPoint = new ArrayRealVector(dimension + numberOfInterpolationPoints);
+ modelSecondDerivativesValues = new ArrayRealVector(dimension * (dimension + 1) / 2);
+ }
+
+ /**
+ * Creates a new array.
+ *
+ * @param n Dimension of the returned array.
+ * @param value Value for each element.
+ * @return an array containing {@code n} elements set to the given
+ * {@code value}.
+ */
+ private static double[] fillNewArray(int n,
+ double value) {
+ double[] ds = new double[n];
+ Arrays.fill(ds, value);
+ return ds;
+ }
+
+ // XXX utility for figuring out call sequence.
+ private static String caller(int n) {
+ final Throwable t = new Throwable();
+ final StackTraceElement[] elements = t.getStackTrace();
+ final StackTraceElement e = elements[n];
+ return e.getMethodName() + " (at line " + e.getLineNumber() + ")";
+ }
+ // XXX utility for figuring out call sequence.
+ private static void printState(int s) {
+ // System.out.println(caller(2) + ": state " + s);
+ }
+ // XXX utility for figuring out call sequence.
+ private static void printMethod() {
+ // System.out.println(caller(2));
+ }
+
+ /**
+ * Marker for code paths that are not explored with the current unit tests.
+ * If the path becomes explored, it should just be removed from the code.
+ */
+ private static class PathIsExploredException extends RuntimeException {
+ private static final long serialVersionUID = 745350979634801853L;
+
+ private static final String PATH_IS_EXPLORED
+ = "If this exception is thrown, just remove it from the code";
+
+ PathIsExploredException() {
+ super(PATH_IS_EXPLORED + " " + BOBYQAOptimizer.caller(3));
+ }
+ }
+}
+//CHECKSTYLE: resume all
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java
new file mode 100644
index 000000000..8e436f9d5
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java
@@ -0,0 +1,1317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NotPositiveException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.linear.Array2DRowRealMatrix;
+import org.apache.commons.math3.linear.EigenDecomposition;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
+import org.apache.commons.math3.random.RandomGenerator;
+import org.apache.commons.math3.util.MathArrays;
+
+/**
+ * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
+ * for non-linear, non-convex, non-smooth, global function minimization.
+ * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
+ * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
+ * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
+ * optima, outlier, etc.) of the objective function. Like a
+ * quasi-Newton method, the CMA-ES learns and applies a variable metric
+ * on the underlying search space. Unlike a quasi-Newton method, the
+ * CMA-ES neither estimates nor uses gradients, making it considerably more
+ * reliable in terms of finding a good, or even close to optimal, solution.
+ *
+ * In general, on smooth objective functions the CMA-ES is roughly ten times
+ * slower than BFGS (counting objective function evaluations, no gradients provided).
+ * For up to N=10 variables also the derivative-free simplex
+ * direct search method (Nelder and Mead) can be faster, but it is
+ * far less reliable than CMA-ES.
+ *
+ * The CMA-ES is particularly well suited for non-separable
+ * and/or badly conditioned problems. To observe the advantage of CMA compared
+ * to a conventional evolution strategy, it will usually take about
+ * 30 N function evaluations. On difficult problems the complete
+ * optimization (a single run) is expected to take roughly between
+ * 30 N and 300 N<sup>2</sup>
+ * function evaluations.
+ *
+ * This implementation is translated and adapted from the Matlab version
+ * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
+ *
+ * For more information, please refer to the following links:
+ *
+ *
+ * @version $Id: CMAESOptimizer.java 1400108 2012-10-19 14:20:16Z erans $
+ * @since 3.0
+ */
+public class CMAESOptimizer
+ extends MultivariateOptimizer {
+ // global search parameters
+ /**
+ * Population size, offspring number. The primary strategy parameter to play
+ * with, which can be increased from its default value. Increasing the
+ * population size improves global search properties in exchange to speed.
+ * Speed decreases, as a rule, at most linearly with increasing population
+ * size. It is advisable to begin with the default small population size.
+ */
+ private int lambda; // population size
+ /**
+ * Covariance update mechanism, default is active CMA. isActiveCMA = true
+ * turns on "active CMA" with a negative update of the covariance matrix and
+ * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
+ * pos. def. and is numerically faster. Active CMA usually speeds up the
+ * adaptation.
+ */
+ private final boolean isActiveCMA;
+ /**
+ * Determines how often a new random offspring is generated in case it is
+ * not feasible / beyond the defined limits, default is 0.
+ */
+ private final int checkFeasableCount;
+ /**
+ * @see Sigma
+ */
+ private double[] inputSigma;
+ /** Number of objective variables/problem dimension */
+ private int dimension;
+ /**
+ * Defines the number of initial iterations, where the covariance matrix
+ * remains diagonal and the algorithm has internally linear time complexity.
+ * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
+ * this setting also exhibits linear space complexity. This can be
+ * particularly useful for dimension > 100.
+ * @see A Simple Modification in CMA-ES
+ */
+ private int diagonalOnly;
+ /** Number of objective variables/problem dimension */
+ private boolean isMinimize = true;
+ /** Indicates whether statistic data is collected. */
+ private final boolean generateStatistics;
+
+ // termination criteria
+ /** Maximal number of iterations allowed. */
+ private final int maxIterations;
+ /** Limit for fitness value. */
+ private final double stopFitness;
+ /** Stop if x-changes larger stopTolUpX. */
+ private double stopTolUpX;
+ /** Stop if x-change smaller stopTolX. */
+ private double stopTolX;
+ /** Stop if fun-changes smaller stopTolFun. */
+ private double stopTolFun;
+ /** Stop if back fun-changes smaller stopTolHistFun. */
+ private double stopTolHistFun;
+
+ // selection strategy parameters
+ /** Number of parents/points for recombination. */
+ private int mu; //
+ /** log(mu + 0.5), stored for efficiency. */
+ private double logMu2;
+ /** Array for weighted recombination. */
+ private RealMatrix weights;
+ /** Variance-effectiveness of sum w_i x_i. */
+ private double mueff; //
+
+ // dynamic strategy parameters and constants
+ /** Overall standard deviation - search volume. */
+ private double sigma;
+ /** Cumulation constant. */
+ private double cc;
+ /** Cumulation constant for step-size. */
+ private double cs;
+ /** Damping for step-size. */
+ private double damps;
+ /** Learning rate for rank-one update. */
+ private double ccov1;
+ /** Learning rate for rank-mu update' */
+ private double ccovmu;
+ /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
+ private double chiN;
+ /** Learning rate for rank-one update - diagonalOnly */
+ private double ccov1Sep;
+ /** Learning rate for rank-mu update - diagonalOnly */
+ private double ccovmuSep;
+
+ // CMA internal values - updated each generation
+ /** Objective variables. */
+ private RealMatrix xmean;
+ /** Evolution path. */
+ private RealMatrix pc;
+ /** Evolution path for sigma. */
+ private RealMatrix ps;
+ /** Norm of ps, stored for efficiency. */
+ private double normps;
+ /** Coordinate system. */
+ private RealMatrix B;
+ /** Scaling. */
+ private RealMatrix D;
+ /** B*D, stored for efficiency. */
+ private RealMatrix BD;
+ /** Diagonal of sqrt(D), stored for efficiency. */
+ private RealMatrix diagD;
+ /** Covariance matrix. */
+ private RealMatrix C;
+ /** Diagonal of C, used for diagonalOnly. */
+ private RealMatrix diagC;
+ /** Number of iterations already performed. */
+ private int iterations;
+
+ /** History queue of best values. */
+ private double[] fitnessHistory;
+ /** Size of history queue of best values. */
+ private int historySize;
+
+ /** Random generator. */
+ private final RandomGenerator random;
+
+ /** History of sigma values. */
+ private final List statisticsSigmaHistory = new ArrayList();
+ /** History of mean matrix. */
+ private final List statisticsMeanHistory = new ArrayList();
+ /** History of fitness values. */
+ private final List statisticsFitnessHistory = new ArrayList();
+ /** History of D matrix. */
+ private final List statisticsDHistory = new ArrayList();
+
+ /**
+  * Builds the optimizer. All strategy parameters are fixed at construction
+  * time; the problem-dependent data ({@link Sigma}, {@link PopulationSize})
+  * is passed to {@link #optimize(OptimizationData[]) optimize}.
+  *
+  * @param maxIterations Maximal number of iterations.
+  * @param stopFitness Stop (early) if the objective function value is smaller than
+  * {@code stopFitness}; a value of 0 disables this criterion.
+  * @param isActiveCMA Chooses the covariance matrix update method: if
+  * {@code true}, "active CMA" (with negative update) is used.
+  * @param diagonalOnly Number of initial iterations, where the covariance matrix
+  * remains diagonal.
+  * @param checkFeasableCount Determines how often new random objective variables are
+  * generated in case they are out of bounds.
+  * @param random Random generator.
+  * @param generateStatistics Whether statistic data is collected.
+  * @param checker Convergence checker.
+  *
+  * @since 3.1
+  */
+ public CMAESOptimizer(int maxIterations,
+ double stopFitness,
+ boolean isActiveCMA,
+ int diagonalOnly,
+ int checkFeasableCount,
+ RandomGenerator random,
+ boolean generateStatistics,
+ ConvergenceChecker<PointValuePair> checker) {
+ super(checker);
+ this.maxIterations = maxIterations;
+ this.stopFitness = stopFitness;
+ this.isActiveCMA = isActiveCMA;
+ this.diagonalOnly = diagonalOnly;
+ this.checkFeasableCount = checkFeasableCount;
+ this.random = random;
+ this.generateStatistics = generateStatistics;
+ }
+
+ /**
+  * @return History of sigma values (the live internal list; do not modify).
+  */
+ public List<Double> getStatisticsSigmaHistory() {
+ return statisticsSigmaHistory;
+ }
+
+ /**
+  * @return History of mean matrix (the live internal list; do not modify).
+  */
+ public List<RealMatrix> getStatisticsMeanHistory() {
+ return statisticsMeanHistory;
+ }
+
+ /**
+  * @return History of fitness values (the live internal list; do not modify).
+  */
+ public List<Double> getStatisticsFitnessHistory() {
+ return statisticsFitnessHistory;
+ }
+
+ /**
+  * @return History of D matrix (the live internal list; do not modify).
+  */
+ public List<RealMatrix> getStatisticsDHistory() {
+ return statisticsDHistory;
+ }
+
+ /**
+  * Input sigma values.
+  * They define the initial coordinate-wise standard deviations for
+  * sampling new search points around the initial guess.
+  * It is suggested to set them to the estimated distance from the
+  * initial to the desired optimum.
+  * Small values induce the search to be more local (and very small
+  * values are more likely to find a local optimum close to the initial
+  * guess).
+  * Too small values might however lead to early termination.
+  */
+ public static class Sigma implements OptimizationData {
+     /** Sigma values (defensive copy of the constructor argument). */
+     private final double[] sigma;
+
+     /**
+      * @param s Sigma values.
+      * @throws NotPositiveException if any of the array entries is smaller
+      * than zero.
+      */
+     public Sigma(double[] s)
+         throws NotPositiveException {
+         // Reject negative entries before storing anything.
+         for (final double entry : s) {
+             if (entry < 0) {
+                 throw new NotPositiveException(entry);
+             }
+         }
+         sigma = s.clone();
+     }
+
+     /**
+      * @return a copy of the sigma values.
+      */
+     public double[] getSigma() {
+         return sigma.clone();
+     }
+ }
+
+ /**
+  * Population size.
+  * The number of offspring is the primary strategy parameter.
+  * In the absence of better clues, a good default could be an
+  * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
+  * number of optimized parameters.
+  * Increasing the population size improves global search properties
+  * at the expense of speed (which in general decreases at most
+  * linearly with increasing population size).
+  */
+ public static class PopulationSize implements OptimizationData {
+     /** Population size (strictly positive). */
+     private final int lambda;
+
+     /**
+      * @param size Population size.
+      * @throws NotStrictlyPositiveException if {@code size <= 0}.
+      */
+     public PopulationSize(int size)
+         throws NotStrictlyPositiveException {
+         if (size > 0) {
+             lambda = size;
+         } else {
+             throw new NotStrictlyPositiveException(size);
+         }
+     }
+
+     /**
+      * @return the population size.
+      */
+     public int getPopulationSize() {
+         return lambda;
+     }
+ }
+
+ /**
+  * {@inheritDoc}
+  *
+  * Scans the argument list for CMA-ES-specific data ({@link Sigma},
+  * {@link PopulationSize}) before delegating to the base class, so that
+  * those settings are available when {@link #doOptimize()} runs.
+  *
+  * @param optData Optimization data. The following data will be looked for:
+  *
+  * - {@link org.apache.commons.math3.optim.MaxEval}
+  * - {@link org.apache.commons.math3.optim.InitialGuess}
+  * - {@link org.apache.commons.math3.optim.SimpleBounds}
+  * - {@link org.apache.commons.math3.optim.ObjectiveFunction}
+  * - {@link Sigma}
+  * - {@link PopulationSize}
+  *
+  * @return {@inheritDoc}
+  * @throws TooManyEvaluationsException if the maximal number of
+  * evaluations is exceeded.
+  * @throws DimensionMismatchException if the initial guess, target, and weight
+  * arguments have inconsistent dimensions.
+  */
+ @Override
+ public PointValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException,
+ DimensionMismatchException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+  * {@inheritDoc}
+  *
+  * Main CMA-ES loop: sample lambda offspring from N(xmean, sigma^2 C),
+  * rank them by fitness, recombine the best mu into a new mean, update the
+  * evolution paths, the covariance matrix and the step size, and stop on
+  * one of several termination criteria.
+  */
+ @Override
+ protected PointValuePair doOptimize() {
+ checkParameters();
+ // -------------------- Initialization --------------------------------
+ isMinimize = getGoalType().equals(GoalType.MINIMIZE);
+ // Maximization is handled by negating values inside FitnessFunction.
+ final FitnessFunction fitfun = new FitnessFunction();
+ final double[] guess = getStartPoint();
+ // number of objective variables/problem dimension
+ dimension = guess.length;
+ initializeCMA(guess);
+ iterations = 0;
+ double bestValue = fitfun.value(guess);
+ push(fitnessHistory, bestValue);
+ PointValuePair optimum
+ = new PointValuePair(getStartPoint(),
+ isMinimize ? bestValue : -bestValue);
+ PointValuePair lastResult = null;
+
+ // -------------------- Generation Loop --------------------------------
+
+ generationLoop:
+ for (iterations = 1; iterations <= maxIterations; iterations++) {
+ // Generate and evaluate lambda offspring
+ final RealMatrix arz = randn1(dimension, lambda);
+ final RealMatrix arx = zeros(dimension, lambda);
+ final double[] fitness = new double[lambda];
+ // generate random offspring
+ for (int k = 0; k < lambda; k++) {
+ RealMatrix arxk = null;
+ // Resample up to checkFeasableCount times to get an in-bounds point;
+ // after that, the point is used as-is and repaired/penalized later.
+ for (int i = 0; i < checkFeasableCount + 1; i++) {
+ if (diagonalOnly <= 0) {
+ arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
+ .scalarMultiply(sigma)); // m + sig * Normal(0,C)
+ } else {
+ arxk = xmean.add(times(diagD,arz.getColumnMatrix(k))
+ .scalarMultiply(sigma));
+ }
+ if (i >= checkFeasableCount ||
+ fitfun.isFeasible(arxk.getColumn(0))) {
+ break;
+ }
+ // regenerate random arguments for row
+ arz.setColumn(k, randn(dimension));
+ }
+ copyColumn(arxk, 0, arx, k);
+ try {
+ fitness[k] = fitfun.value(arx.getColumn(k)); // compute fitness
+ } catch (TooManyEvaluationsException e) {
+ // Evaluation budget exhausted: return the best point found so far.
+ break generationLoop;
+ }
+ }
+ // Sort by fitness and compute weighted mean into xmean
+ final int[] arindex = sortedIndices(fitness);
+ // Calculate new xmean, this is selection and recombination
+ final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
+ final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
+ xmean = bestArx.multiply(weights);
+ final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
+ final RealMatrix zmean = bestArz.multiply(weights);
+ final boolean hsig = updateEvolutionPaths(zmean, xold);
+ if (diagonalOnly <= 0) {
+ updateCovariance(hsig, bestArx, arz, arindex, xold);
+ } else {
+ updateCovarianceDiagonalOnly(hsig, bestArz, xold);
+ }
+ // Adapt step size sigma - Eq. (5)
+ sigma *= Math.exp(Math.min(1, (normps/chiN - 1) * cs / damps));
+ final double bestFitness = fitness[arindex[0]];
+ final double worstFitness = fitness[arindex[arindex.length - 1]];
+ if (bestValue > bestFitness) {
+ bestValue = bestFitness;
+ lastResult = optimum;
+ optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
+ isMinimize ? bestFitness : -bestFitness);
+ if (getConvergenceChecker() != null &&
+ lastResult != null) {
+ if (getConvergenceChecker().converged(iterations, optimum, lastResult)) {
+ break generationLoop;
+ }
+ }
+ }
+ // handle termination criteria
+ // Break, if fitness is good enough
+ if (stopFitness != 0) { // only if stopFitness is defined
+ if (bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
+ break generationLoop;
+ }
+ }
+ // Stop if the search distribution has shrunk below stopTolX in every
+ // coordinate (inner break leaves the check as soon as one coordinate
+ // is still above the threshold).
+ final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
+ final double[] pcCol = pc.getColumn(0);
+ for (int i = 0; i < dimension; i++) {
+ if (sigma * Math.max(Math.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
+ break;
+ }
+ if (i >= dimension - 1) {
+ break generationLoop;
+ }
+ }
+ for (int i = 0; i < dimension; i++) {
+ if (sigma * sqrtDiagC[i] > stopTolUpX) {
+ break generationLoop;
+ }
+ }
+ final double historyBest = min(fitnessHistory);
+ final double historyWorst = max(fitnessHistory);
+ if (iterations > 2 &&
+ Math.max(historyWorst, worstFitness) -
+ Math.min(historyBest, bestFitness) < stopTolFun) {
+ break generationLoop;
+ }
+ if (iterations > fitnessHistory.length &&
+ historyWorst - historyBest < stopTolHistFun) {
+ break generationLoop;
+ }
+ // condition number of the covariance matrix exceeds 1e14
+ // (diagD holds standard deviations, so a ratio of 1e7 in diagD
+ // corresponds to a 1e14 eigenvalue ratio in C)
+ if (max(diagD) / min(diagD) > 1e7) {
+ break generationLoop;
+ }
+ // user defined termination
+ if (getConvergenceChecker() != null) {
+ final PointValuePair current
+ = new PointValuePair(bestArx.getColumn(0),
+ isMinimize ? bestFitness : -bestFitness);
+ if (lastResult != null &&
+ getConvergenceChecker().converged(iterations, current, lastResult)) {
+ break generationLoop;
+ }
+ lastResult = current;
+ }
+ // Adjust step size in case of equal function values (flat fitness)
+ if (bestValue == fitness[arindex[(int)(0.1+lambda/4.)]]) {
+ sigma = sigma * Math.exp(0.2 + cs / damps);
+ }
+ if (iterations > 2 && Math.max(historyWorst, bestFitness) -
+ Math.min(historyBest, bestFitness) == 0) {
+ sigma = sigma * Math.exp(0.2 + cs / damps);
+ }
+ // store best in history
+ push(fitnessHistory,bestFitness);
+ fitfun.setValueRange(worstFitness-bestFitness);
+ if (generateStatistics) {
+ statisticsSigmaHistory.add(sigma);
+ statisticsFitnessHistory.add(bestFitness);
+ statisticsMeanHistory.add(xmean.transpose());
+ statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
+ }
+ }
+ return optimum;
+ }
+
+ /**
+  * Scans the list of (required and optional) optimization data that
+  * characterize the problem.
+  *
+  * @param optData Optimization data. The following data will be looked for:
+  *
+  * - {@link Sigma}
+  * - {@link PopulationSize}
+  *
+  */
+ private void parseOptimizationData(OptimizationData... optData) {
+     // Values set by a previous call are kept unless overridden here.
+     for (final OptimizationData data : optData) {
+         if (data instanceof Sigma) {
+             inputSigma = ((Sigma) data).getSigma();
+         } else if (data instanceof PopulationSize) {
+             lambda = ((PopulationSize) data).getPopulationSize();
+         }
+     }
+ }
+
+ /**
+  * Checks dimensions and values of boundaries and inputSigma if defined.
+  */
+ private void checkParameters() {
+     final double[] init = getStartPoint();
+     final double[] lB = getLowerBound();
+     final double[] uB = getUpperBound();
+
+     if (inputSigma == null) {
+         // No Sigma argument supplied: nothing to validate.
+         return;
+     }
+     if (inputSigma.length != init.length) {
+         throw new DimensionMismatchException(inputSigma.length, init.length);
+     }
+     // Each sigma must not exceed the width of its search interval.
+     for (int i = 0; i < init.length; i++) {
+         if (inputSigma[i] > uB[i] - lB[i]) {
+             throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
+         }
+     }
+ }
+
+ /**
+  * Initialization of the dynamic search parameters
+  *
+  * @param guess Initial guess for the arguments of the fitness function.
+  */
+ private void initializeCMA(double[] guess) {
+ if (lambda <= 0) {
+ throw new NotStrictlyPositiveException(lambda);
+ }
+ // initialize sigma
+ // NOTE(review): assumes inputSigma is non-null here; if no Sigma argument
+ // was supplied, this dereference throws NPE - confirm intended contract.
+ final double[][] sigmaArray = new double[guess.length][1];
+ for (int i = 0; i < guess.length; i++) {
+ sigmaArray[i][0] = inputSigma[i];
+ }
+ final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
+ sigma = max(insigma); // overall standard deviation
+
+ // initialize termination criteria
+ stopTolUpX = 1e3 * max(insigma);
+ stopTolX = 1e-11 * max(insigma);
+ stopTolFun = 1e-12;
+ stopTolHistFun = 1e-13;
+
+ // initialize selection strategy parameters
+ mu = lambda / 2; // number of parents/points for recombination
+ logMu2 = Math.log(mu + 0.5);
+ // Log-decreasing recombination weights, later normalized to sum to 1.
+ weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
+ double sumw = 0;
+ double sumwq = 0;
+ for (int i = 0; i < mu; i++) {
+ double w = weights.getEntry(i, 0);
+ sumw += w;
+ sumwq += w * w;
+ }
+ weights = weights.scalarMultiply(1 / sumw);
+ mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i
+
+ // initialize dynamic strategy parameters and constants
+ cc = (4 + mueff / dimension) /
+ (dimension + 4 + 2 * mueff / dimension);
+ cs = (mueff + 2) / (dimension + mueff + 3.);
+ damps = (1 + 2 * Math.max(0, Math.sqrt((mueff - 1) /
+ (dimension + 1)) - 1)) *
+ Math.max(0.3,
+ 1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
+ ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
+ ccovmu = Math.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
+ ((dimension + 2) * (dimension + 2) + mueff));
+ // Larger learning rates are safe while the covariance is diagonal.
+ ccov1Sep = Math.min(1, ccov1 * (dimension + 1.5) / 3);
+ ccovmuSep = Math.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
+ // Expected length of a N(0,I) sample, used for step-size control.
+ chiN = Math.sqrt(dimension) *
+ (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
+ // intialize CMA internal values - updated each generation
+ xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
+ diagD = insigma.scalarMultiply(1 / sigma);
+ diagC = square(diagD);
+ pc = zeros(dimension, 1); // evolution paths for C and sigma
+ ps = zeros(dimension, 1); // B defines the coordinate system
+ normps = ps.getFrobeniusNorm();
+
+ B = eye(dimension, dimension);
+ D = ones(dimension, 1); // diagonal D defines the scaling
+ BD = times(B, repmat(diagD.transpose(), dimension, 1));
+ C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
+ historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
+ fitnessHistory = new double[historySize]; // history of fitness values
+ // Seed the history with MAX_VALUE so early termination tests cannot fire.
+ for (int i = 0; i < historySize; i++) {
+ fitnessHistory[i] = Double.MAX_VALUE;
+ }
+ }
+
+ /**
+  * Update of the evolution paths ps and pc.
+  *
+  * @param zmean Weighted row matrix of the gaussian random numbers generating
+  * the current offspring.
+  * @param xold xmean matrix of the previous generation.
+  * @return hsig flag indicating a small correction.
+  */
+ private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
+ // Cumulation for sigma: exponential fade-out plus normalized new step.
+ ps = ps.scalarMultiply(1 - cs).add(
+ B.multiply(zmean).scalarMultiply(
+ Math.sqrt(cs * (2 - cs) * mueff)));
+ normps = ps.getFrobeniusNorm();
+ // hsig stalls the pc update when |ps| is large (prevents a fast-growing
+ // axis from inflating C after a step-size increase).
+ final boolean hsig = normps /
+ Math.sqrt(1 - Math.pow(1 - cs, 2 * iterations)) /
+ chiN < 1.4 + 2 / ((double) dimension + 1);
+ pc = pc.scalarMultiply(1 - cc);
+ if (hsig) {
+ pc = pc.add(xmean.subtract(xold).scalarMultiply(Math.sqrt(cc * (2 - cc) * mueff) / sigma));
+ }
+ return hsig;
+ }
+
+ /**
+  * Update of the covariance matrix C for diagonalOnly > 0
+  *
+  * @param hsig Flag indicating a small correction.
+  * @param bestArz Fitness-sorted matrix of the gaussian random values of the
+  * current offspring.
+  * @param xold xmean matrix of the previous generation.
+  */
+ private void updateCovarianceDiagonalOnly(boolean hsig,
+ final RealMatrix bestArz,
+ final RealMatrix xold) {
+ // minor correction if hsig==false
+ double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
+ oldFac += 1 - ccov1Sep - ccovmuSep;
+ // Only the diagonal of C is maintained in this mode.
+ diagC = diagC.scalarMultiply(oldFac) // regard old matrix
+ .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
+ .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
+ .scalarMultiply(ccovmuSep));
+ diagD = sqrt(diagC); // replaces eig(C)
+ if (diagonalOnly > 1 &&
+ iterations > diagonalOnly) {
+ // full covariance matrix from now on
+ // (diagonalOnly acts as an iteration threshold; switching resets
+ // B to identity and rebuilds BD and C from the diagonal state)
+ diagonalOnly = 0;
+ B = eye(dimension, dimension);
+ BD = diag(diagD);
+ C = diag(diagC);
+ }
+ }
+
+ /**
+  * Update of the covariance matrix C.
+  *
+  * @param hsig Flag indicating a small correction.
+  * @param bestArx Fitness-sorted matrix of the argument vectors producing the
+  * current offspring.
+  * @param arz Unsorted matrix containing the gaussian random values of the
+  * current offspring.
+  * @param arindex Indices indicating the fitness-order of the current offspring.
+  * @param xold xmean matrix of the previous generation.
+  */
+ private void updateCovariance(boolean hsig, final RealMatrix bestArx,
+ final RealMatrix arz, final int[] arindex,
+ final RealMatrix xold) {
+ double negccov = 0;
+ if (ccov1 + ccovmu > 0) {
+ final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
+ .scalarMultiply(1 / sigma); // mu difference vectors
+ final RealMatrix roneu = pc.multiply(pc.transpose())
+ .scalarMultiply(ccov1); // rank one update
+ // minor correction if hsig==false
+ double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
+ oldFac += 1 - ccov1 - ccovmu;
+ if (isActiveCMA) {
+ // Adapt covariance matrix C active CMA
+ // (additionally subtracts a weighted outer product of the WORST
+ // offspring directions, scaled so variance cannot go negative)
+ negccov = (1 - ccovmu) * 0.25 * mueff /
+ (Math.pow(dimension + 2, 1.5) + 2 * mueff);
+ // keep at least 0.66 in all directions, small popsize are most
+ // critical
+ final double negminresidualvariance = 0.66;
+ // where to make up for the variance loss
+ final double negalphaold = 0.5;
+ // prepare vectors, compute negative updating matrix Cneg
+ final int[] arReverseIndex = reverse(arindex);
+ RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
+ RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
+ final int[] idxnorms = sortedIndices(arnorms.getRow(0));
+ final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
+ final int[] idxReverse = reverse(idxnorms);
+ final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
+ arnorms = divide(arnormsReverse, arnormsSorted);
+ final int[] idxInv = inverse(idxnorms);
+ final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
+ // check and set learning rate negccov
+ final double negcovMax = (1 - negminresidualvariance) /
+ square(arnormsInv).multiply(weights).getEntry(0, 0);
+ if (negccov > negcovMax) {
+ negccov = negcovMax;
+ }
+ arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
+ final RealMatrix artmp = BD.multiply(arzneg);
+ final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
+ oldFac += negalphaold * negccov;
+ C = C.scalarMultiply(oldFac)
+ .add(roneu) // regard old matrix
+ .add(arpos.scalarMultiply( // plus rank one update
+ ccovmu + (1 - negalphaold) * negccov) // plus rank mu update
+ .multiply(times(repmat(weights, 1, dimension),
+ arpos.transpose())))
+ .subtract(Cneg.scalarMultiply(negccov));
+ } else {
+ // Adapt covariance matrix C - nonactive
+ C = C.scalarMultiply(oldFac) // regard old matrix
+ .add(roneu) // plus rank one update
+ .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
+ .multiply(times(repmat(weights, 1, dimension),
+ arpos.transpose())));
+ }
+ }
+ updateBD(negccov);
+ }
+
+ /**
+  * Update B and D from C.
+  *
+  * @param negccov Negative covariance factor.
+  */
+ private void updateBD(double negccov) {
+ // NOTE(review): "iterations % 1." evaluates first (== 0 for integer
+ // iterations), so the second clause is always true and the eigen
+ // decomposition runs on every call. The Matlab original lazily updates
+ // via mod(iter, 1/(c1+cmu)/N/10) < 1 - precedence here looks suspect,
+ // TODO confirm the intended update schedule (result is correct, only slower).
+ if (ccov1 + ccovmu + negccov > 0 &&
+ (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
+ // to achieve O(N^2)
+ C = triu(C, 0).add(triu(C, 1).transpose());
+ // enforce symmetry to prevent complex numbers
+ final EigenDecomposition eig = new EigenDecomposition(C);
+ B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
+ D = eig.getD();
+ diagD = diag(D);
+ if (min(diagD) <= 0) {
+ // Numerical repair: clamp negative eigenvalues to 0 and add a tiny
+ // multiple of the identity to keep C positive definite.
+ for (int i = 0; i < dimension; i++) {
+ if (diagD.getEntry(i, 0) < 0) {
+ diagD.setEntry(i, 0, 0);
+ }
+ }
+ final double tfac = max(diagD) / 1e14;
+ C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
+ diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
+ }
+ if (max(diagD) > 1e14 * min(diagD)) {
+ // Limit the condition number of C to 1e14.
+ final double tfac = max(diagD) / 1e14 - min(diagD);
+ C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
+ diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
+ }
+ diagC = diag(C);
+ diagD = sqrt(diagD); // D contains standard deviations now
+ BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
+ }
+ }
+
+ /**
+  * Pushes the current best fitness value in a history queue.
+  * The newest value ends up at index 0; the oldest entry is discarded.
+  *
+  * @param vals History queue.
+  * @param val Current best fitness value.
+  */
+ private static void push(double[] vals, double val) {
+     // Shift every entry one slot towards the end (oldest drops off).
+     for (int i = vals.length - 1; i >= 1; i--) {
+         vals[i] = vals[i - 1];
+     }
+     vals[0] = val;
+ }
+
+ /**
+  * Sorts fitness values.
+  *
+  * @param doubles Array of values to be sorted.
+  * @return a sorted array of indices pointing into doubles (ascending value).
+  */
+ private int[] sortedIndices(final double[] doubles) {
+     final int n = doubles.length;
+     // Pair each value with its position, sort by value, then project
+     // the positions back out.
+     final DoubleIndex[] pairs = new DoubleIndex[n];
+     for (int i = 0; i < n; i++) {
+         pairs[i] = new DoubleIndex(doubles[i], i);
+     }
+     Arrays.sort(pairs);
+     final int[] indices = new int[n];
+     for (int i = 0; i < n; i++) {
+         indices[i] = pairs[i].index;
+     }
+     return indices;
+ }
+
+ /**
+  * Used to sort fitness values. Sorting is always in lower value first
+  * order.
+  */
+ private static class DoubleIndex implements Comparable<DoubleIndex> {
+     /** Value to compare. */
+     private final double value;
+     /** Index into sorted array. */
+     private final int index;
+
+     /**
+      * @param value Value to compare.
+      * @param index Index into sorted array.
+      */
+     DoubleIndex(double value, int index) {
+         this.value = value;
+         this.index = index;
+     }
+
+     /** {@inheritDoc} */
+     public int compareTo(DoubleIndex o) {
+         return Double.compare(value, o.value);
+     }
+
+     /**
+      * {@inheritDoc}
+      * Consistent with {@link #compareTo(DoubleIndex)}: only the value is
+      * compared, the index is ignored.
+      */
+     @Override
+     public boolean equals(Object other) {
+
+         if (this == other) {
+             return true;
+         }
+
+         if (other instanceof DoubleIndex) {
+             return Double.compare(value, ((DoubleIndex) other).value) == 0;
+         }
+
+         return false;
+     }
+
+     /** {@inheritDoc} */
+     @Override
+     public int hashCode() {
+         long bits = Double.doubleToLongBits(value);
+         return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
+     }
+ }
+
+ /**
+  * Normalizes fitness values to the range [0,1]. Adds a penalty to the
+  * fitness value if out of range. The penalty is adjusted by calling
+  * setValueRange().
+  */
+ private class FitnessFunction {
+     /** Determines the penalty for boundary violations */
+     private double valueRange;
+     /**
+      * Flag indicating whether the objective variables are forced into their
+      * bounds if defined
+      */
+     private final boolean isRepairMode;
+
+     /** Simple constructor.
+      */
+     public FitnessFunction() {
+         valueRange = 1;
+         isRepairMode = true;
+     }
+
+     /**
+      * @param point Normalized objective variables.
+      * @return the objective value + penalty for violated bounds.
+      */
+     public double value(final double[] point) {
+         final double raw;
+         if (isRepairMode) {
+             // Evaluate at the clipped point and penalize the distance moved.
+             final double[] repaired = repair(point);
+             raw = CMAESOptimizer.this.computeObjectiveValue(repaired) +
+                 penalty(point, repaired);
+         } else {
+             raw = CMAESOptimizer.this.computeObjectiveValue(point);
+         }
+         // Negation turns maximization into the minimization the loop expects.
+         return isMinimize ? raw : -raw;
+     }
+
+     /**
+      * @param x Normalized objective variables.
+      * @return {@code true} if in bounds.
+      */
+     public boolean isFeasible(final double[] x) {
+         final double[] lB = CMAESOptimizer.this.getLowerBound();
+         final double[] uB = CMAESOptimizer.this.getUpperBound();
+
+         for (int i = 0; i < x.length; i++) {
+             if (x[i] < lB[i] || x[i] > uB[i]) {
+                 return false;
+             }
+         }
+         return true;
+     }
+
+     /**
+      * @param valueRange Adjusts the penalty computation.
+      */
+     public void setValueRange(double valueRange) {
+         this.valueRange = valueRange;
+     }
+
+     /**
+      * @param x Normalized objective variables.
+      * @return the repaired (i.e. all in bounds) objective variables.
+      */
+     private double[] repair(final double[] x) {
+         final double[] lB = CMAESOptimizer.this.getLowerBound();
+         final double[] uB = CMAESOptimizer.this.getUpperBound();
+
+         final double[] repaired = new double[x.length];
+         for (int i = 0; i < x.length; i++) {
+             final double xi = x[i];
+             // Clip each coordinate into [lB[i], uB[i]].
+             repaired[i] = xi < lB[i] ? lB[i] : (xi > uB[i] ? uB[i] : xi);
+         }
+         return repaired;
+     }
+
+     /**
+      * @param x Normalized objective variables.
+      * @param repaired Repaired objective variables.
+      * @return Penalty value according to the violation of the bounds.
+      */
+     private double penalty(final double[] x, final double[] repaired) {
+         double penalty = 0;
+         for (int i = 0; i < x.length; i++) {
+             penalty += Math.abs(x[i] - repaired[i]) * valueRange;
+         }
+         return isMinimize ? penalty : -penalty;
+     }
+ }
+
+ // -----Matrix utility functions similar to the Matlab build in functions------
+
+ /**
+  * @param m Input matrix
+  * @return Matrix representing the element-wise logarithm of m.
+  */
+ private static RealMatrix log(final RealMatrix m) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             out[r][c] = Math.log(m.getEntry(r, c));
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @return Matrix representing the element-wise square root of m.
+  */
+ private static RealMatrix sqrt(final RealMatrix m) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             out[r][c] = Math.sqrt(m.getEntry(r, c));
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @return Matrix representing the element-wise square of m.
+  */
+ private static RealMatrix square(final RealMatrix m) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             final double e = m.getEntry(r, c);
+             out[r][c] = e * e;
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix 1.
+  * @param n Input matrix 2.
+  * @return the matrix where the elements of m and n are element-wise multiplied.
+  */
+ private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             out[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix 1.
+  * @param n Input matrix 2.
+  * @return Matrix where the elements of m and n are element-wise divided.
+  */
+ private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             out[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @param cols Columns to select.
+  * @return Matrix representing the selected columns.
+  */
+ private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
+     final int rows = m.getRowDimension();
+     final double[][] out = new double[rows][cols.length];
+     for (int c = 0; c < cols.length; c++) {
+         final int src = cols[c];
+         for (int r = 0; r < rows; r++) {
+             out[r][c] = m.getEntry(r, src);
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @param k Diagonal position (0 = main diagonal, 1 = strictly above it).
+  * @return Upper triangular part of matrix; entries below the k-th
+  * diagonal are zero.
+  */
+ private static RealMatrix triu(final RealMatrix m, int k) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[rows][cols];
+     for (int r = 0; r < rows; r++) {
+         for (int c = 0; c < cols; c++) {
+             // Keep entries on or above the k-th diagonal.
+             if (r <= c - k) {
+                 out[r][c] = m.getEntry(r, c);
+             }
+         }
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @return a 1-by-n row matrix whose entries are the column sums of m.
+  */
+ private static RealMatrix sumRows(final RealMatrix m) {
+     final int rows = m.getRowDimension();
+     final int cols = m.getColumnDimension();
+     final double[][] out = new double[1][cols];
+     for (int c = 0; c < cols; c++) {
+         double acc = 0;
+         for (int r = 0; r < rows; r++) {
+             acc += m.getEntry(r, c);
+         }
+         out[0][c] = acc;
+     }
+     return new Array2DRowRealMatrix(out, false);
+ }
+
+ /**
+  * @param m Input matrix.
+  * @return the diagonal n-by-n matrix if m is a column matrix or the column
+  * matrix representing the diagonal if m is a n-by-n matrix.
+  */
+ private static RealMatrix diag(final RealMatrix m) {
+     if (m.getColumnDimension() == 1) {
+         // Column vector -> square matrix with the vector on the diagonal.
+         final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
+         for (int i = 0; i < m.getRowDimension(); i++) {
+             d[i][i] = m.getEntry(i, 0);
+         }
+         return new Array2DRowRealMatrix(d, false);
+     } else {
+         // Matrix -> column vector of its diagonal. Bound by the smaller
+         // dimension so a non-square input cannot index past the array
+         // (the original looped over the column count while sizing the
+         // result by the row count). Behavior is unchanged for square input.
+         final int n = Math.min(m.getRowDimension(), m.getColumnDimension());
+         final double[][] d = new double[n][1];
+         for (int i = 0; i < n; i++) {
+             d[i][0] = m.getEntry(i, i);
+         }
+         return new Array2DRowRealMatrix(d, false);
+     }
+ }
+
+ /**
+ * Copies a column from m1 to m2.
+ *
+ * @param m1 Source matrix.
+ * @param col1 Source column.
+ * @param m2 Target matrix.
+ * @param col2 Target column.
+ */
+ private static void copyColumn(final RealMatrix m1, int col1,
+ RealMatrix m2, int col2) {
+ for (int i = 0; i < m1.getRowDimension(); i++) {
+ m2.setEntry(i, col2, m1.getEntry(i, col1));
+ }
+ }
+
+ /**
+  * @param n Number of rows.
+  * @param m Number of columns.
+  * @return n-by-m matrix filled with 1.
+  */
+ private static RealMatrix ones(int n, int m) {
+     final double[][] d = new double[n][m];
+     for (final double[] row : d) {
+         Arrays.fill(row, 1);
+     }
+     return new Array2DRowRealMatrix(d, false);
+ }
+
+ /**
+  * @param n Number of rows.
+  * @param m Number of columns.
+  * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
+  * the diagonal.
+  */
+ private static RealMatrix eye(int n, int m) {
+     final double[][] d = new double[n][m];
+     // Only min(n, m) diagonal entries exist.
+     final int diagLen = n < m ? n : m;
+     for (int i = 0; i < diagLen; i++) {
+         d[i][i] = 1;
+     }
+     return new Array2DRowRealMatrix(d, false);
+ }
+
+ /**
+  * @param n Number of rows.
+  * @param m Number of columns.
+  * @return n-by-m matrix of zero values.
+  */
+ private static RealMatrix zeros(int n, int m) {
+     // A freshly allocated double[][] is already all zeros.
+     return new Array2DRowRealMatrix(new double[n][m], false);
+ }
+
+ /**
+  * @param mat Input matrix.
+  * @param n Number of row replicates.
+  * @param m Number of column replicates.
+  * @return a matrix which replicates the input matrix in both directions.
+  */
+ private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
+     final int rd = mat.getRowDimension();
+     final int cd = mat.getColumnDimension();
+     final int outRows = n * rd;
+     final int outCols = m * cd;
+     final double[][] d = new double[outRows][outCols];
+     for (int r = 0; r < outRows; r++) {
+         final int srcRow = r % rd;
+         for (int c = 0; c < outCols; c++) {
+             // Tile the source by wrapping both indices.
+             d[r][c] = mat.getEntry(srcRow, c % cd);
+         }
+     }
+     return new Array2DRowRealMatrix(d, false);
+ }
+
+ /**
+  * @param start Start value.
+  * @param end End value.
+  * @param step Step size.
+  * @return a sequence as column matrix.
+  */
+ private static RealMatrix sequence(double start, double end, double step) {
+     final int size = (int) ((end - start) / step + 1);
+     final double[][] d = new double[size][1];
+     // Accumulate rather than multiply, matching the original rounding.
+     double current = start;
+     for (int r = 0; r < size; r++) {
+         d[r][0] = current;
+         current += step;
+     }
+     return new Array2DRowRealMatrix(d, false);
+ }
+
+ /**
+ * @param m Input matrix.
+ * @return the maximum of the matrix element values.
+ */
+ private static double max(final RealMatrix m) {
+ double max = -Double.MAX_VALUE;
+ for (int r = 0; r < m.getRowDimension(); r++) {
+ for (int c = 0; c < m.getColumnDimension(); c++) {
+ double e = m.getEntry(r, c);
+ if (max < e) {
+ max = e;
+ }
+ }
+ }
+ return max;
+ }
+
+ /**
+ * @param m Input matrix.
+ * @return the minimum of the matrix element values.
+ */
+ private static double min(final RealMatrix m) {
+ double min = Double.MAX_VALUE;
+ for (int r = 0; r < m.getRowDimension(); r++) {
+ for (int c = 0; c < m.getColumnDimension(); c++) {
+ double e = m.getEntry(r, c);
+ if (min > e) {
+ min = e;
+ }
+ }
+ }
+ return min;
+ }
+
+ /**
+  * @param m Input array.
+  * @return the maximum of the array values
+  * (-Double.MAX_VALUE for an empty array).
+  */
+ private static double max(final double[] m) {
+     double result = -Double.MAX_VALUE;
+     for (final double e : m) {
+         if (e > result) {
+             result = e;
+         }
+     }
+     return result;
+ }
+
+ /**
+  * @param m Input array.
+  * @return the minimum of the array values
+  * (Double.MAX_VALUE for an empty array).
+  */
+ private static double min(final double[] m) {
+     double result = Double.MAX_VALUE;
+     for (final double e : m) {
+         if (e < result) {
+             result = e;
+         }
+     }
+     return result;
+ }
+
+ /**
+  * @param indices Input index array (a permutation of 0..n-1).
+  * @return the inverse of the mapping defined by indices.
+  */
+ private static int[] inverse(final int[] indices) {
+     final int[] inv = new int[indices.length];
+     // If indices maps position p to value v, the inverse maps v back to p.
+     for (int pos = 0; pos < indices.length; pos++) {
+         inv[indices[pos]] = pos;
+     }
+     return inv;
+ }
+
+ /**
+  * @param indices Input index array.
+  * @return the indices in inverse order (last is first).
+  */
+ private static int[] reverse(final int[] indices) {
+     final int n = indices.length;
+     final int[] out = new int[n];
+     for (int i = 0; i < n; i++) {
+         out[n - 1 - i] = indices[i];
+     }
+     return out;
+ }
+
+ /**
+  * @param size Length of random array.
+  * @return an array of Gaussian random numbers drawn from {@code random}.
+  */
+ private double[] randn(int size) {
+     final double[] result = new double[size];
+     for (int i = 0; i < size; i++) {
+         result[i] = random.nextGaussian();
+     }
+     return result;
+ }
+
+ /**
+  * @param size Number of rows.
+  * @param popSize Population size (number of columns).
+  * @return a 2-dimensional matrix of Gaussian random numbers drawn
+  * from {@code random}, filled row by row.
+  */
+ private RealMatrix randn1(int size, int popSize) {
+     final double[][] d = new double[size][popSize];
+     for (int r = 0; r < size; r++) {
+         final double[] row = d[r];
+         for (int c = 0; c < popSize; c++) {
+             row[c] = random.nextGaussian();
+         }
+     }
+     return new Array2DRowRealMatrix(d, false);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/MultiDirectionalSimplex.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/MultiDirectionalSimplex.java
new file mode 100644
index 000000000..0b06ab15a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/MultiDirectionalSimplex.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Comparator;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.optim.PointValuePair;
+
+/**
+ * This class implements the multi-directional direct search method.
+ *
+ * @version $Id: MultiDirectionalSimplex.java 1364392 2012-07-22 18:27:12Z tn $
+ * @since 3.0
+ */
+public class MultiDirectionalSimplex extends AbstractSimplex {
+ /** Default value for {@link #khi}: {@value}. */
+ private static final double DEFAULT_KHI = 2;
+ /** Default value for {@link #gamma}: {@value}. */
+ private static final double DEFAULT_GAMMA = 0.5;
+ /** Expansion coefficient. */
+ private final double khi;
+ /** Contraction coefficient. */
+ private final double gamma;
+
+    /**
+     * Build a multi-directional simplex with default coefficients.
+     * The default values are 2.0 for khi and 0.5 for gamma.
+     * The hypercube side length is 1.
+     *
+     * @param n Dimension of the simplex.
+     */
+    public MultiDirectionalSimplex(final int n) {
+        this(n, 1d);
+    }
+
+    /**
+     * Build a multi-directional simplex with default coefficients.
+     * The default values are 2.0 for khi and 0.5 for gamma.
+     *
+     * @param n Dimension of the simplex.
+     * @param sideLength Length of the sides of the default (hypercube)
+     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     */
+    public MultiDirectionalSimplex(final int n, double sideLength) {
+        this(n, sideLength, DEFAULT_KHI, DEFAULT_GAMMA);
+    }
+
+    /**
+     * Build a multi-directional simplex with specified coefficients.
+     * The hypercube side length is 1.
+     *
+     * @param n Dimension of the simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     */
+    public MultiDirectionalSimplex(final int n,
+                                   final double khi, final double gamma) {
+        this(n, 1d, khi, gamma);
+    }
+
+    /**
+     * Build a multi-directional simplex with specified coefficients.
+     *
+     * @param n Dimension of the simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     * @param sideLength Length of the sides of the default (hypercube)
+     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     */
+    public MultiDirectionalSimplex(final int n, double sideLength,
+                                   final double khi, final double gamma) {
+        super(n, sideLength);
+
+        this.khi = khi;
+        this.gamma = gamma;
+    }
+
+    /**
+     * Build a multi-directional simplex with default coefficients.
+     * The default values are 2.0 for khi and 0.5 for gamma.
+     *
+     * @param steps Steps along the canonical axes representing box edges.
+     * They may be negative but not zero. See
+     * {@link AbstractSimplex#AbstractSimplex(double[])}.
+     */
+    public MultiDirectionalSimplex(final double[] steps) {
+        this(steps, DEFAULT_KHI, DEFAULT_GAMMA);
+    }
+
+    /**
+     * Build a multi-directional simplex with specified coefficients.
+     *
+     * @param steps Steps along the canonical axes representing box edges.
+     * They may be negative but not zero. See
+     * {@link AbstractSimplex#AbstractSimplex(double[])}.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     */
+    public MultiDirectionalSimplex(final double[] steps,
+                                   final double khi, final double gamma) {
+        super(steps);
+
+        this.khi = khi;
+        this.gamma = gamma;
+    }
+
+    /**
+     * Build a multi-directional simplex with default coefficients.
+     * The default values are 2.0 for khi and 0.5 for gamma.
+     *
+     * @param referenceSimplex Reference simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
+     */
+    public MultiDirectionalSimplex(final double[][] referenceSimplex) {
+        this(referenceSimplex, DEFAULT_KHI, DEFAULT_GAMMA);
+    }
+
+    /**
+     * Build a multi-directional simplex with specified coefficients.
+     *
+     * @param referenceSimplex Reference simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     * @throws org.apache.commons.math3.exception.NotStrictlyPositiveException
+     * if the reference simplex does not contain at least one point.
+     * @throws org.apache.commons.math3.exception.DimensionMismatchException
+     * if there is a dimension mismatch in the reference simplex.
+     */
+    public MultiDirectionalSimplex(final double[][] referenceSimplex,
+                                   final double khi, final double gamma) {
+        super(referenceSimplex);
+
+        this.khi = khi;
+        this.gamma = gamma;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public void iterate(final MultivariateFunction evaluationFunction,
+                        final Comparator<PointValuePair> comparator) {
+        // Save the original simplex.
+        final PointValuePair[] original = getPoints();
+        final PointValuePair best = original[0];
+
+        // Perform a reflection step.
+        final PointValuePair reflected = evaluateNewSimplex(evaluationFunction,
+                                                            original, 1, comparator);
+        if (comparator.compare(reflected, best) < 0) {
+            // Reflection improved on the best point: try an expansion.
+            final PointValuePair[] reflectedSimplex = getPoints();
+            final PointValuePair expanded = evaluateNewSimplex(evaluationFunction,
+                                                               original, khi, comparator);
+            if (comparator.compare(reflected, expanded) <= 0) {
+                // Expansion did not beat the reflection: restore the
+                // reflected simplex.
+                setPoints(reflectedSimplex);
+            }
+            // Otherwise the expanded simplex (already current) is kept.
+            return;
+        }
+
+        // Reflection failed to improve: compute the contracted simplex.
+        evaluateNewSimplex(evaluationFunction, original, gamma, comparator);
+    }
+
+    /**
+     * Compute and evaluate a new simplex obtained by a linear transformation
+     * of the original vertices about the best vertex.
+     *
+     * @param evaluationFunction Evaluation function.
+     * @param original Original simplex (to be preserved).
+     * @param coeff Linear coefficient (1 for reflection, {@link #khi} for
+     * expansion, {@link #gamma} for contraction).
+     * @param comparator Comparator to use to sort simplex vertices from best
+     * to poorest.
+     * @return the best point in the transformed simplex.
+     * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+     * if the maximal number of evaluations is exceeded.
+     */
+    private PointValuePair evaluateNewSimplex(final MultivariateFunction evaluationFunction,
+                                              final PointValuePair[] original,
+                                              final double coeff,
+                                              final Comparator<PointValuePair> comparator) {
+        final double[] xSmallest = original[0].getPointRef();
+        // Perform a linear transformation on all the simplex points,
+        // except the first one (the best vertex is kept fixed).
+        setPoint(0, original[0]);
+        final int dim = getDimension();
+        for (int i = 1; i < getSize(); i++) {
+            final double[] xOriginal = original[i].getPointRef();
+            final double[] xTransformed = new double[dim];
+            for (int j = 0; j < dim; j++) {
+                xTransformed[j] = xSmallest[j] + coeff * (xSmallest[j] - xOriginal[j]);
+            }
+            // NaN marks the vertex as not yet evaluated.
+            setPoint(i, new PointValuePair(xTransformed, Double.NaN, false));
+        }
+
+        // Evaluate the simplex (sorts vertices from best to poorest).
+        evaluate(evaluationFunction, comparator);
+
+        return getPoint(0);
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/NelderMeadSimplex.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/NelderMeadSimplex.java
new file mode 100644
index 000000000..ea76ab049
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/NelderMeadSimplex.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Comparator;
+
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+
+/**
+ * This class implements the Nelder-Mead simplex algorithm.
+ *
+ * @version $Id: NelderMeadSimplex.java 1364392 2012-07-22 18:27:12Z tn $
+ * @since 3.0
+ */
+public class NelderMeadSimplex extends AbstractSimplex {
+ /** Default value for {@link #rho}: {@value}. */
+ private static final double DEFAULT_RHO = 1;
+ /** Default value for {@link #khi}: {@value}. */
+ private static final double DEFAULT_KHI = 2;
+ /** Default value for {@link #gamma}: {@value}. */
+ private static final double DEFAULT_GAMMA = 0.5;
+ /** Default value for {@link #sigma}: {@value}. */
+ private static final double DEFAULT_SIGMA = 0.5;
+ /** Reflection coefficient. */
+ private final double rho;
+ /** Expansion coefficient. */
+ private final double khi;
+ /** Contraction coefficient. */
+ private final double gamma;
+ /** Shrinkage coefficient. */
+ private final double sigma;
+
+    /**
+     * Build a Nelder-Mead simplex with default coefficients.
+     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
+     * for both gamma and sigma.
+     * The hypercube side length is 1.
+     *
+     * @param n Dimension of the simplex.
+     */
+    public NelderMeadSimplex(final int n) {
+        this(n, 1d);
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with default coefficients.
+     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
+     * for both gamma and sigma.
+     *
+     * @param n Dimension of the simplex.
+     * @param sideLength Length of the sides of the default (hypercube)
+     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     */
+    public NelderMeadSimplex(final int n, double sideLength) {
+        this(n, sideLength,
+             DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with specified coefficients.
+     *
+     * @param n Dimension of the simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     * @param sideLength Length of the sides of the default (hypercube)
+     * simplex. See {@link AbstractSimplex#AbstractSimplex(int,double)}.
+     * @param rho Reflection coefficient.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     * @param sigma Shrinkage coefficient.
+     */
+    public NelderMeadSimplex(final int n, double sideLength,
+                             final double rho, final double khi,
+                             final double gamma, final double sigma) {
+        super(n, sideLength);
+
+        this.rho = rho;
+        this.khi = khi;
+        this.gamma = gamma;
+        this.sigma = sigma;
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with specified coefficients.
+     * The hypercube side length is 1.
+     *
+     * @param n Dimension of the simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(int)}.
+     * @param rho Reflection coefficient.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     * @param sigma Shrinkage coefficient.
+     */
+    public NelderMeadSimplex(final int n,
+                             final double rho, final double khi,
+                             final double gamma, final double sigma) {
+        this(n, 1d, rho, khi, gamma, sigma);
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with default coefficients.
+     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
+     * for both gamma and sigma.
+     *
+     * @param steps Steps along the canonical axes representing box edges.
+     * They may be negative but not zero. See
+     * {@link AbstractSimplex#AbstractSimplex(double[])}.
+     */
+    public NelderMeadSimplex(final double[] steps) {
+        this(steps, DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with specified coefficients.
+     *
+     * @param steps Steps along the canonical axes representing box edges.
+     * They may be negative but not zero. See
+     * {@link AbstractSimplex#AbstractSimplex(double[])}.
+     * @param rho Reflection coefficient.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     * @param sigma Shrinkage coefficient.
+     * @throws IllegalArgumentException if one of the steps is zero.
+     */
+    public NelderMeadSimplex(final double[] steps,
+                             final double rho, final double khi,
+                             final double gamma, final double sigma) {
+        super(steps);
+
+        this.rho = rho;
+        this.khi = khi;
+        this.gamma = gamma;
+        this.sigma = sigma;
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with default coefficients.
+     * The default coefficients are 1.0 for rho, 2.0 for khi and 0.5
+     * for both gamma and sigma.
+     *
+     * @param referenceSimplex Reference simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
+     */
+    public NelderMeadSimplex(final double[][] referenceSimplex) {
+        this(referenceSimplex, DEFAULT_RHO, DEFAULT_KHI, DEFAULT_GAMMA, DEFAULT_SIGMA);
+    }
+
+    /**
+     * Build a Nelder-Mead simplex with specified coefficients.
+     *
+     * @param referenceSimplex Reference simplex. See
+     * {@link AbstractSimplex#AbstractSimplex(double[][])}.
+     * @param rho Reflection coefficient.
+     * @param khi Expansion coefficient.
+     * @param gamma Contraction coefficient.
+     * @param sigma Shrinkage coefficient.
+     * @throws org.apache.commons.math3.exception.NotStrictlyPositiveException
+     * if the reference simplex does not contain at least one point.
+     * @throws org.apache.commons.math3.exception.DimensionMismatchException
+     * if there is a dimension mismatch in the reference simplex.
+     */
+    public NelderMeadSimplex(final double[][] referenceSimplex,
+                             final double rho, final double khi,
+                             final double gamma, final double sigma) {
+        super(referenceSimplex);
+
+        this.rho = rho;
+        this.khi = khi;
+        this.gamma = gamma;
+        this.sigma = sigma;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public void iterate(final MultivariateFunction evaluationFunction,
+                        final Comparator<PointValuePair> comparator) {
+        // The simplex has n + 1 points if dimension is n.
+        final int n = getDimension();
+
+        // Interesting values (the simplex is sorted from best to worst).
+        final PointValuePair best = getPoint(0);
+        final PointValuePair secondBest = getPoint(n - 1);
+        final PointValuePair worst = getPoint(n);
+        final double[] xWorst = worst.getPointRef();
+
+        // Compute the centroid of the best vertices (dismissing the worst
+        // point at index n).
+        final double[] centroid = new double[n];
+        for (int i = 0; i < n; i++) {
+            final double[] x = getPoint(i).getPointRef();
+            for (int j = 0; j < n; j++) {
+                centroid[j] += x[j];
+            }
+        }
+        final double scaling = 1.0 / n;
+        for (int j = 0; j < n; j++) {
+            centroid[j] *= scaling;
+        }
+
+        // Compute the reflection point.
+        final double[] xR = new double[n];
+        for (int j = 0; j < n; j++) {
+            xR[j] = centroid[j] + rho * (centroid[j] - xWorst[j]);
+        }
+        final PointValuePair reflected
+            = new PointValuePair(xR, evaluationFunction.value(xR), false);
+
+        if (comparator.compare(best, reflected) <= 0 &&
+            comparator.compare(reflected, secondBest) < 0) {
+            // Accept the reflected point.
+            replaceWorstPoint(reflected, comparator);
+        } else if (comparator.compare(reflected, best) < 0) {
+            // Reflection is the new best: compute the expansion point.
+            final double[] xE = new double[n];
+            for (int j = 0; j < n; j++) {
+                xE[j] = centroid[j] + khi * (xR[j] - centroid[j]);
+            }
+            final PointValuePair expanded
+                = new PointValuePair(xE, evaluationFunction.value(xE), false);
+
+            if (comparator.compare(expanded, reflected) < 0) {
+                // Accept the expansion point.
+                replaceWorstPoint(expanded, comparator);
+            } else {
+                // Accept the reflected point.
+                replaceWorstPoint(reflected, comparator);
+            }
+        } else {
+            if (comparator.compare(reflected, worst) < 0) {
+                // Perform an outside contraction.
+                final double[] xC = new double[n];
+                for (int j = 0; j < n; j++) {
+                    xC[j] = centroid[j] + gamma * (xR[j] - centroid[j]);
+                }
+                final PointValuePair outContracted
+                    = new PointValuePair(xC, evaluationFunction.value(xC), false);
+                if (comparator.compare(outContracted, reflected) <= 0) {
+                    // Accept the contraction point.
+                    replaceWorstPoint(outContracted, comparator);
+                    return;
+                }
+            } else {
+                // Perform an inside contraction.
+                final double[] xC = new double[n];
+                for (int j = 0; j < n; j++) {
+                    xC[j] = centroid[j] - gamma * (centroid[j] - xWorst[j]);
+                }
+                final PointValuePair inContracted
+                    = new PointValuePair(xC, evaluationFunction.value(xC), false);
+
+                if (comparator.compare(inContracted, worst) < 0) {
+                    // Accept the contraction point.
+                    replaceWorstPoint(inContracted, comparator);
+                    return;
+                }
+            }
+
+            // Both contractions failed: shrink the whole simplex towards
+            // the best vertex.
+            final double[] xSmallest = getPoint(0).getPointRef();
+            for (int i = 1; i <= n; i++) {
+                final double[] x = getPoint(i).getPoint();
+                for (int j = 0; j < n; j++) {
+                    x[j] = xSmallest[j] + sigma * (x[j] - xSmallest[j]);
+                }
+                // NaN marks the vertex as not yet evaluated.
+                setPoint(i, new PointValuePair(x, Double.NaN, false));
+            }
+            evaluate(evaluationFunction, comparator);
+        }
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java
new file mode 100644
index 000000000..11625a128
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.MathArrays;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
+import org.apache.commons.math3.optim.univariate.BracketFinder;
+import org.apache.commons.math3.optim.univariate.BrentOptimizer;
+import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;
+import org.apache.commons.math3.optim.univariate.SimpleUnivariateValueChecker;
+import org.apache.commons.math3.optim.univariate.SearchInterval;
+import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
+
+/**
+ * Powell algorithm.
+ * This code is translated and adapted from the Python version of this
+ * algorithm (as implemented in module {@code optimize.py} v0.5 of
+ * SciPy).
+ *
+ * The default stopping criterion is based on the differences of the
+ * function value between two successive iterations. It is however possible
+ * to define a custom convergence checker that might terminate the algorithm
+ * earlier.
+ *
+ * The internal line search optimizer is a {@link BrentOptimizer} with a
+ * convergence checker set to {@link SimpleUnivariateValueChecker}.
+ *
+ * @version $Id: PowellOptimizer.java 1413594 2012-11-26 13:16:39Z erans $
+ * @since 2.2
+ */
+public class PowellOptimizer
+ extends MultivariateOptimizer {
+ /**
+ * Minimum relative tolerance.
+ */
+ private static final double MIN_RELATIVE_TOLERANCE = 2 * FastMath.ulp(1d);
+ /**
+ * Relative threshold.
+ */
+ private final double relativeThreshold;
+ /**
+ * Absolute threshold.
+ */
+ private final double absoluteThreshold;
+ /**
+ * Line search.
+ */
+ private final LineSearch line;
+
+    /**
+     * This constructor allows to specify a user-defined convergence checker,
+     * in addition to the parameters that control the default convergence
+     * checking procedure.
+     *
+     * The internal line search tolerances are set to the square-root of their
+     * corresponding value in the multivariate optimizer.
+     *
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     * @param checker Convergence checker.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public PowellOptimizer(double rel,
+                           double abs,
+                           ConvergenceChecker<PointValuePair> checker) {
+        this(rel, abs, FastMath.sqrt(rel), FastMath.sqrt(abs), checker);
+    }
+
+    /**
+     * This constructor allows to specify a user-defined convergence checker,
+     * in addition to the parameters that control the default convergence
+     * checking procedure and the line search tolerances.
+     *
+     * @param rel Relative threshold for this optimizer.
+     * @param abs Absolute threshold for this optimizer.
+     * @param lineRel Relative threshold for the internal line search optimizer.
+     * @param lineAbs Absolute threshold for the internal line search optimizer.
+     * @param checker Convergence checker.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public PowellOptimizer(double rel,
+                           double abs,
+                           double lineRel,
+                           double lineAbs,
+                           ConvergenceChecker<PointValuePair> checker) {
+        super(checker);
+
+        if (rel < MIN_RELATIVE_TOLERANCE) {
+            throw new NumberIsTooSmallException(rel, MIN_RELATIVE_TOLERANCE, true);
+        }
+        if (abs <= 0) {
+            throw new NotStrictlyPositiveException(abs);
+        }
+        relativeThreshold = rel;
+        absoluteThreshold = abs;
+
+        // Create the line search optimizer.
+        line = new LineSearch(lineRel,
+                              lineAbs);
+    }
+
+    /**
+     * The parameters control the default convergence checking procedure.
+     *
+     * The internal line search tolerances are set to the square-root of their
+     * corresponding value in the multivariate optimizer.
+     *
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public PowellOptimizer(double rel,
+                           double abs) {
+        this(rel, abs, null);
+    }
+
+    /**
+     * Builds an instance with the default convergence checking procedure.
+     *
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     * @param lineRel Relative threshold for the internal line search optimizer.
+     * @param lineAbs Absolute threshold for the internal line search optimizer.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public PowellOptimizer(double rel,
+                           double abs,
+                           double lineRel,
+                           double lineAbs) {
+        this(rel, abs, lineRel, lineAbs, null);
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected PointValuePair doOptimize() {
+        final GoalType goal = getGoalType();
+        final double[] guess = getStartPoint();
+        final int n = guess.length;
+
+        // Initial search directions: the canonical basis vectors.
+        final double[][] direc = new double[n][n];
+        for (int i = 0; i < n; i++) {
+            direc[i][i] = 1;
+        }
+
+        final ConvergenceChecker<PointValuePair> checker
+            = getConvergenceChecker();
+
+        double[] x = guess;
+        double fVal = computeObjectiveValue(x);
+        double[] x1 = x.clone();
+        int iter = 0;
+        while (true) {
+            ++iter;
+
+            double fX = fVal;
+            double fX2 = 0;
+            double delta = 0;
+            int bigInd = 0;
+            double alphaMin = 0;
+
+            // Minimize along each direction in turn, remembering the
+            // direction that produced the largest decrease.
+            for (int i = 0; i < n; i++) {
+                final double[] d = MathArrays.copyOf(direc[i]);
+
+                fX2 = fVal;
+
+                final UnivariatePointValuePair optimum = line.search(x, d);
+                fVal = optimum.getValue();
+                alphaMin = optimum.getPoint();
+                final double[][] result = newPointAndDirection(x, d, alphaMin);
+                x = result[0];
+
+                if ((fX2 - fVal) > delta) {
+                    delta = fX2 - fVal;
+                    bigInd = i;
+                }
+            }
+
+            // Default convergence check.
+            boolean stop = 2 * (fX - fVal) <=
+                (relativeThreshold * (FastMath.abs(fX) + FastMath.abs(fVal)) +
+                 absoluteThreshold);
+
+            final PointValuePair previous = new PointValuePair(x1, fX);
+            final PointValuePair current = new PointValuePair(x, fVal);
+            if (!stop) { // User-defined stopping criteria.
+                if (checker != null) {
+                    stop = checker.converged(iter, previous, current);
+                }
+            }
+            if (stop) {
+                if (goal == GoalType.MINIMIZE) {
+                    return (fVal < fX) ? current : previous;
+                } else {
+                    return (fVal > fX) ? current : previous;
+                }
+            }
+
+            // Construct the extrapolated point and the average direction.
+            final double[] d = new double[n];
+            final double[] x2 = new double[n];
+            for (int i = 0; i < n; i++) {
+                d[i] = x[i] - x1[i];
+                x2[i] = 2 * x[i] - x1[i];
+            }
+
+            x1 = x.clone();
+            fX2 = computeObjectiveValue(x2);
+
+            if (fX > fX2) {
+                // Powell's criterion for replacing the direction of
+                // largest decrease with the average direction.
+                double t = 2 * (fX + fX2 - 2 * fVal);
+                double temp = fX - fVal - delta;
+                t *= temp * temp;
+                temp = fX - fX2;
+                t -= delta * temp * temp;
+
+                if (t < 0.0) {
+                    final UnivariatePointValuePair optimum = line.search(x, d);
+                    fVal = optimum.getValue();
+                    alphaMin = optimum.getPoint();
+                    final double[][] result = newPointAndDirection(x, d, alphaMin);
+                    x = result[0];
+
+                    final int lastInd = n - 1;
+                    direc[bigInd] = direc[lastInd];
+                    direc[lastInd] = result[1];
+                }
+            }
+        }
+    }
+
+    /**
+     * Compute a new point (in the original space) and a new direction
+     * vector, resulting from the line search.
+     *
+     * @param p Point used in the line search.
+     * @param d Direction used in the line search.
+     * @param optimum Optimum found by the line search.
+     * @return a 2-element array containing the new point (at index 0) and
+     * the new direction (at index 1).
+     */
+    private double[][] newPointAndDirection(double[] p,
+                                            double[] d,
+                                            double optimum) {
+        final int n = p.length;
+        final double[] newDirection = new double[n];
+        final double[] newPoint = new double[n];
+        for (int i = 0; i < n; i++) {
+            // Scale the direction by the optimal step, then translate.
+            newDirection[i] = d[i] * optimum;
+            newPoint[i] = p[i] + newDirection[i];
+        }
+        return new double[][] { newPoint, newDirection };
+    }
+
+    /**
+     * Class for finding the minimum of the objective function along a given
+     * direction.
+     */
+    private class LineSearch extends BrentOptimizer {
+        /**
+         * Value that will pass the precondition check for {@link BrentOptimizer}
+         * but will not pass the convergence check, so that the custom checker
+         * will always decide when to stop the line search.
+         */
+        private static final double REL_TOL_UNUSED = 1e-15;
+        /**
+         * Value that will pass the precondition check for {@link BrentOptimizer}
+         * but will not pass the convergence check, so that the custom checker
+         * will always decide when to stop the line search.
+         */
+        private static final double ABS_TOL_UNUSED = Double.MIN_VALUE;
+        /**
+         * Automatic bracketing (finds an interval containing a minimum
+         * before the Brent search is started).
+         */
+        private final BracketFinder bracket = new BracketFinder();
+
+        /**
+         * The "BrentOptimizer" default stopping criterion uses the tolerances
+         * to check the domain (point) values, not the function values.
+         * We thus create a custom checker to use function values.
+         *
+         * @param rel Relative threshold.
+         * @param abs Absolute threshold.
+         */
+        LineSearch(double rel,
+                   double abs) {
+            super(REL_TOL_UNUSED,
+                  ABS_TOL_UNUSED,
+                  new SimpleUnivariateValueChecker(rel, abs));
+        }
+
+        /**
+         * Find the minimum of the function {@code f(p + alpha * d)}.
+         *
+         * @param p Starting point.
+         * @param d Search direction.
+         * @return the optimum.
+         * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+         * if the number of evaluations is exceeded.
+         */
+        public UnivariatePointValuePair search(final double[] p, final double[] d) {
+            final int n = p.length;
+            // Restriction of the objective function to the search line.
+            final UnivariateFunction f = new UnivariateFunction() {
+                public double value(double alpha) {
+                    final double[] x = new double[n];
+                    for (int i = 0; i < n; i++) {
+                        x[i] = p[i] + alpha * d[i];
+                    }
+                    final double obj = PowellOptimizer.this.computeObjectiveValue(x);
+                    return obj;
+                }
+            };
+
+            final GoalType goal = PowellOptimizer.this.getGoalType();
+            bracket.search(f, goal, 0, 1);
+            // Passing "MAX_VALUE" as a dummy value because it is the enclosing
+            // class that counts the number of evaluations (and will eventually
+            // generate the exception).
+            return optimize(new MaxEval(Integer.MAX_VALUE),
+                            new UnivariateObjectiveFunction(f),
+                            goal,
+                            new SearchInterval(bracket.getLo(),
+                                               bracket.getHi(),
+                                               bracket.getMid()));
+        }
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java
new file mode 100644
index 000000000..04d80d154
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Comparator;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.SimpleValueChecker;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
+
+/**
+ * This class implements simplex-based direct search optimization.
+ *
+ *
+ * Direct search methods only use objective function values, they do
+ * not need derivatives and don't either try to compute approximation
+ * of the derivatives. According to a 1996 paper by Margaret H. Wright
+ * (Direct
+ * Search Methods: Once Scorned, Now Respectable), they are used
+ * when either the computation of the derivative is impossible (noisy
+ * functions, unpredictable discontinuities) or difficult (complexity,
+ * computation cost). In the first cases, rather than an optimum, a
+ * not too bad point is desired. In the latter cases, an
+ * optimum is desired but cannot be reasonably found. In all cases
+ * direct search methods can be useful.
+ *
+ *
+ * Simplex-based direct search methods are based on comparison of
+ * the objective function values at the vertices of a simplex (which is a
+ * set of n+1 points in dimension n) that is updated by the algorithm's
+ * steps.
+ *
+ *
+ * The simplex update procedure ({@link NelderMeadSimplex} or
+ * {@link MultiDirectionalSimplex}) must be passed to the
+ * {@code optimize} method.
+ *
+ *
+ * Each call to {@code optimize} will re-use the start configuration of
+ * the current simplex and move it such that its first vertex is at the
+ * provided start point of the optimization.
+ * If the {@code optimize} method is called to solve a different problem
+ * and the number of parameters change, the simplex must be re-initialized
+ * to one with the appropriate dimensions.
+ *
+ *
+ * Convergence is checked by providing the worst points of
+ * previous and current simplex to the convergence checker, not the best
+ * ones.
+ *
+ *
+ * This simplex optimizer implementation does not directly support constrained
+ * optimization with simple bounds; so, for such optimizations, either a more
+ * dedicated algorithm must be used like
+ * {@link CMAESOptimizer} or {@link BOBYQAOptimizer}, or the objective
+ * function must be wrapped in an adapter like
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter
+ * MultivariateFunctionMappingAdapter} or
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionPenaltyAdapter
+ * MultivariateFunctionPenaltyAdapter}.
+ *
+ *
+ * @version $Id: SimplexOptimizer.java 1397759 2012-10-13 01:12:58Z erans $
+ * @since 3.0
+ */
+public class SimplexOptimizer extends MultivariateOptimizer {
+    /** Simplex update rule. */
+    private AbstractSimplex simplex;
+
+    /**
+     * @param checker Convergence checker.
+     */
+    public SimplexOptimizer(ConvergenceChecker<PointValuePair> checker) {
+        super(checker);
+    }
+
+    /**
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     */
+    public SimplexOptimizer(double rel, double abs) {
+        this(new SimpleValueChecker(rel, abs));
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @param optData Optimization data.
+     * The following data will be looked for:
+     * <ul>
+     *  <li>{@link org.apache.commons.math3.optim.MaxEval}</li>
+     *  <li>{@link org.apache.commons.math3.optim.InitialGuess}</li>
+     *  <li>{@link org.apache.commons.math3.optim.SimpleBounds}</li>
+     *  <li>{@link AbstractSimplex}</li>
+     * </ul>
+     * @return {@inheritDoc}
+     */
+    @Override
+    public PointValuePair optimize(OptimizationData... optData) {
+        // Retrieve settings (in particular, the simplex update rule).
+        parseOptimizationData(optData);
+        // Set up base class and perform computation.
+        return super.optimize(optData);
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected PointValuePair doOptimize() {
+        if (simplex == null) {
+            throw new NullArgumentException();
+        }
+
+        // Indirect call to "computeObjectiveValue" in order to update the
+        // evaluations counter.
+        final MultivariateFunction evalFunc
+            = new MultivariateFunction() {
+                public double value(double[] point) {
+                    return computeObjectiveValue(point);
+                }
+            };
+
+        final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
+        final Comparator<PointValuePair> comparator
+            = new Comparator<PointValuePair>() {
+                public int compare(final PointValuePair o1,
+                                   final PointValuePair o2) {
+                    final double v1 = o1.getValue();
+                    final double v2 = o2.getValue();
+                    return isMinim ? Double.compare(v1, v2) : Double.compare(v2, v1);
+                }
+            };
+
+        // Initialize search.
+        simplex.build(getStartPoint());
+        simplex.evaluate(evalFunc, comparator);
+
+        PointValuePair[] previous = null;
+        int iteration = 0;
+        final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
+        while (true) {
+            if (iteration > 0) {
+                // Convergence is checked on the whole simplex: every vertex
+                // must satisfy the checker w.r.t. its previous position.
+                boolean converged = true;
+                for (int i = 0; i < simplex.getSize(); i++) {
+                    PointValuePair prev = previous[i];
+                    converged = converged &&
+                        checker.converged(iteration, prev, simplex.getPoint(i));
+                }
+                if (converged) {
+                    // We have found an optimum.
+                    return simplex.getPoint(0);
+                }
+            }
+
+            // We still need to search.
+            previous = simplex.getPoints();
+            simplex.iterate(evalFunc, comparator);
+            ++iteration;
+        }
+    }
+
+    /**
+     * Scans the list of (required and optional) optimization data that
+     * characterize the problem.
+     *
+     * @param optData Optimization data.
+     * The following data will be looked for:
+     * <ul>
+     *  <li>{@link AbstractSimplex}</li>
+     * </ul>
+     */
+    private void parseOptimizationData(OptimizationData... optData) {
+        // The existing values (as set by the previous call) are reused if
+        // not provided in the argument list.
+        for (OptimizationData data : optData) {
+            if (data instanceof AbstractSimplex) {
+                simplex = (AbstractSimplex) data;
+                // If more data must be parsed, this statement _must_ be
+                // changed to "continue".
+                break;
+            }
+        }
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/package-info.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/package-info.java
new file mode 100644
index 000000000..25af94bc3
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+/**
+ * This package provides optimization algorithms that do not require derivatives.
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/package-info.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/package-info.java
new file mode 100644
index 000000000..a3ee653ae
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+/**
+ * Algorithms for optimizing a scalar function.
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/JacobianMultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/JacobianMultivariateVectorOptimizer.java
new file mode 100644
index 000000000..0997c6667
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/JacobianMultivariateVectorOptimizer.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+
+/**
+ * Base class for implementing optimizers for multivariate vector
+ * differentiable functions.
+ * It contains boiler-plate code for dealing with Jacobian evaluation.
+ * It assumes that the rows of the Jacobian matrix iterate on the model
+ * functions while the columns iterate on the parameters; thus, the number
+ * of rows is equal to the dimension of the {@link Target} while the
+ * number of columns is equal to the dimension of the
+ * {@link org.apache.commons.math3.optim.InitialGuess InitialGuess}.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class JacobianMultivariateVectorOptimizer
+    extends MultivariateVectorOptimizer {
+    /**
+     * Jacobian of the model function.
+     */
+    private MultivariateMatrixFunction jacobian;
+
+    /**
+     * @param checker Convergence checker.
+     */
+    protected JacobianMultivariateVectorOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+        super(checker);
+    }
+
+    /**
+     * Computes the Jacobian matrix.
+     *
+     * @param params Point at which the Jacobian must be evaluated.
+     * @return the Jacobian at the specified point.
+     */
+    protected double[][] computeJacobian(final double[] params) {
+        return jacobian.value(params);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @param optData Optimization data. The following data will be looked for:
+     * <ul>
+     *  <li>{@link org.apache.commons.math3.optim.MaxEval}</li>
+     *  <li>{@link org.apache.commons.math3.optim.InitialGuess}</li>
+     *  <li>{@link org.apache.commons.math3.optim.SimpleBounds}</li>
+     *  <li>{@link Target}</li>
+     *  <li>{@link Weight}</li>
+     *  <li>{@link ModelFunction}</li>
+     *  <li>{@link ModelFunctionJacobian}</li>
+     * </ul>
+     * @return {@inheritDoc}
+     * @throws TooManyEvaluationsException if the maximal number of
+     * evaluations is exceeded.
+     * @throws DimensionMismatchException if the initial guess, target, and weight
+     * arguments have inconsistent dimensions.
+     */
+    @Override
+    public PointVectorValuePair optimize(OptimizationData... optData)
+        throws TooManyEvaluationsException,
+               DimensionMismatchException {
+        // Retrieve settings.
+        parseOptimizationData(optData);
+        // Set up base class and perform computation.
+        return super.optimize(optData);
+    }
+
+    /**
+     * Scans the list of (required and optional) optimization data that
+     * characterize the problem.
+     *
+     * @param optData Optimization data.
+     * The following data will be looked for:
+     * <ul>
+     *  <li>{@link ModelFunctionJacobian}</li>
+     * </ul>
+     */
+    private void parseOptimizationData(OptimizationData... optData) {
+        // The existing values (as set by the previous call) are reused if
+        // not provided in the argument list.
+        for (OptimizationData data : optData) {
+            if (data instanceof ModelFunctionJacobian) {
+                jacobian = ((ModelFunctionJacobian) data).getModelFunctionJacobian();
+                // If more data must be parsed, this statement _must_ be
+                // changed to "continue".
+                break;
+            }
+        }
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunction.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunction.java
new file mode 100644
index 000000000..586997ae0
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Model (vector) function to be optimized.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class ModelFunction implements OptimizationData {
+    /** Underlying vector-valued function to be optimized. */
+    private final MultivariateVectorFunction function;
+
+    /**
+     * @param m Model function to be optimized.
+     */
+    public ModelFunction(MultivariateVectorFunction m) {
+        this.function = m;
+    }
+
+    /**
+     * Gets the model function to be optimized.
+     *
+     * @return the model function.
+     */
+    public MultivariateVectorFunction getModelFunction() {
+        return function;
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunctionJacobian.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunctionJacobian.java
new file mode 100644
index 000000000..774f06cd6
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/ModelFunctionJacobian.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Jacobian of the model (vector) function to be optimized.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class ModelFunctionJacobian implements OptimizationData {
+    /** Jacobian of the function to be optimized. */
+    private final MultivariateMatrixFunction jacobianFunction;
+
+    /**
+     * @param j Jacobian of the model function to be optimized.
+     */
+    public ModelFunctionJacobian(MultivariateMatrixFunction j) {
+        this.jacobianFunction = j;
+    }
+
+    /**
+     * Gets the Jacobian of the model function to be optimized.
+     *
+     * @return the model function Jacobian.
+     */
+    public MultivariateMatrixFunction getModelFunctionJacobian() {
+        return jacobianFunction;
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizer.java
new file mode 100644
index 000000000..c90a5b373
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizer.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Comparator;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.random.RandomVectorGenerator;
+import org.apache.commons.math3.optim.BaseMultiStartMultivariateOptimizer;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+
+/**
+ * Multi-start optimizer for a (vector) model function.
+ *
+ * This class wraps an optimizer in order to use it several times in
+ * turn with different starting points (trying to avoid being trapped
+ * in a local extremum when looking for a global one).
+ *
+ * @version $Id$
+ * @since 3.0
+ */
+public class MultiStartMultivariateVectorOptimizer
+ extends BaseMultiStartMultivariateOptimizer {
+ /** Underlying optimizer. */
+ private final MultivariateVectorOptimizer optimizer;
+ /** Found optima. */
+ private final List optima = new ArrayList();
+
+ /**
+ * Create a multi-start optimizer from a single-start optimizer.
+ *
+ * @param optimizer Single-start optimizer to wrap.
+ * @param starts Number of starts to perform.
+ * If {@code starts == 1}, the result will be same as if {@code optimizer}
+ * is called directly.
+ * @param generator Random vector generator to use for restarts.
+ * @throws NullArgumentException if {@code optimizer} or {@code generator}
+ * is {@code null}.
+ * @throws NotStrictlyPositiveException if {@code starts < 1}.
+ */
+ public MultiStartMultivariateVectorOptimizer(final MultivariateVectorOptimizer optimizer,
+ final int starts,
+ final RandomVectorGenerator generator)
+ throws NullArgumentException,
+ NotStrictlyPositiveException {
+ super(optimizer, starts, generator);
+ this.optimizer = optimizer;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public PointVectorValuePair[] getOptima() {
+ Collections.sort(optima, getPairComparator());
+ return optima.toArray(new PointVectorValuePair[0]);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void store(PointVectorValuePair optimum) {
+ optima.add(optimum);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void clear() {
+ optima.clear();
+ }
+
+ /**
+ * @return a comparator for sorting the optima.
+ */
+ private Comparator getPairComparator() {
+ return new Comparator() {
+ private final RealVector target = new ArrayRealVector(optimizer.getTarget(), false);
+ private final RealMatrix weight = optimizer.getWeight();
+
+ public int compare(final PointVectorValuePair o1,
+ final PointVectorValuePair o2) {
+ if (o1 == null) {
+ return (o2 == null) ? 0 : 1;
+ } else if (o2 == null) {
+ return -1;
+ }
+ return Double.compare(weightedResidual(o1),
+ weightedResidual(o2));
+ }
+
+ private double weightedResidual(final PointVectorValuePair pv) {
+ final RealVector v = new ArrayRealVector(pv.getValueRef(), false);
+ final RealVector r = target.subtract(v);
+ return r.dotProduct(weight.operate(r));
+ }
+ };
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultivariateVectorOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultivariateVectorOptimizer.java
new file mode 100644
index 000000000..f43d387e9
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/MultivariateVectorOptimizer.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.BaseMultivariateOptimizer;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.linear.RealMatrix;
+
+/**
+ * Base class for a multivariate vector function optimizer.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class MultivariateVectorOptimizer
+ extends BaseMultivariateOptimizer {
+ /** Target values for the model function at optimum. */
+ private double[] target;
+ /** Weight matrix. */
+ private RealMatrix weightMatrix;
+ /** Model function. */
+ private MultivariateVectorFunction model;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected MultivariateVectorOptimizer(ConvergenceChecker checker) {
+ super(checker);
+ }
+
+ /**
+ * Computes the objective function value.
+ * This method must be called by subclasses to enforce the
+ * evaluation counter limit.
+ *
+ * @param params Point at which the objective function must be evaluated.
+ * @return the objective function value at the specified point.
+ * @throws TooManyEvaluationsException if the maximal number of evaluations
+ * (of the model vector function) is exceeded.
+ */
+ protected double[] computeObjectiveValue(double[] params) {
+ super.incrementEvaluationCount();
+ return model.value(params);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link org.apache.commons.math3.optim.MaxEval}
+ * - {@link org.apache.commons.math3.optim.InitialGuess}
+ * - {@link org.apache.commons.math3.optim.SimpleBounds}
+ * - {@link Target}
+ * - {@link Weight}
+ * - {@link ModelFunction}
+ *
+ * @return {@inheritDoc}
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ * @throws DimensionMismatchException if the initial guess, target, and weight
+ * arguments have inconsistent dimensions.
+ */
+ public PointVectorValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException,
+ DimensionMismatchException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Check input consistency.
+ checkParameters();
+ // Set up base class and perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+ * Gets the weight matrix of the observations.
+ *
+ * @return the weight matrix.
+ */
+ public RealMatrix getWeight() {
+ return weightMatrix.copy();
+ }
+ /**
+ * Gets the observed values to be matched by the objective vector
+ * function.
+ *
+ * @return the target values.
+ */
+ public double[] getTarget() {
+ return target.clone();
+ }
+
+ /**
+ * Gets the number of observed values.
+ *
+ * @return the length of the target vector.
+ */
+ public int getTargetSize() {
+ return target.length;
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data. The following data will be looked for:
+ *
+ * - {@link Target}
+ * - {@link Weight}
+ * - {@link ModelFunction}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof ModelFunction) {
+ model = ((ModelFunction) data).getModelFunction();
+ continue;
+ }
+ if (data instanceof Target) {
+ target = ((Target) data).getTarget();
+ continue;
+ }
+ if (data instanceof Weight) {
+ weightMatrix = ((Weight) data).getWeight();
+ continue;
+ }
+ }
+ }
+
+ /**
+ * Check parameters consistency.
+ *
+ * @throws DimensionMismatchException if {@link #target} and
+ * {@link #weightMatrix} have inconsistent dimensions.
+ */
+ private void checkParameters() {
+ if (target.length != weightMatrix.getColumnDimension()) {
+ throw new DimensionMismatchException(target.length,
+ weightMatrix.getColumnDimension());
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Target.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Target.java
new file mode 100644
index 000000000..d6fe12d6a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Target.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Target of the optimization procedure.
+ * They are the values which the objective vector function must reproduce
+ * when the parameters of the model have been optimized.
+ *
+ * Immutable class.
+ *
+ * @version $Id: Target.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.1
+ */
+public class Target implements OptimizationData {
+    /** Target values (of the objective vector function). */
+    private final double[] target;
+
+    /**
+     * @param observations Target values.
+     */
+    public Target(double[] observations) {
+        // Defensive copy: keeps this class immutable.
+        target = observations.clone();
+    }
+
+    /**
+     * Gets the target values.
+     *
+     * @return a copy of the target values.
+     */
+    public double[] getTarget() {
+        return target.clone();
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java
new file mode 100644
index 000000000..789bc256d
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.NonSquareMatrixException;
+
+/**
+ * Weight matrix of the residuals between model and observations.
+ *
+ * Immutable class.
+ *
+ * @version $Id: Weight.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 3.1
+ */
+public class Weight implements OptimizationData {
+    /** Weight matrix. */
+    private final RealMatrix weightMatrix;
+
+    /**
+     * Creates a diagonal weight matrix.
+     *
+     * @param weight List of the values of the diagonal.
+     */
+    public Weight(double[] weight) {
+        final int dim = weight.length;
+        weightMatrix = MatrixUtils.createRealMatrix(dim, dim);
+        for (int i = 0; i < dim; i++) {
+            weightMatrix.setEntry(i, i, weight[i]);
+        }
+    }
+
+    /**
+     * @param weight Weight matrix.
+     * @throws NonSquareMatrixException if the argument is not
+     * a square matrix.
+     */
+    public Weight(RealMatrix weight) {
+        if (weight.getColumnDimension() != weight.getRowDimension()) {
+            throw new NonSquareMatrixException(weight.getColumnDimension(),
+                                               weight.getRowDimension());
+        }
+
+        // Defensive copy: keeps this class immutable.
+        weightMatrix = weight.copy();
+    }
+
+    /**
+     * Gets the weight matrix.
+     *
+     * @return a copy of the weight matrix.
+     */
+    public RealMatrix getWeight() {
+        return weightMatrix.copy();
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java
new file mode 100644
index 000000000..b7bb6f575
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.DecompositionSolver;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.EigenDecomposition;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.optim.nonlinear.vector.JacobianMultivariateVectorOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Base class for implementing least-squares optimizers.
+ * It provides methods for error estimation.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class AbstractLeastSquaresOptimizer
+    extends JacobianMultivariateVectorOptimizer {
+    /** Square-root of the weight matrix. */
+    private RealMatrix weightMatrixSqrt;
+    /** Cost value (square root of the sum of the residuals). */
+    private double cost;
+
+    /**
+     * @param checker Convergence checker.
+     */
+    protected AbstractLeastSquaresOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+        super(checker);
+    }
+
+    /**
+     * Computes the weighted Jacobian matrix.
+     *
+     * @param params Model parameters at which to compute the Jacobian.
+     * @return the weighted Jacobian: W<sup>1/2</sup> J.
+     * @throws DimensionMismatchException if the Jacobian dimension does not
+     * match problem dimension.
+     */
+    protected RealMatrix computeWeightedJacobian(double[] params) {
+        return weightMatrixSqrt.multiply(MatrixUtils.createRealMatrix(computeJacobian(params)));
+    }
+
+    /**
+     * Computes the cost.
+     *
+     * @param residuals Residuals.
+     * @return the cost.
+     * @see #computeResiduals(double[])
+     */
+    protected double computeCost(double[] residuals) {
+        final ArrayRealVector r = new ArrayRealVector(residuals);
+        // cost = sqrt(r^T W r), with W the full weight matrix.
+        return FastMath.sqrt(r.dotProduct(getWeight().operate(r)));
+    }
+
+    /**
+     * Gets the root-mean-square (RMS) value.
+     *
+     * The RMS is the root of the arithmetic mean of the square of all weighted
+     * residuals.
+     * This is related to the criterion that is minimized by the optimizer
+     * as follows: If <em>c</em> is the criterion, and <em>n</em> is the
+     * number of measurements, then the RMS is <em>sqrt(c/n)</em>.
+     *
+     * @return the RMS value.
+     */
+    public double getRMS() {
+        return FastMath.sqrt(getChiSquare() / getTargetSize());
+    }
+
+    /**
+     * Get a Chi-Square-like value assuming the N residuals follow N
+     * distinct normal distributions centered on 0 and whose variances are
+     * the reciprocal of the weights.
+     *
+     * @return chi-square value
+     */
+    public double getChiSquare() {
+        return cost * cost;
+    }
+
+    /**
+     * Gets the square-root of the weight matrix.
+     *
+     * @return the square-root of the weight matrix.
+     */
+    public RealMatrix getWeightSquareRoot() {
+        return weightMatrixSqrt.copy();
+    }
+
+    /**
+     * Sets the cost.
+     *
+     * @param cost Cost value.
+     */
+    protected void setCost(double cost) {
+        this.cost = cost;
+    }
+
+    /**
+     * Get the covariance matrix of the optimized parameters.
+     * <br/>
+     * Note that this operation involves the inversion of the
+     * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
+     * Jacobian matrix.
+     * The {@code threshold} parameter is a way for the caller to specify
+     * that the result of this computation should be considered meaningless,
+     * and thus trigger an exception.
+     *
+     * @param params Model parameters.
+     * @param threshold Singularity threshold.
+     * @return the covariance matrix.
+     * @throws org.apache.commons.math3.linear.SingularMatrixException
+     * if the covariance matrix cannot be computed (singular problem).
+     */
+    public double[][] computeCovariances(double[] params,
+                                         double threshold) {
+        // Set up the Jacobian.
+        final RealMatrix j = computeWeightedJacobian(params);
+
+        // Compute transpose(J)J.
+        final RealMatrix jTj = j.transpose().multiply(j);
+
+        // Compute the covariances matrix.
+        final DecompositionSolver solver
+            = new QRDecomposition(jTj, threshold).getSolver();
+        return solver.getInverse().getData();
+    }
+
+    /**
+     * Computes an estimate of the standard deviation of the parameters. The
+     * returned values are the square root of the diagonal coefficients of the
+     * covariance matrix, {@code sd(a[i]) ~= sqrt(C[i][i])}, where {@code a[i]}
+     * is the optimized value of the {@code i}-th parameter, and {@code C} is
+     * the covariance matrix.
+     *
+     * @param params Model parameters.
+     * @param covarianceSingularityThreshold Singularity threshold (see
+     * {@link #computeCovariances(double[],double) computeCovariances}).
+     * @return an estimate of the standard deviation of the optimized parameters
+     * @throws org.apache.commons.math3.linear.SingularMatrixException
+     * if the covariance matrix cannot be computed.
+     */
+    public double[] computeSigma(double[] params,
+                                 double covarianceSingularityThreshold) {
+        final int nC = params.length;
+        final double[] sig = new double[nC];
+        final double[][] cov = computeCovariances(params, covarianceSingularityThreshold);
+        for (int i = 0; i < nC; ++i) {
+            sig[i] = FastMath.sqrt(cov[i][i]);
+        }
+        return sig;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @param optData Optimization data. The following data will be looked for:
+     * <ul>
+     *  <li>{@link org.apache.commons.math3.optim.MaxEval}</li>
+     *  <li>{@link org.apache.commons.math3.optim.InitialGuess}</li>
+     *  <li>{@link org.apache.commons.math3.optim.SimpleBounds}</li>
+     *  <li>{@link org.apache.commons.math3.optim.nonlinear.vector.Target}</li>
+     *  <li>{@link org.apache.commons.math3.optim.nonlinear.vector.Weight}</li>
+     *  <li>{@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunction}</li>
+     *  <li>{@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian}</li>
+     * </ul>
+     * @return {@inheritDoc}
+     * @throws TooManyEvaluationsException if the maximal number of
+     * evaluations is exceeded.
+     * @throws DimensionMismatchException if the initial guess, target, and weight
+     * arguments have inconsistent dimensions.
+     */
+    @Override
+    public PointVectorValuePair optimize(OptimizationData... optData)
+        throws TooManyEvaluationsException {
+        // Retrieve settings.
+        parseOptimizationData(optData);
+        // Set up base class and perform computation.
+        return super.optimize(optData);
+    }
+
+    /**
+     * Computes the residuals.
+     * The residual is the difference between the observed (target)
+     * values and the model (objective function) value.
+     * There is one residual for each element of the vector-valued
+     * function.
+     *
+     * @param objectiveValue Value of the the objective function. This is
+     * the value returned from a call to
+     * {@link #computeObjectiveValue(double[]) computeObjectiveValue}
+     * (whose array argument contains the model parameters).
+     * @return the residuals.
+     * @throws DimensionMismatchException if {@code params} has a wrong
+     * length.
+     */
+    protected double[] computeResiduals(double[] objectiveValue) {
+        final double[] target = getTarget();
+        if (objectiveValue.length != target.length) {
+            throw new DimensionMismatchException(target.length,
+                                                 objectiveValue.length);
+        }
+
+        final double[] residuals = new double[target.length];
+        for (int i = 0; i < target.length; i++) {
+            residuals[i] = target[i] - objectiveValue[i];
+        }
+
+        return residuals;
+    }
+
+    /**
+     * Scans the list of (required and optional) optimization data that
+     * characterize the problem.
+     * If the weight matrix is specified, the {@link #weightMatrixSqrt}
+     * field is recomputed.
+     *
+     * @param optData Optimization data. The following data will be looked for:
+     * <ul>
+     *  <li>{@link org.apache.commons.math3.optim.nonlinear.vector.Weight}</li>
+     * </ul>
+     */
+    private void parseOptimizationData(OptimizationData... optData) {
+        // The existing values (as set by the previous call) are reused if
+        // not provided in the argument list.
+        for (OptimizationData data : optData) {
+            if (data instanceof Weight) {
+                weightMatrixSqrt = squareRoot(((Weight) data).getWeight());
+                // If more data must be parsed, this statement _must_ be
+                // changed to "continue".
+                break;
+            }
+        }
+    }
+
+    /**
+     * Computes the square-root of the weight matrix.
+     *
+     * @param m Symmetric, positive-definite (weight) matrix.
+     * @return the square-root of the weight matrix.
+     */
+    private RealMatrix squareRoot(RealMatrix m) {
+        final EigenDecomposition dec = new EigenDecomposition(m);
+        return dec.getSquareRoot();
+    }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java
new file mode 100644
index 000000000..6aa684dd5
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.MathInternalError;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.BlockRealMatrix;
+import org.apache.commons.math3.linear.DecompositionSolver;
+import org.apache.commons.math3.linear.LUDecomposition;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.SingularMatrixException;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+
+/**
+ * Gauss-Newton least-squares solver.
+ *
+ * This class solve a least-square problem by solving the normal equations
+ * of the linearized problem at each iteration. Either LU decomposition or
+ * QR decomposition can be used to solve the normal equations. LU decomposition
+ * is faster but QR decomposition is more robust for difficult problems.
+ *
+ *
+ * @version $Id: GaussNewtonOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ *
+ */
+public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer {
+ /** Indicator for using LU decomposition. */
+ private final boolean useLU;
+
+ /**
+ * Simple constructor with default settings.
+ * The normal equations will be solved using LU decomposition.
+ *
+ * @param checker Convergence checker.
+ */
+ public GaussNewtonOptimizer(ConvergenceChecker checker) {
+ this(true, checker);
+ }
+
+ /**
+ * @param useLU If {@code true}, the normal equations will be solved
+ * using LU decomposition, otherwise they will be solved using QR
+ * decomposition.
+ * @param checker Convergence checker.
+ */
+ public GaussNewtonOptimizer(final boolean useLU,
+ ConvergenceChecker checker) {
+ super(checker);
+ this.useLU = useLU;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public PointVectorValuePair doOptimize() {
+ final ConvergenceChecker checker
+ = getConvergenceChecker();
+
+ // Computation will be useless without a checker (see "for-loop").
+ if (checker == null) {
+ throw new NullArgumentException();
+ }
+
+ final double[] targetValues = getTarget();
+ final int nR = targetValues.length; // Number of observed data.
+
+ final RealMatrix weightMatrix = getWeight();
+ // Diagonal of the weight matrix.
+ final double[] residualsWeights = new double[nR];
+ for (int i = 0; i < nR; i++) {
+ residualsWeights[i] = weightMatrix.getEntry(i, i);
+ }
+
+ final double[] currentPoint = getStartPoint();
+ final int nC = currentPoint.length;
+
+ // iterate until convergence is reached
+ PointVectorValuePair current = null;
+ int iter = 0;
+ for (boolean converged = false; !converged;) {
+ ++iter;
+
+ // evaluate the objective function and its jacobian
+ PointVectorValuePair previous = current;
+ // Value of the objective function at "currentPoint".
+ final double[] currentObjective = computeObjectiveValue(currentPoint);
+ final double[] currentResiduals = computeResiduals(currentObjective);
+ final RealMatrix weightedJacobian = computeWeightedJacobian(currentPoint);
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+
+ // build the linear problem
+ final double[] b = new double[nC];
+ final double[][] a = new double[nC][nC];
+ for (int i = 0; i < nR; ++i) {
+
+ final double[] grad = weightedJacobian.getRow(i);
+ final double weight = residualsWeights[i];
+ final double residual = currentResiduals[i];
+
+ // compute the normal equation
+ final double wr = weight * residual;
+ for (int j = 0; j < nC; ++j) {
+ b[j] += wr * grad[j];
+ }
+
+ // build the contribution matrix for measurement i
+ for (int k = 0; k < nC; ++k) {
+ double[] ak = a[k];
+ double wgk = weight * grad[k];
+ for (int l = 0; l < nC; ++l) {
+ ak[l] += wgk * grad[l];
+ }
+ }
+ }
+
+ try {
+ // solve the linearized least squares problem
+ RealMatrix mA = new BlockRealMatrix(a);
+ DecompositionSolver solver = useLU ?
+ new LUDecomposition(mA).getSolver() :
+ new QRDecomposition(mA).getSolver();
+ final double[] dX = solver.solve(new ArrayRealVector(b, false)).toArray();
+ // update the estimated parameters
+ for (int i = 0; i < nC; ++i) {
+ currentPoint[i] += dX[i];
+ }
+ } catch (SingularMatrixException e) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM);
+ }
+
+ // Check convergence.
+ if (previous != null) {
+ converged = checker.converged(iter, previous, current);
+ if (converged) {
+ setCost(computeCost(currentResiduals));
+ return current;
+ }
+ }
+ }
+ // Must never happen.
+ throw new MathInternalError();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java
new file mode 100644
index 000000000..e3b2a3fa5
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java
@@ -0,0 +1,939 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.Arrays;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+
+
+/**
+ * This class solves a least-squares problem using the Levenberg-Marquardt algorithm.
+ *
+ * This implementation should work even for over-determined systems
+ * (i.e. systems having more point than equations). Over-determined systems
+ * are solved by ignoring the point which have the smallest impact according
+ * to their jacobian column norm. Only the rank of the matrix and some loop bounds
+ * are changed to implement this.
+ *
+ * The resolution engine is a simple translation of the MINPACK lmder routine with minor
+ * changes. The changes include the over-determined resolution, the use of
+ * inherited convergence checker and the Q.R. decomposition which has been
+ * rewritten following the algorithm described in the
+ * P. Lascaux and R. Theodor book Analyse numérique matricielle
+ * appliquée à l'art de l'ingénieur, Masson 1986.
+ * The authors of the original fortran version are:
+ *
+ * - Argonne National Laboratory. MINPACK project. March 1980
+ * - Burton S. Garbow
+ * - Kenneth E. Hillstrom
+ * - Jorge J. More
+ *
+ * The redistribution policy for MINPACK is available here, for convenience, it
+ * is reproduced below.
+ *
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *     <code>This product includes software developed by the University of
+ *     Chicago, as Operator of Argonne National Laboratory.</code>
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+ *
+ * @version $Id: LevenbergMarquardtOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class LevenbergMarquardtOptimizer
+ extends AbstractLeastSquaresOptimizer {
+ /** Number of solved point. */
+ private int solvedCols;
+ /** Diagonal elements of the R matrix in the Q.R. decomposition. */
+ private double[] diagR;
+ /** Norms of the columns of the jacobian matrix. */
+ private double[] jacNorm;
+ /** Coefficients of the Householder transforms vectors. */
+ private double[] beta;
+ /** Columns permutation array. */
+ private int[] permutation;
+ /** Rank of the jacobian matrix. */
+ private int rank;
+ /** Levenberg-Marquardt parameter. */
+ private double lmPar;
+ /** Parameters evolution direction associated with lmPar. */
+ private double[] lmDir;
+ /** Positive input variable used in determining the initial step bound. */
+ private final double initialStepBoundFactor;
+ /** Desired relative error in the sum of squares. */
+ private final double costRelativeTolerance;
+ /** Desired relative error in the approximate solution parameters. */
+ private final double parRelativeTolerance;
+ /** Desired max cosine on the orthogonality between the function vector
+ * and the columns of the jacobian. */
+ private final double orthoTolerance;
+ /** Threshold for QR ranking. */
+ private final double qrRankingThreshold;
+ /** Weighted residuals. */
+ private double[] weightedResidual;
+ /** Weighted Jacobian. */
+ private double[][] weightedJacobian;
+
+ /**
+ * Build an optimizer for least squares problems with default values
+ * for all the tuning parameters (see the {@link
+ * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+ * other constructor}).
+ * The default values for the algorithm settings are:
+ *
+ * - Initial step bound factor: 100
+ * - Cost relative tolerance: 1e-10
+ * - Parameters relative tolerance: 1e-10
+ * - Orthogonality tolerance: 1e-10
+ * - QR ranking threshold: {@link Precision#SAFE_MIN}
+ *
+ */
+ public LevenbergMarquardtOptimizer() {
+ this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+ }
+
+    /**
+     * Constructor that allows the specification of a custom convergence
+     * checker.
+     * Note that all the usual convergence checks will be disabled.
+     * The default values for the algorithm settings are:
+     * <ul>
+     *  <li>Initial step bound factor: 100</li>
+     *  <li>Cost relative tolerance: 1e-10</li>
+     *  <li>Parameters relative tolerance: 1e-10</li>
+     *  <li>Orthogonality tolerance: 1e-10</li>
+     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+     * </ul>
+     *
+     * @param checker Convergence checker.
+     */
+    public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+        this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+    }
+
+    /**
+     * Constructor that allows the specification of a custom convergence
+     * checker, in addition to the standard ones.
+     *
+     * @param initialStepBoundFactor Positive input variable used in
+     * determining the initial step bound. This bound is set to the
+     * product of initialStepBoundFactor and the euclidean norm of
+     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+     * itself. In most cases factor should lie in the interval
+     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+     * @param checker Convergence checker.
+     * @param costRelativeTolerance Desired relative error in the sum of
+     * squares.
+     * @param parRelativeTolerance Desired relative error in the approximate
+     * solution parameters.
+     * @param orthoTolerance Desired max cosine on the orthogonality between
+     * the function vector and the columns of the Jacobian.
+     * @param threshold Desired threshold for QR ranking. If the squared norm
+     * of a column vector is smaller or equal to this threshold during QR
+     * decomposition, it is considered to be a zero vector and hence the rank
+     * of the matrix is reduced.
+     */
+    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+                                       ConvergenceChecker<PointVectorValuePair> checker,
+                                       double costRelativeTolerance,
+                                       double parRelativeTolerance,
+                                       double orthoTolerance,
+                                       double threshold) {
+        super(checker);
+        this.initialStepBoundFactor = initialStepBoundFactor;
+        this.costRelativeTolerance = costRelativeTolerance;
+        this.parRelativeTolerance = parRelativeTolerance;
+        this.orthoTolerance = orthoTolerance;
+        this.qrRankingThreshold = threshold;
+    }
+
+ /**
+ * Build an optimizer for least squares problems with default values
+ * for some of the tuning parameters (see the {@link
+ * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+ * other constructor}).
+ * The default values for the algorithm settings are:
+ *
+ * - Initial step bound factor: 100
+ * - QR ranking threshold: {@link Precision#SAFE_MIN}
+ *
+ *
+ * @param costRelativeTolerance Desired relative error in the sum of
+ * squares.
+ * @param parRelativeTolerance Desired relative error in the approximate
+ * solution parameters.
+ * @param orthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ */
+ public LevenbergMarquardtOptimizer(double costRelativeTolerance,
+ double parRelativeTolerance,
+ double orthoTolerance) {
+ this(100,
+ costRelativeTolerance, parRelativeTolerance, orthoTolerance,
+ Precision.SAFE_MIN);
+ }
+
+ /**
+ * The arguments control the behaviour of the default convergence checking
+ * procedure.
+ * Additional criteria can be defined through the setting of a {@link
+ * ConvergenceChecker}.
+ *
+ * @param initialStepBoundFactor Positive input variable used in
+ * determining the initial step bound. This bound is set to the
+ * product of initialStepBoundFactor and the euclidean norm of
+ * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+ * itself. In most cases factor should lie in the interval
+ * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+ * @param costRelativeTolerance Desired relative error in the sum of
+ * squares.
+ * @param parRelativeTolerance Desired relative error in the approximate
+ * solution parameters.
+ * @param orthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ * @param threshold Desired threshold for QR ranking. If the squared norm
+ * of a column vector is smaller or equal to this threshold during QR
+ * decomposition, it is considered to be a zero vector and hence the rank
+ * of the matrix is reduced.
+ */
+ public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+ double costRelativeTolerance,
+ double parRelativeTolerance,
+ double orthoTolerance,
+ double threshold) {
+ super(null); // No custom convergence criterion.
+ this.initialStepBoundFactor = initialStepBoundFactor;
+ this.costRelativeTolerance = costRelativeTolerance;
+ this.parRelativeTolerance = parRelativeTolerance;
+ this.orthoTolerance = orthoTolerance;
+ this.qrRankingThreshold = threshold;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointVectorValuePair doOptimize() {
+ final int nR = getTarget().length; // Number of observed data.
+ final double[] currentPoint = getStartPoint();
+ final int nC = currentPoint.length; // Number of parameters.
+
+ // arrays shared with the other private methods
+ solvedCols = FastMath.min(nR, nC);
+ diagR = new double[nC];
+ jacNorm = new double[nC];
+ beta = new double[nC];
+ permutation = new int[nC];
+ lmDir = new double[nC];
+
+ // local point
+ double delta = 0;
+ double xNorm = 0;
+ double[] diag = new double[nC];
+ double[] oldX = new double[nC];
+ double[] oldRes = new double[nR];
+ double[] oldObj = new double[nR];
+ double[] qtf = new double[nR];
+ double[] work1 = new double[nC];
+ double[] work2 = new double[nC];
+ double[] work3 = new double[nC];
+
+ final RealMatrix weightMatrixSqrt = getWeightSquareRoot();
+
+ // Evaluate the function at the starting point and calculate its norm.
+ double[] currentObjective = computeObjectiveValue(currentPoint);
+ double[] currentResiduals = computeResiduals(currentObjective);
+ PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
+ double currentCost = computeCost(currentResiduals);
+
+ // Outer loop.
+ lmPar = 0;
+ boolean firstIteration = true;
+ int iter = 0;
+ final ConvergenceChecker checker = getConvergenceChecker();
+ while (true) {
+ ++iter;
+ final PointVectorValuePair previous = current;
+
+ // QR decomposition of the jacobian matrix
+ qrDecomposition(computeWeightedJacobian(currentPoint));
+
+ weightedResidual = weightMatrixSqrt.operate(currentResiduals);
+ for (int i = 0; i < nR; i++) {
+ qtf[i] = weightedResidual[i];
+ }
+
+ // compute Qt.res
+ qTy(qtf);
+
+ // now we don't need Q anymore,
+ // so let jacobian contain the R matrix with its diagonal elements
+ for (int k = 0; k < solvedCols; ++k) {
+ int pk = permutation[k];
+ weightedJacobian[k][pk] = diagR[pk];
+ }
+
+ if (firstIteration) {
+ // scale the point according to the norms of the columns
+ // of the initial jacobian
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double dk = jacNorm[k];
+ if (dk == 0) {
+ dk = 1.0;
+ }
+ double xk = dk * currentPoint[k];
+ xNorm += xk * xk;
+ diag[k] = dk;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // initialize the step bound delta
+ delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
+ }
+
+ // check orthogonality between function vector and jacobian columns
+ double maxCosine = 0;
+ if (currentCost != 0) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = jacNorm[pj];
+ if (s != 0) {
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qtf[i];
+ }
+ maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
+ }
+ }
+ }
+ if (maxCosine <= orthoTolerance) {
+ // Convergence has been reached.
+ setCost(currentCost);
+ return current;
+ }
+
+ // rescale if necessary
+ for (int j = 0; j < nC; ++j) {
+ diag[j] = FastMath.max(diag[j], jacNorm[j]);
+ }
+
+ // Inner loop.
+ for (double ratio = 0; ratio < 1.0e-4;) {
+
+ // save the state
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ oldX[pj] = currentPoint[pj];
+ }
+ final double previousCost = currentCost;
+ double[] tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+ tmpVec = currentObjective;
+ currentObjective = oldObj;
+ oldObj = tmpVec;
+
+ // determine the Levenberg-Marquardt parameter
+ determineLMParameter(qtf, delta, diag, work1, work2, work3);
+
+ // compute the new point and the norm of the evolution direction
+ double lmNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ lmDir[pj] = -lmDir[pj];
+ currentPoint[pj] = oldX[pj] + lmDir[pj];
+ double s = diag[pj] * lmDir[pj];
+ lmNorm += s * s;
+ }
+ lmNorm = FastMath.sqrt(lmNorm);
+ // on the first iteration, adjust the initial step bound.
+ if (firstIteration) {
+ delta = FastMath.min(delta, lmNorm);
+ }
+
+ // Evaluate the function at x + p and calculate its norm.
+ currentObjective = computeObjectiveValue(currentPoint);
+ currentResiduals = computeResiduals(currentObjective);
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+ currentCost = computeCost(currentResiduals);
+
+ // compute the scaled actual reduction
+ double actRed = -1.0;
+ if (0.1 * currentCost < previousCost) {
+ double r = currentCost / previousCost;
+ actRed = 1.0 - r * r;
+ }
+
+ // compute the scaled predicted reduction
+ // and the scaled directional derivative
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double dirJ = lmDir[pj];
+ work1[j] = 0;
+ for (int i = 0; i <= j; ++i) {
+ work1[i] += weightedJacobian[i][pj] * dirJ;
+ }
+ }
+ double coeff1 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ coeff1 += work1[j] * work1[j];
+ }
+ double pc2 = previousCost * previousCost;
+ coeff1 = coeff1 / pc2;
+ double coeff2 = lmPar * lmNorm * lmNorm / pc2;
+ double preRed = coeff1 + 2 * coeff2;
+ double dirDer = -(coeff1 + coeff2);
+
+ // ratio of the actual to the predicted reduction
+ ratio = (preRed == 0) ? 0 : (actRed / preRed);
+
+ // update the step bound
+ if (ratio <= 0.25) {
+ double tmp =
+ (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
+ if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
+ tmp = 0.1;
+ }
+ delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
+ lmPar /= tmp;
+ } else if ((lmPar == 0) || (ratio >= 0.75)) {
+ delta = 2 * lmNorm;
+ lmPar *= 0.5;
+ }
+
+ // test for successful iteration.
+ if (ratio >= 1.0e-4) {
+ // successful iteration, update the norm
+ firstIteration = false;
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double xK = diag[k] * currentPoint[k];
+ xNorm += xK * xK;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // tests for convergence.
+ if (checker != null) {
+ // we use the vectorial convergence checker
+ if (checker.converged(iter, previous, current)) {
+ setCost(currentCost);
+ return current;
+ }
+ }
+ } else {
+ // failed iteration, reset the previous values
+ currentCost = previousCost;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ currentPoint[pj] = oldX[pj];
+ }
+ tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+ tmpVec = currentObjective;
+ currentObjective = oldObj;
+ oldObj = tmpVec;
+ // Reset "current" to previous values.
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+ }
+
+ // Default convergence criteria.
+ if ((FastMath.abs(actRed) <= costRelativeTolerance &&
+ preRed <= costRelativeTolerance &&
+ ratio <= 2.0) ||
+ delta <= parRelativeTolerance * xNorm) {
+ setCost(currentCost);
+ return current;
+ }
+
+ // tests for termination and stringent tolerances
+ // (2.2204e-16 is the machine epsilon for IEEE754)
+ if ((FastMath.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
+ costRelativeTolerance);
+ } else if (delta <= 2.2204e-16 * xNorm) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
+ parRelativeTolerance);
+ } else if (maxCosine <= 2.2204e-16) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
+ orthoTolerance);
+ }
+ }
+ }
+ }
+
+ /**
+ * Determine the Levenberg-Marquardt parameter.
+ * This implementation is a translation in Java of the MINPACK
+ * lmpar
+ * routine.
+ * This method sets the lmPar and lmDir attributes.
+ * The authors of the original fortran function are:
+ *
+ * - Argonne National Laboratory. MINPACK project. March 1980
+ * - Burton S. Garbow
+ * - Kenneth E. Hillstrom
+ * - Jorge J. More
+ *
+ * Luc Maisonobe did the Java translation.
+ *
+ * The parameter lmPar is sought so that the damped step lmDir satisfies
+ * ||diag * lmDir|| close to delta (within 10%), i.e. it locates a zero of
+ * phi(par) = ||diag * x(par)|| - delta, where x(par) solves the damped
+ * normal equations. A zero parameter means the plain Gauss-Newton step
+ * already fits inside the trust region.
+ *
+ * @param qy array containing qTy
+ * @param delta upper bound on the euclidean norm of diagR * lmDir
+ * @param diag diagonal matrix
+ * @param work1 work array
+ * @param work2 work array
+ * @param work3 work array
+ */
+ private void determineLMParameter(double[] qy, double delta, double[] diag,
+ double[] work1, double[] work2, double[] work3) {
+ final int nC = weightedJacobian[0].length;
+
+ // compute and store in x the gauss-newton direction, if the
+ // jacobian is rank-deficient, obtain a least squares solution
+ for (int j = 0; j < rank; ++j) {
+ lmDir[permutation[j]] = qy[j];
+ }
+ for (int j = rank; j < nC; ++j) {
+ lmDir[permutation[j]] = 0;
+ }
+ // back-substitution through the (column-permuted) upper triangular factor R
+ for (int k = rank - 1; k >= 0; --k) {
+ int pk = permutation[k];
+ double ypk = lmDir[pk] / diagR[pk];
+ for (int i = 0; i < k; ++i) {
+ lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
+ }
+ lmDir[pk] = ypk;
+ }
+
+ // evaluate the function at the origin, and test
+ // for acceptance of the Gauss-Newton direction
+ double dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work1[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double fp = dxNorm - delta;
+ if (fp <= 0.1 * delta) {
+ // Gauss-Newton step is already within 10% of the trust region: no damping
+ lmPar = 0;
+ return;
+ }
+
+ // if the jacobian is not rank deficient, the Newton step provides
+ // a lower bound, parl, for the zero of the function,
+ // otherwise set this bound to zero
+ double sum2;
+ double parl = 0;
+ if (rank == solvedCols) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] *= diag[pj] / dxNorm;
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i < j; ++i) {
+ sum += weightedJacobian[i][pj] * work1[permutation[i]];
+ }
+ double s = (work1[pj] - sum) / diagR[pj];
+ work1[pj] = s;
+ sum2 += s * s;
+ }
+ parl = fp / (delta * sum2);
+ }
+
+ // calculate an upper bound, paru, for the zero of the function
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qy[i];
+ }
+ sum /= diag[pj];
+ sum2 += sum * sum;
+ }
+ double gNorm = FastMath.sqrt(sum2);
+ double paru = gNorm / delta;
+ if (paru == 0) {
+ // 2.2251e-308 is the smallest positive normal double for IEEE754
+ paru = 2.2251e-308 / FastMath.min(delta, 0.1);
+ }
+
+ // if the input par lies outside of the interval (parl,paru),
+ // set par to the closer endpoint
+ lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
+ if (lmPar == 0) {
+ lmPar = gNorm / dxNorm;
+ }
+
+ // safeguarded Newton refinement of lmPar; MINPACK caps this at 10 iterations
+ for (int countdown = 10; countdown >= 0; --countdown) {
+
+ // evaluate the function at the current value of lmPar
+ if (lmPar == 0) {
+ lmPar = FastMath.max(2.2251e-308, 0.001 * paru);
+ }
+ double sPar = FastMath.sqrt(lmPar);
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = sPar * diag[pj];
+ }
+ determineLMDirection(qy, work1, work2, work3);
+
+ dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work3[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double previousFP = fp;
+ fp = dxNorm - delta;
+
+ // if the function is small enough, accept the current value
+ // of lmPar, also test for the exceptional cases where parl is zero
+ if ((FastMath.abs(fp) <= 0.1 * delta) ||
+ ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
+ return;
+ }
+
+ // compute the Newton correction (derivative of phi with respect to lmPar)
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = work3[pj] * diag[pj] / dxNorm;
+ }
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] /= work2[j];
+ double tmp = work1[pj];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
+ }
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ double s = work1[permutation[j]];
+ sum2 += s * s;
+ }
+ double correction = fp / (delta * sum2);
+
+ // depending on the sign of the function, update parl or paru.
+ if (fp > 0) {
+ parl = FastMath.max(parl, lmPar);
+ } else if (fp < 0) {
+ paru = FastMath.min(paru, lmPar);
+ }
+
+ // compute an improved estimate for lmPar
+ lmPar = FastMath.max(parl, lmPar + correction);
+
+ }
+ }
+
+ /**
+ * Solve a*x = b and d*x = 0 in the least squares sense.
+ * This implementation is a translation in Java of the MINPACK
+ * qrsolv
+ * routine.
+ * This method sets the lmDir and lmDiag attributes.
+ * The authors of the original fortran function are:
+ *
+ * - Argonne National Laboratory. MINPACK project. March 1980
+ * - Burton S. Garbow
+ * - Kenneth E. Hillstrom
+ * - Jorge J. More
+ *
+ * Luc Maisonobe did the Java translation.
+ *
+ * On entry the strict upper triangle of weightedJacobian holds R from the
+ * Q.R decomposition; it is used as scratch space (the lower triangle is
+ * overwritten) and its diagonal is restored before returning.
+ *
+ * @param qy array containing qTy
+ * @param diag diagonal matrix
+ * @param lmDiag diagonal elements associated with lmDir
+ * @param work work array
+ */
+ private void determineLMDirection(double[] qy, double[] diag,
+ double[] lmDiag, double[] work) {
+
+ // copy R and Qty to preserve input and initialize s
+ // in particular, save the diagonal elements of R in lmDir
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
+ }
+ lmDir[j] = diagR[pj];
+ work[j] = qy[j];
+ }
+
+ // eliminate the diagonal matrix d using a Givens rotation
+ for (int j = 0; j < solvedCols; ++j) {
+
+ // prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the Q.R. factorization
+ int pj = permutation[j];
+ double dpj = diag[pj];
+ if (dpj != 0) {
+ Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
+ }
+ lmDiag[j] = dpj;
+
+ // the transformations to eliminate the row of d
+ // modify only a single element of Qty
+ // beyond the first n, which is initially zero.
+ double qtbpj = 0;
+ for (int k = j; k < solvedCols; ++k) {
+ int pk = permutation[k];
+
+ // determine a Givens rotation which eliminates the
+ // appropriate element in the current row of d
+ if (lmDiag[k] != 0) {
+
+ final double sin;
+ final double cos;
+ double rkk = weightedJacobian[k][pk];
+ // choose the numerically safer formula depending on which
+ // of |rkk| and |lmDiag[k]| is larger (avoids overflow in the ratio)
+ if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
+ final double cotan = rkk / lmDiag[k];
+ sin = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
+ cos = sin * cotan;
+ } else {
+ final double tan = lmDiag[k] / rkk;
+ cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
+ sin = cos * tan;
+ }
+
+ // compute the modified diagonal element of R and
+ // the modified element of (Qty,0)
+ weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
+ final double temp = cos * work[k] + sin * qtbpj;
+ qtbpj = -sin * work[k] + cos * qtbpj;
+ work[k] = temp;
+
+ // accumulate the transformation in the row of s
+ for (int i = k + 1; i < solvedCols; ++i) {
+ double rik = weightedJacobian[i][pk];
+ final double temp2 = cos * rik + sin * lmDiag[i];
+ lmDiag[i] = -sin * rik + cos * lmDiag[i];
+ weightedJacobian[i][pk] = temp2;
+ }
+ }
+ }
+
+ // store the diagonal element of s and restore
+ // the corresponding diagonal element of R
+ lmDiag[j] = weightedJacobian[j][permutation[j]];
+ weightedJacobian[j][permutation[j]] = lmDir[j];
+ }
+
+ // solve the triangular system for z, if the system is
+ // singular, then obtain a least squares solution
+ int nSing = solvedCols;
+ for (int j = 0; j < solvedCols; ++j) {
+ if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
+ nSing = j;
+ }
+ if (nSing < solvedCols) {
+ work[j] = 0;
+ }
+ }
+ if (nSing > 0) {
+ // back-substitution restricted to the non-singular leading block
+ for (int j = nSing - 1; j >= 0; --j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = j + 1; i < nSing; ++i) {
+ sum += weightedJacobian[i][pj] * work[i];
+ }
+ work[j] = (work[j] - sum) / lmDiag[j];
+ }
+ }
+
+ // permute the components of z back to components of lmDir
+ for (int j = 0; j < lmDir.length; ++j) {
+ lmDir[permutation[j]] = work[j];
+ }
+ }
+
+ /**
+ * Decompose a matrix A as A.P = Q.R using Householder transforms.
+ * As suggested in the P. Lascaux and R. Theodor book
+ * Analyse numérique matricielle appliquée à
+ * l'art de l'ingénieur (Masson, 1986), instead of representing
+ * the Householder transforms with uk unit vectors such that:
+ *
+ * Hk = I - 2uk.ukt
+ *
+ * we use k non-unit vectors such that:
+ *
+ * Hk = I - betakvk.vkt
+ *
+ * where vk = ak - alphak ek.
+ * The betak coefficients are provided upon exit as recomputing
+ * them from the vk vectors would be costly.
+ * This decomposition handles rank deficient cases since the transformations
+ * are performed in non-increasing columns norms order thanks to columns
+ * pivoting. The diagonal elements of the R matrix are therefore also in
+ * non-increasing absolute values order.
+ *
+ * Side effects: fills weightedJacobian (reflector vectors below the diagonal,
+ * R above it), permutation, jacNorm, diagR, beta, and sets rank.
+ *
+ * @param jacobian Weighted Jacobian matrix at the current point.
+ * @exception ConvergenceException if the decomposition cannot be performed
+ */
+ private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
+ // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
+ // hence the multiplication by -1.
+ weightedJacobian = jacobian.scalarMultiply(-1).getData();
+
+ final int nR = weightedJacobian.length;
+ final int nC = weightedJacobian[0].length;
+
+ // initializations: identity permutation and per-column Euclidean norms
+ for (int k = 0; k < nC; ++k) {
+ permutation[k] = k;
+ double norm2 = 0;
+ for (int i = 0; i < nR; ++i) {
+ double akk = weightedJacobian[i][k];
+ norm2 += akk * akk;
+ }
+ jacNorm[k] = FastMath.sqrt(norm2);
+ }
+
+ // transform the matrix column after column
+ for (int k = 0; k < nC; ++k) {
+
+ // select the column with the greatest norm on active components
+ int nextColumn = -1;
+ double ak2 = Double.NEGATIVE_INFINITY;
+ for (int i = k; i < nC; ++i) {
+ double norm2 = 0;
+ for (int j = k; j < nR; ++j) {
+ double aki = weightedJacobian[j][permutation[i]];
+ norm2 += aki * aki;
+ }
+ if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
+ nR, nC);
+ }
+ if (norm2 > ak2) {
+ nextColumn = i;
+ ak2 = norm2;
+ }
+ }
+ // remaining columns are numerically negligible: truncate the rank here
+ if (ak2 <= qrRankingThreshold) {
+ rank = k;
+ return;
+ }
+ int pk = permutation[nextColumn];
+ permutation[nextColumn] = permutation[k];
+ permutation[k] = pk;
+
+ // choose alpha such that Hk.u = alpha ek
+ // (sign opposite to akk avoids cancellation in akk - alpha)
+ double akk = weightedJacobian[k][pk];
+ double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
+ double betak = 1.0 / (ak2 - akk * alpha);
+ beta[pk] = betak;
+
+ // transform the current column
+ diagR[pk] = alpha;
+ weightedJacobian[k][pk] -= alpha;
+
+ // transform the remaining columns
+ for (int dk = nC - 1 - k; dk > 0; --dk) {
+ double gamma = 0;
+ for (int j = k; j < nR; ++j) {
+ gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
+ }
+ gamma *= betak;
+ for (int j = k; j < nR; ++j) {
+ weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
+ }
+ }
+ }
+ rank = solvedCols;
+ }
+
+ /**
+ * Compute the product Qt.y for some Q.R. decomposition.
+ * Q is applied implicitly: each Householder reflector produced by
+ * {@link #qrDecomposition(RealMatrix)} is stored in a (permuted) column
+ * of the weighted Jacobian together with its beta coefficient.
+ *
+ * @param y vector to multiply (will be overwritten with the result)
+ */
+ private void qTy(double[] y) {
+ final int rows = weightedJacobian.length;
+ final int cols = weightedJacobian[0].length;
+
+ // apply the reflectors H1, H2, ... in order
+ for (int col = 0; col < cols; ++col) {
+ final int pCol = permutation[col];
+
+ // scalar factor gamma = beta * (v . y) over the active rows
+ double dot = 0;
+ for (int row = col; row < rows; ++row) {
+ dot += weightedJacobian[row][pCol] * y[row];
+ }
+ final double gamma = dot * beta[pCol];
+
+ // y <- y - gamma * v
+ for (int row = col; row < rows; ++row) {
+ y[row] -= gamma * weightedJacobian[row][pCol];
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java
new file mode 100644
index 000000000..169914711
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+/**
+ * This package provides optimization algorithms that require derivatives.
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java
new file mode 100644
index 000000000..75e3585c3
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+/**
+ * Algorithms for optimizing a vector function.
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/package-info.java b/src/main/java/org/apache/commons/math3/optim/package-info.java
new file mode 100644
index 000000000..080eb1b8c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/package-info.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+/**
+ *
+ * Generally, optimizers are algorithms that will either
+ * {@link GoalType#MINIMIZE minimize} or {@link GoalType#MAXIMIZE maximize}
+ * a scalar function, called the {@link ObjectiveFunction objective
+ * function}.
+ *
+ * For some scalar objective functions the gradient can be computed (analytically
+ * or numerically). Algorithms that use this knowledge are defined in the
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.gradient} package.
+ * The algorithms that do not need this additional information are located in
+ * the {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv} package.
+ *
+ *
+ *
+ * Some problems are solved more efficiently by algorithms that, instead of an
+ * objective function, need access to a
+ * {@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunction
+ * model function}: such a model predicts a set of values which the
+ * algorithm tries to match with a set of given
+ * {@link org.apache.commons.math3.optim.nonlinear.vector.Target target values}.
+ * Those algorithms are located in the
+ * {@link org.apache.commons.math3.optim.nonlinear.vector} package.
+ *
+ * Algorithms that also require the
+ * {@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian
+ * Jacobian matrix of the model} are located in the
+ * {@link org.apache.commons.math3.optim.nonlinear.vector.jacobian} package.
+ *
+ * The {@link org.apache.commons.math3.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer
+ * non-linear least-squares optimizers} are a specialization of the latter,
+ * that minimize the distance (called cost or χ²)
+ * between model and observations.
+ *
+ * For cases where the Jacobian cannot be provided, a utility class will
+ * {@link org.apache.commons.math3.optim.nonlinear.scalar.LeastSquaresConverter
+ * convert} a (vector) model into a (scalar) objective function.
+ *
+ *
+ *
+ * This package provides common functionality for the optimization algorithms.
+ * Abstract classes ({@link BaseOptimizer} and {@link BaseMultivariateOptimizer})
+ * define boiler-plate code for storing {@link MaxEval evaluations} and
+ * {@link MaxIter iterations} counters and a user-defined
+ * {@link ConvergenceChecker convergence checker}.
+ *
+ *
+ *
+ * For each of the optimizer types, there is a special implementation that
+ * wraps an optimizer instance and provides a "multi-start" feature: it calls
+ * the underlying optimizer several times with different starting points and
+ * returns the best optimum found, or all optima if so desired.
+ * This could be useful to avoid being trapped in a local extremum.
+ *
+ */
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java b/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java
new file mode 100644
index 000000000..7dabfa949
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.MaxCountExceededException;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.optim.GoalType;
+
+/**
+ * Provide an interval that brackets a local optimum of a function.
+ * This code is based on a Python implementation (from SciPy,
+ * module {@code optimize.py} v0.5).
+ *
+ * @version $Id: BracketFinder.java 1413186 2012-11-24 13:47:59Z erans $
+ * @since 2.2
+ */
+public class BracketFinder {
+ /** Tolerance to avoid division by zero. */
+ private static final double EPS_MIN = 1e-21;
+ /**
+ * Golden section.
+ */
+ private static final double GOLD = 1.618034;
+ /**
+ * Factor for expanding the interval.
+ */
+ private final double growLimit;
+ /**
+ * Counter for function evaluations.
+ */
+ private final Incrementor evaluations = new Incrementor();
+ /**
+ * Lower bound of the bracket.
+ */
+ private double lo;
+ /**
+ * Higher bound of the bracket.
+ */
+ private double hi;
+ /**
+ * Point inside the bracket.
+ */
+ private double mid;
+ /**
+ * Function value at {@link #lo}.
+ */
+ private double fLo;
+ /**
+ * Function value at {@link #hi}.
+ */
+ private double fHi;
+ /**
+ * Function value at {@link #mid}.
+ */
+ private double fMid;
+
+ /**
+ * Constructor with default values {@code 100, 50} (see the
+ * {@link #BracketFinder(double,int) other constructor}).
+ */
+ public BracketFinder() {
+ this(100, 50);
+ }
+
+ /**
+ * Create a bracketing interval finder.
+ *
+ * @param growLimit Expanding factor.
+ * @param maxEvaluations Maximum number of evaluations allowed for finding
+ * a bracketing interval.
+ * @throws NotStrictlyPositiveException if {@code growLimit <= 0} or
+ * {@code maxEvaluations <= 0}.
+ */
+ public BracketFinder(double growLimit,
+ int maxEvaluations) {
+ if (growLimit <= 0) {
+ throw new NotStrictlyPositiveException(growLimit);
+ }
+ if (maxEvaluations <= 0) {
+ throw new NotStrictlyPositiveException(maxEvaluations);
+ }
+
+ this.growLimit = growLimit;
+ evaluations.setMaximalCount(maxEvaluations);
+ }
+
+ /**
+ * Search new points that bracket a local optimum of the function.
+ * On return, {@link #getLo()}, {@link #getMid()} and {@link #getHi()}
+ * define the bracket (with {@code lo <= mid <= hi}) and the corresponding
+ * function values are available from the {@code getF*} accessors.
+ *
+ * @param func Function whose optimum should be bracketed.
+ * @param goal {@link GoalType Goal type}.
+ * @param xA Initial point.
+ * @param xB Initial point.
+ * @throws TooManyEvaluationsException if the maximum number of evaluations
+ * is exceeded.
+ */
+ public void search(UnivariateFunction func, GoalType goal, double xA, double xB) {
+ evaluations.resetCount();
+ final boolean isMinim = goal == GoalType.MINIMIZE;
+
+ double fA = eval(func, xA);
+ double fB = eval(func, xB);
+ // Ensure the search proceeds "downhill" from xA to xB.
+ if (isMinim ?
+ fA < fB :
+ fA > fB) {
+
+ double tmp = xA;
+ xA = xB;
+ xB = tmp;
+
+ tmp = fA;
+ fA = fB;
+ fB = tmp;
+ }
+
+ // First probe: golden-section extrapolation beyond xB.
+ double xC = xB + GOLD * (xB - xA);
+ double fC = eval(func, xC);
+
+ while (isMinim ? fC < fB : fC > fB) {
+ // Parabolic extrapolation through (xA, fA), (xB, fB), (xC, fC).
+ double tmp1 = (xB - xA) * (fB - fC);
+ double tmp2 = (xB - xC) * (fB - fA);
+
+ double val = tmp2 - tmp1;
+ // Guard the denominator against division by (near) zero.
+ double denom = Math.abs(val) < EPS_MIN ? 2 * EPS_MIN : 2 * val;
+
+ double w = xB - ((xB - xC) * tmp2 - (xB - xA) * tmp1) / denom;
+ double wLim = xB + growLimit * (xC - xB);
+
+ double fW;
+ if ((w - xC) * (xB - w) > 0) {
+ // Parabolic minimum candidate lies between xB and xC.
+ fW = eval(func, w);
+ if (isMinim ?
+ fW < fC :
+ fW > fC) {
+ xA = xB;
+ xB = w;
+ fA = fB;
+ fB = fW;
+ break;
+ } else if (isMinim ?
+ fW > fB :
+ fW < fB) {
+ xC = w;
+ fC = fW;
+ break;
+ }
+ // Parabolic fit did not help: fall back to golden-section step.
+ w = xC + GOLD * (xC - xB);
+ fW = eval(func, w);
+ } else if ((w - wLim) * (wLim - xC) >= 0) {
+ // Candidate beyond the allowed growth limit: clamp to it.
+ w = wLim;
+ fW = eval(func, w);
+ } else if ((w - wLim) * (xC - w) > 0) {
+ // Candidate between xC and the growth limit.
+ fW = eval(func, w);
+ if (isMinim ?
+ fW < fC :
+ fW > fC) {
+ xB = xC;
+ xC = w;
+ w = xC + GOLD * (xC - xB);
+ fB = fC;
+ fC = fW;
+ fW = eval(func, w);
+ }
+ } else {
+ // Reject the parabolic candidate: default golden-section step.
+ w = xC + GOLD * (xC - xB);
+ fW = eval(func, w);
+ }
+
+ // Shift the triplet and continue.
+ xA = xB;
+ fA = fB;
+ xB = xC;
+ fB = fC;
+ xC = w;
+ fC = fW;
+ }
+
+ lo = xA;
+ fLo = fA;
+ mid = xB;
+ fMid = fB;
+ hi = xC;
+ fHi = fC;
+
+ // Normalize so that lo <= hi (the loop may have searched "leftwards").
+ if (lo > hi) {
+ double tmp = lo;
+ lo = hi;
+ hi = tmp;
+
+ tmp = fLo;
+ fLo = fHi;
+ fHi = tmp;
+ }
+ }
+
+ /**
+ * @return the maximum number of evaluations.
+ */
+ public int getMaxEvaluations() {
+ return evaluations.getMaximalCount();
+ }
+
+ /**
+ * @return the number of evaluations performed by the last call to
+ * {@link #search(UnivariateFunction,GoalType,double,double) search}.
+ */
+ public int getEvaluations() {
+ return evaluations.getCount();
+ }
+
+ /**
+ * @return the lower bound of the bracket.
+ * @see #getFLo()
+ */
+ public double getLo() {
+ return lo;
+ }
+
+ /**
+ * Get function value at {@link #getLo()}.
+ * @return function value at {@link #getLo()}
+ */
+ public double getFLo() {
+ return fLo;
+ }
+
+ /**
+ * @return the higher bound of the bracket.
+ * @see #getFHi()
+ */
+ public double getHi() {
+ return hi;
+ }
+
+ /**
+ * Get function value at {@link #getHi()}.
+ * @return function value at {@link #getHi()}
+ */
+ public double getFHi() {
+ return fHi;
+ }
+
+ /**
+ * @return a point in the middle of the bracket.
+ * @see #getFMid()
+ */
+ public double getMid() {
+ return mid;
+ }
+
+ /**
+ * Get function value at {@link #getMid()}.
+ * @return function value at {@link #getMid()}
+ */
+ public double getFMid() {
+ return fMid;
+ }
+
+ /**
+ * Evaluate the function, counting the evaluation against the allowed budget.
+ *
+ * @param f Function.
+ * @param x Argument.
+ * @return {@code f(x)}
+ * @throws TooManyEvaluationsException if the maximal number of evaluations is
+ * exceeded.
+ */
+ private double eval(UnivariateFunction f, double x) {
+ try {
+ evaluations.incrementCount();
+ } catch (MaxCountExceededException e) {
+ throw new TooManyEvaluationsException(e.getMax());
+ }
+ return f.value(x);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java b/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java
new file mode 100644
index 000000000..5ca957e0f
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.GoalType;
+
+/**
+ * For a function defined on some interval {@code (lo, hi)}, this class
+ * finds an approximation {@code x} to the point at which the function
+ * attains its minimum.
+ * It implements Richard Brent's algorithm (from his book "Algorithms for
+ * Minimization without Derivatives", p. 79) for finding minima of real
+ * univariate functions.
+ *
+ * This code is an adaptation, partly based on the Python code from SciPy
+ * (module "optimize.py" v0.5); the original algorithm is also modified
+ *
+ * - to use an initial guess provided by the user,
+ * - to ensure that the best point encountered is the one returned.
+ *
+ *
+ * @version $Id: BrentOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class BrentOptimizer extends UnivariateOptimizer {
+ /**
+ * Golden section.
+ */
+ private static final double GOLDEN_SECTION = 0.5 * (3 - FastMath.sqrt(5));
+ /**
+ * Minimum relative tolerance.
+ */
+ private static final double MIN_RELATIVE_TOLERANCE = 2 * FastMath.ulp(1d);
+ /**
+ * Relative threshold.
+ */
+ private final double relativeThreshold;
+ /**
+ * Absolute threshold.
+ */
+ private final double absoluteThreshold;
+
+ /**
+ * The arguments are used to implement the original stopping criterion
+ * of Brent's algorithm.
+ * {@code abs} and {@code rel} define a tolerance
+ * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
+ * 2 macheps and preferably not much less than sqrt(macheps),
+ * where macheps is the relative machine precision. {@code abs} must
+ * be positive.
+ *
+ * @param rel Relative threshold.
+ * @param abs Absolute threshold.
+ * @param checker Additional, user-defined, convergence checking
+ * procedure.
+ * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+ * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+ */
+ public BrentOptimizer(double rel,
+ double abs,
+ ConvergenceChecker<UnivariatePointValuePair> checker) {
+ super(checker);
+
+ if (rel < MIN_RELATIVE_TOLERANCE) {
+ throw new NumberIsTooSmallException(rel, MIN_RELATIVE_TOLERANCE, true);
+ }
+ if (abs <= 0) {
+ throw new NotStrictlyPositiveException(abs);
+ }
+
+ relativeThreshold = rel;
+ absoluteThreshold = abs;
+ }
+
+ /**
+ * The arguments are used for implementing the original stopping criterion
+ * of Brent's algorithm.
+ * {@code abs} and {@code rel} define a tolerance
+ * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
+ * 2 macheps and preferably not much less than sqrt(macheps),
+ * where macheps is the relative machine precision. {@code abs} must
+ * be positive.
+ *
+ * @param rel Relative threshold.
+ * @param abs Absolute threshold.
+ * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+ * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+ */
+ public BrentOptimizer(double rel,
+ double abs) {
+ this(rel, abs, null);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected UnivariatePointValuePair doOptimize() {
+ final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
+ final double lo = getMin();
+ final double mid = getStartValue();
+ final double hi = getMax();
+
+ // Optional additional convergence criteria.
+ final ConvergenceChecker checker
+ = getConvergenceChecker();
+
+ double a;
+ double b;
+ if (lo < hi) {
+ a = lo;
+ b = hi;
+ } else {
+ a = hi;
+ b = lo;
+ }
+
+ double x = mid;
+ double v = x;
+ double w = x;
+ double d = 0;
+ double e = 0;
+ double fx = computeObjectiveValue(x);
+ if (!isMinim) {
+ fx = -fx;
+ }
+ double fv = fx;
+ double fw = fx;
+
+ UnivariatePointValuePair previous = null;
+ UnivariatePointValuePair current
+ = new UnivariatePointValuePair(x, isMinim ? fx : -fx);
+ // Best point encountered so far (which is the initial guess).
+ UnivariatePointValuePair best = current;
+
+ int iter = 0;
+ while (true) {
+ final double m = 0.5 * (a + b);
+ final double tol1 = relativeThreshold * FastMath.abs(x) + absoluteThreshold;
+ final double tol2 = 2 * tol1;
+
+ // Default stopping criterion.
+ final boolean stop = FastMath.abs(x - m) <= tol2 - 0.5 * (b - a);
+ if (!stop) {
+ double p = 0;
+ double q = 0;
+ double r = 0;
+ double u = 0;
+
+ if (FastMath.abs(e) > tol1) { // Fit parabola.
+ r = (x - w) * (fx - fv);
+ q = (x - v) * (fx - fw);
+ p = (x - v) * q - (x - w) * r;
+ q = 2 * (q - r);
+
+ if (q > 0) {
+ p = -p;
+ } else {
+ q = -q;
+ }
+
+ r = e;
+ e = d;
+
+ if (p > q * (a - x) &&
+ p < q * (b - x) &&
+ FastMath.abs(p) < FastMath.abs(0.5 * q * r)) {
+ // Parabolic interpolation step.
+ d = p / q;
+ u = x + d;
+
+ // f must not be evaluated too close to a or b.
+ if (u - a < tol2 || b - u < tol2) {
+ if (x <= m) {
+ d = tol1;
+ } else {
+ d = -tol1;
+ }
+ }
+ } else {
+ // Golden section step.
+ if (x < m) {
+ e = b - x;
+ } else {
+ e = a - x;
+ }
+ d = GOLDEN_SECTION * e;
+ }
+ } else {
+ // Golden section step.
+ if (x < m) {
+ e = b - x;
+ } else {
+ e = a - x;
+ }
+ d = GOLDEN_SECTION * e;
+ }
+
+ // Update by at least "tol1".
+ if (FastMath.abs(d) < tol1) {
+ if (d >= 0) {
+ u = x + tol1;
+ } else {
+ u = x - tol1;
+ }
+ } else {
+ u = x + d;
+ }
+
+ double fu = computeObjectiveValue(u);
+ if (!isMinim) {
+ fu = -fu;
+ }
+
+ // User-defined convergence checker.
+ previous = current;
+ current = new UnivariatePointValuePair(u, isMinim ? fu : -fu);
+ best = best(best,
+ best(previous,
+ current,
+ isMinim),
+ isMinim);
+
+ if (checker != null) {
+ if (checker.converged(iter, previous, current)) {
+ return best;
+ }
+ }
+
+ // Update a, b, v, w and x.
+ if (fu <= fx) {
+ if (u < x) {
+ b = x;
+ } else {
+ a = x;
+ }
+ v = w;
+ fv = fw;
+ w = x;
+ fw = fx;
+ x = u;
+ fx = fu;
+ } else {
+ if (u < x) {
+ a = u;
+ } else {
+ b = u;
+ }
+ if (fu <= fw ||
+ Precision.equals(w, x)) {
+ v = w;
+ fv = fw;
+ w = u;
+ fw = fu;
+ } else if (fu <= fv ||
+ Precision.equals(v, x) ||
+ Precision.equals(v, w)) {
+ v = u;
+ fv = fu;
+ }
+ }
+ } else { // Default termination (Brent's criterion).
+ return best(best,
+ best(previous,
+ current,
+ isMinim),
+ isMinim);
+ }
+ ++iter;
+ }
+ }
+
+ /**
+ * Selects the best of two points.
+ *
+ * @param a Point and value.
+ * @param b Point and value.
+ * @param isMinim {@code true} if the selected point must be the one with
+ * the lowest value.
+ * @return the best point, or {@code null} if {@code a} and {@code b} are
+ * both {@code null}. When {@code a} and {@code b} have the same function
+ * value, {@code a} is returned.
+ */
+ private UnivariatePointValuePair best(UnivariatePointValuePair a,
+ UnivariatePointValuePair b,
+ boolean isMinim) {
+ if (a == null) {
+ return b;
+ }
+ if (b == null) {
+ return a;
+ }
+
+ if (isMinim) {
+ return a.getValue() <= b.getValue() ? a : b;
+ } else {
+ return a.getValue() >= b.getValue() ? a : b;
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java
new file mode 100644
index 000000000..8a1692004
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.univariate;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.random.RandomGenerator;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Special implementation of the {@link UnivariateOptimizer} interface
+ * adding multi-start features to an existing optimizer.
+ *
+ * This class wraps an optimizer in order to use it several times in
+ * turn with different starting points (trying to avoid being trapped
+ * in a local extremum when looking for a global one).
+ *
+ * @version $Id$
+ * @since 3.0
+ */
+public class MultiStartUnivariateOptimizer
+ extends UnivariateOptimizer {
+ /** Underlying classical optimizer. */
+ private final UnivariateOptimizer optimizer;
+ /** Number of evaluations already performed for all starts. */
+ private int totalEvaluations;
+ /** Number of starts to go. */
+ private int starts;
+ /** Random generator for multi-start. */
+ private RandomGenerator generator;
+ /** Found optima. */
+ private UnivariatePointValuePair[] optima;
+ /** Optimization data. */
+ private OptimizationData[] optimData;
+ /**
+ * Location in {@link #optimData} where the updated maximum
+ * number of evaluations will be stored.
+ */
+ private int maxEvalIndex = -1;
+ /**
+ * Location in {@link #optimData} where the updated start value
+ * will be stored.
+ */
+ private int searchIntervalIndex = -1;
+
+ /**
+ * Create a multi-start optimizer from a single-start optimizer.
+ *
+ * @param optimizer Single-start optimizer to wrap.
+ * @param starts Number of starts to perform. If {@code starts == 1},
+ * the {@code optimize} methods will return the same solution as
+ * {@code optimizer} would.
+ * @param generator Random generator to use for restarts.
+ * @throws NullArgumentException if {@code optimizer} or {@code generator}
+ * is {@code null}.
+ * @throws NotStrictlyPositiveException if {@code starts < 1}.
+ */
+ public MultiStartUnivariateOptimizer(final UnivariateOptimizer optimizer,
+ final int starts,
+ final RandomGenerator generator) {
+ super(optimizer.getConvergenceChecker());
+
+ if (optimizer == null ||
+ generator == null) {
+ throw new NullArgumentException();
+ }
+ if (starts < 1) {
+ throw new NotStrictlyPositiveException(starts);
+ }
+
+ this.optimizer = optimizer;
+ this.starts = starts;
+ this.generator = generator;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getEvaluations() {
+ return totalEvaluations;
+ }
+
+ /**
+ * Gets all the optima found during the last call to {@code optimize}.
+ * The optimizer stores all the optima found during a set of
+ * restarts. The {@code optimize} method returns the best point only.
+ * This method returns all the points found at the end of each start,
+ * including the best one already returned by the {@code optimize} method.
+ *
+ * The returned array has one element for each start as specified
+ * in the constructor. It is ordered with the results from the
+ * runs that did converge first, sorted from best to worst
+ * objective value (i.e in ascending order if minimizing and in
+ * descending order if maximizing), followed by {@code null} elements
+ * corresponding to the runs that did not converge. This means all
+ * elements will be {@code null} if the {@code optimize} method did throw
+ * an exception.
+ * This also means that if the first element is not {@code null}, it is
+ * the best point found across all starts.
+ *
+ * @return an array containing the optima.
+ * @throws MathIllegalStateException if {@link #optimize(OptimizationData[])
+ * optimize} has not been called.
+ */
+ public UnivariatePointValuePair[] getOptima() {
+ if (optima == null) {
+ throw new MathIllegalStateException(LocalizedFormats.NO_OPTIMUM_COMPUTED_YET);
+ }
+ return optima.clone();
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws MathIllegalStateException if {@code optData} does not contain an
+ * instance of {@link MaxEval} or {@link SearchInterval}.
+ */
+ @Override
+ public UnivariatePointValuePair optimize(OptimizationData... optData) {
+ // Store arguments in order to pass them to the internal optimizer.
+ optimData = optData;
+ // Set up base class and perform computations.
+ return super.optimize(optData);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected UnivariatePointValuePair doOptimize() {
+ // Remove all instances of "MaxEval" and "SearchInterval" from the
+ // array that will be passed to the internal optimizer.
+ // The former is to enforce smaller numbers of allowed evaluations
+ // (according to how many have been used up already), and the latter
+ // to impose a different start value for each start.
+ for (int i = 0; i < optimData.length; i++) {
+ if (optimData[i] instanceof MaxEval) {
+ optimData[i] = null;
+ maxEvalIndex = i;
+ continue;
+ }
+ if (optimData[i] instanceof SearchInterval) {
+ optimData[i] = null;
+ searchIntervalIndex = i;
+ continue;
+ }
+ }
+ if (maxEvalIndex == -1) {
+ throw new MathIllegalStateException();
+ }
+ if (searchIntervalIndex == -1) {
+ throw new MathIllegalStateException();
+ }
+
+ RuntimeException lastException = null;
+ optima = new UnivariatePointValuePair[starts];
+ totalEvaluations = 0;
+
+ final int maxEval = getMaxEvaluations();
+ final double min = getMin();
+ final double max = getMax();
+ final double startValue = getStartValue();
+
+ // Multi-start loop.
+ for (int i = 0; i < starts; i++) {
+ // CHECKSTYLE: stop IllegalCatch
+ try {
+ // Decrease number of allowed evaluations.
+ optimData[maxEvalIndex] = new MaxEval(maxEval - totalEvaluations);
+ // New start value.
+ final double s = (i == 0) ?
+ startValue :
+ min + generator.nextDouble() * (max - min);
+ optimData[searchIntervalIndex] = new SearchInterval(min, max, s);
+ // Optimize.
+ optima[i] = optimizer.optimize(optimData);
+ } catch (RuntimeException mue) {
+ lastException = mue;
+ optima[i] = null;
+ }
+ // CHECKSTYLE: resume IllegalCatch
+
+ totalEvaluations += optimizer.getEvaluations();
+ }
+
+ sortPairs(getGoalType());
+
+ if (optima[0] == null) {
+ throw lastException; // Cannot be null if starts >= 1.
+ }
+
+ // Return the point with the best objective function value.
+ return optima[0];
+ }
+
+ /**
+ * Sort the optima from best to worst, followed by {@code null} elements
+ * (which correspond to starts that did not converge).
+ * @param goal Goal type.
+ */
+ private void sortPairs(final GoalType goal) {
+ Arrays.sort(optima, new Comparator<UnivariatePointValuePair>() {
+ public int compare(final UnivariatePointValuePair o1,
+ final UnivariatePointValuePair o2) {
+ if (o1 == null) {
+ return (o2 == null) ? 0 : 1;
+ } else if (o2 == null) {
+ return -1;
+ }
+ final double v1 = o1.getValue();
+ final double v2 = o2.getValue();
+ return (goal == GoalType.MINIMIZE) ?
+ Double.compare(v1, v2) : Double.compare(v2, v1);
+ }
+ });
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java b/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java
new file mode 100644
index 000000000..f0b2c0955
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+
+/**
+ * Search interval and (optional) start value.
+ *
+ * Immutable class.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class SearchInterval implements OptimizationData {
+ /** Lower bound. */
+ private final double lower;
+ /** Upper bound. */
+ private final double upper;
+ /** Start value. */
+ private final double start;
+
+ /**
+ * @param lo Lower bound.
+ * @param hi Upper bound.
+ * @param init Start value.
+ * @throws NumberIsTooLargeException if {@code lo >= hi}.
+ * @throws OutOfRangeException if {@code init < lo} or {@code init > hi}.
+ */
+ public SearchInterval(double lo,
+ double hi,
+ double init) {
+ if (lo >= hi) {
+ throw new NumberIsTooLargeException(lo, hi, false);
+ }
+ if (init < lo ||
+ init > hi) {
+ throw new OutOfRangeException(init, lo, hi);
+ }
+
+ lower = lo;
+ upper = hi;
+ start = init;
+ }
+
+ /**
+ * @param lo Lower bound.
+ * @param hi Upper bound.
+ * @throws NumberIsTooLargeException if {@code lo >= hi}.
+ */
+ public SearchInterval(double lo,
+ double hi) {
+ this(lo, hi, 0.5 * (lo + hi));
+ }
+
+ /**
+ * Gets the lower bound.
+ *
+ * @return the lower bound.
+ */
+ public double getMin() {
+ return lower;
+ }
+ /**
+ * Gets the upper bound.
+ *
+ * @return the upper bound.
+ */
+ public double getMax() {
+ return upper;
+ }
+ /**
+ * Gets the start value.
+ *
+ * @return the start value.
+ */
+ public double getStartValue() {
+ return start;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueChecker.java b/src/main/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueChecker.java
new file mode 100644
index 000000000..3f8b87324
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueChecker.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.optim.AbstractConvergenceChecker;
+
+/**
+ * Simple implementation of the
+ * {@link org.apache.commons.math3.optim.ConvergenceChecker} interface
+ * that uses only objective function values.
+ *
+ * Convergence is considered to have been reached if either the relative
+ * difference between the objective function values is smaller than a
+ * threshold or if either the absolute difference between the objective
+ * function values is smaller than another threshold.
+ *
+ * The {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)
+ * converged} method will also return {@code true} if the number of iterations
+ * has been set (see {@link #SimpleUnivariateValueChecker(double,double,int)
+ * this constructor}).
+ *
+ * @version $Id: SimpleUnivariateValueChecker.java 1413171 2012-11-24 11:11:10Z erans $
+ * @since 3.1
+ */
+public class SimpleUnivariateValueChecker
+ extends AbstractConvergenceChecker {
+ /**
+ * If {@link #maxIterationCount} is set to this value, the number of
+ * iterations will never cause
+ * {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)}
+ * to return {@code true}.
+ */
+ private static final int ITERATION_CHECK_DISABLED = -1;
+ /**
+ * Number of iterations after which the
+ * {@link #converged(int,UnivariatePointValuePair,UnivariatePointValuePair)}
+ * method will return true (unless the check is disabled).
+ */
+ private final int maxIterationCount;
+
+ /** Build an instance with specified thresholds.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ */
+ public SimpleUnivariateValueChecker(final double relativeThreshold,
+ final double absoluteThreshold) {
+ super(relativeThreshold, absoluteThreshold);
+ maxIterationCount = ITERATION_CHECK_DISABLED;
+ }
+
+ /**
+ * Builds an instance with specified thresholds.
+ *
+ * In order to perform only relative checks, the absolute tolerance
+ * must be set to a negative value. In order to perform only absolute
+ * checks, the relative tolerance must be set to a negative value.
+ *
+ * @param relativeThreshold relative tolerance threshold
+ * @param absoluteThreshold absolute tolerance threshold
+ * @param maxIter Maximum iteration count.
+ * @throws NotStrictlyPositiveException if {@code maxIter <= 0}.
+ *
+ * @since 3.1
+ */
+ public SimpleUnivariateValueChecker(final double relativeThreshold,
+ final double absoluteThreshold,
+ final int maxIter) {
+ super(relativeThreshold, absoluteThreshold);
+
+ if (maxIter <= 0) {
+ throw new NotStrictlyPositiveException(maxIter);
+ }
+ maxIterationCount = maxIter;
+ }
+
+ /**
+ * Check if the optimization algorithm has converged considering the
+ * last two points.
+ * This method may be called several time from the same algorithm
+ * iteration with different points. This can be detected by checking the
+ * iteration number at each call if needed. Each time this method is
+ * called, the previous and current point correspond to points with the
+ * same role at each iteration, so they can be compared. As an example,
+ * simplex-based algorithms call this method for all points of the simplex,
+ * not only for the best or worst ones.
+ *
+ * @param iteration Index of current iteration
+ * @param previous Best point in the previous iteration.
+ * @param current Best point in the current iteration.
+ * @return {@code true} if the algorithm has converged.
+ */
+ @Override
+ public boolean converged(final int iteration,
+ final UnivariatePointValuePair previous,
+ final UnivariatePointValuePair current) {
+ if (maxIterationCount != ITERATION_CHECK_DISABLED) {
+ if (iteration >= maxIterationCount) {
+ return true;
+ }
+ }
+
+ final double p = previous.getValue();
+ final double c = current.getValue();
+ final double difference = FastMath.abs(p - c);
+ final double size = FastMath.max(FastMath.abs(p), FastMath.abs(c));
+ return difference <= size * getRelativeThreshold() ||
+ difference <= getAbsoluteThreshold();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateObjectiveFunction.java b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateObjectiveFunction.java
new file mode 100644
index 000000000..d3a0d3e24
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateObjectiveFunction.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Scalar function to be optimized.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class UnivariateObjectiveFunction implements OptimizationData {
+ /** Function to be optimized. */
+ private final UnivariateFunction function;
+
+ /**
+ * @param f Function to be optimized.
+ */
+ public UnivariateObjectiveFunction(UnivariateFunction f) {
+ function = f;
+ }
+
+ /**
+ * Gets the function to be optimized.
+ *
+ * @return the objective function.
+ */
+ public UnivariateFunction getObjectiveFunction() {
+ return function;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateOptimizer.java b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateOptimizer.java
new file mode 100644
index 000000000..59d9fabaa
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariateOptimizer.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.optim.BaseOptimizer;
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+
+/**
+ * Base class for a univariate scalar function optimizer.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public abstract class UnivariateOptimizer
+ extends BaseOptimizer {
+ /** Objective function. */
+ private UnivariateFunction function;
+ /** Type of optimization. */
+ private GoalType goal;
+ /** Initial guess. */
+ private double start;
+ /** Lower bound. */
+ private double min;
+ /** Upper bound. */
+ private double max;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected UnivariateOptimizer(ConvergenceChecker checker) {
+ super(checker);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link GoalType}
+ * - {@link SearchInterval}
+ * - {@link UnivariateObjectiveFunction}
+ *
+ * @return {@inheritDoc}
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ */
+ public UnivariatePointValuePair optimize(OptimizationData... optData)
+ throws TooManyEvaluationsException {
+ // Retrieve settings.
+ parseOptimizationData(optData);
+ // Perform computation.
+ return super.optimize(optData);
+ }
+
+ /**
+ * @return the optimization type.
+ */
+ public GoalType getGoalType() {
+ return goal;
+ }
+
+ /**
+ * Scans the list of (required and optional) optimization data that
+ * characterize the problem.
+ *
+ * @param optData Optimization data.
+ * The following data will be looked for:
+ *
+ * - {@link GoalType}
+ * - {@link SearchInterval}
+ * - {@link UnivariateObjectiveFunction}
+ *
+ */
+ private void parseOptimizationData(OptimizationData... optData) {
+ // The existing values (as set by the previous call) are reused if
+ // not provided in the argument list.
+ for (OptimizationData data : optData) {
+ if (data instanceof SearchInterval) {
+ final SearchInterval interval = (SearchInterval) data;
+ min = interval.getMin();
+ max = interval.getMax();
+ start = interval.getStartValue();
+ continue;
+ }
+ if (data instanceof UnivariateObjectiveFunction) {
+ function = ((UnivariateObjectiveFunction) data).getObjectiveFunction();
+ continue;
+ }
+ if (data instanceof GoalType) {
+ goal = (GoalType) data;
+ continue;
+ }
+ }
+ }
+
+ /**
+ * @return the initial guess.
+ */
+ public double getStartValue() {
+ return start;
+ }
+ /**
+ * @return the lower bounds.
+ */
+ public double getMin() {
+ return min;
+ }
+ /**
+ * @return the upper bounds.
+ */
+ public double getMax() {
+ return max;
+ }
+
+ /**
+ * Computes the objective function value.
+ * This method must be called by subclasses to enforce the
+ * evaluation counter limit.
+ *
+ * @param x Point at which the objective function must be evaluated.
+ * @return the objective function value at the specified point.
+ * @throws TooManyEvaluationsException if the maximal number of
+ * evaluations is exceeded.
+ */
+ protected double computeObjectiveValue(double x) {
+ super.incrementEvaluationCount();
+ return function.value(x);
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/UnivariatePointValuePair.java b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariatePointValuePair.java
new file mode 100644
index 000000000..12241a92e
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/UnivariatePointValuePair.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.univariate;
+
+import java.io.Serializable;
+
+/**
+ * This class holds a point and the value of an objective function at this
+ * point.
+ * This is a simple immutable container.
+ *
+ * @version $Id: UnivariatePointValuePair.java 1364392 2012-07-22 18:27:12Z tn $
+ * @since 3.0
+ */
+public class UnivariatePointValuePair implements Serializable {
+ /** Serializable version identifier. */
+ private static final long serialVersionUID = 1003888396256744753L;
+ /** Point. */
+ private final double point;
+ /** Value of the objective function at the point. */
+ private final double value;
+
+ /**
+ * Build a point/objective function value pair.
+ *
+ * @param point Point.
+ * @param value Value of an objective function at the point
+ */
+ public UnivariatePointValuePair(final double point,
+ final double value) {
+ this.point = point;
+ this.value = value;
+ }
+
+ /**
+ * Get the point.
+ *
+ * @return the point.
+ */
+ public double getPoint() {
+ return point;
+ }
+
+ /**
+ * Get the value of the objective function.
+ *
+ * @return the stored value of the objective function.
+ */
+ public double getValue() {
+ return value;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/package-info.java b/src/main/java/org/apache/commons/math3/optim/univariate/package-info.java
new file mode 100644
index 000000000..e1532c6a3
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optim/univariate/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * One-dimensional optimization algorithms.
+ */
+package org.apache.commons.math3.optim.univariate;
+
diff --git a/src/main/resources/assets/org/apache/commons/math3/exception/util/LocalizedFormats_fr.properties b/src/main/resources/assets/org/apache/commons/math3/exception/util/LocalizedFormats_fr.properties
index 0cb7bba3c..118c6e00d 100644
--- a/src/main/resources/assets/org/apache/commons/math3/exception/util/LocalizedFormats_fr.properties
+++ b/src/main/resources/assets/org/apache/commons/math3/exception/util/LocalizedFormats_fr.properties
@@ -119,6 +119,7 @@ INVALID_REGRESSION_ARRAY= la longueur du tableau de donn\u00e9es = {0} ne corres
INVALID_REGRESSION_OBSERVATION = la longueur du tableau de variables explicatives ({0}) ne correspond pas au nombre de variables dans le mod\u00e8le ({1})
INVALID_ROUNDING_METHOD = m\u00e9thode d''arondi {0} invalide, m\u00e9thodes valides : {1} ({2}), {3} ({4}), {5} ({6}), {7} ({8}), {9} ({10}), {11} ({12}), {13} ({14}), {15} ({16})
ITERATOR_EXHAUSTED = it\u00e9ration achev\u00e9e
+ITERATIONS = it\u00e9rations
LCM_OVERFLOW_32_BITS = d\u00e9passement de capacit\u00e9 : le MCM de {0} et {1} vaut 2^31
LCM_OVERFLOW_64_BITS = d\u00e9passement de capacit\u00e9 : le MCM de {0} et {1} vaut 2^63
LIST_OF_CHROMOSOMES_BIGGER_THAN_POPULATION_SIZE = la liste des chromosomes d\u00e9passe maxPopulationSize
diff --git a/src/test/java/org/apache/commons/math3/exception/util/LocalizedFormatsTest.java b/src/test/java/org/apache/commons/math3/exception/util/LocalizedFormatsTest.java
index 364453f98..195f44ffe 100644
--- a/src/test/java/org/apache/commons/math3/exception/util/LocalizedFormatsTest.java
+++ b/src/test/java/org/apache/commons/math3/exception/util/LocalizedFormatsTest.java
@@ -30,7 +30,7 @@ public class LocalizedFormatsTest {
@Test
public void testMessageNumber() {
- Assert.assertEquals(311, LocalizedFormats.values().length);
+ Assert.assertEquals(312, LocalizedFormats.values().length);
}
@Test
diff --git a/src/test/java/org/apache/commons/math3/fitting/CurveFitterTest.java b/src/test/java/org/apache/commons/math3/fitting/CurveFitterTest.java
new file mode 100644
index 000000000..7f07e3de6
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/fitting/CurveFitterTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
+import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class CurveFitterTest {
+ @Test
+ public void testMath303() {
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ CurveFitter fitter = new CurveFitter(optimizer);
+ fitter.addObservedPoint(2.805d, 0.6934785852953367d);
+ fitter.addObservedPoint(2.74333333333333d, 0.6306772025518496d);
+ fitter.addObservedPoint(1.655d, 0.9474675497289684);
+ fitter.addObservedPoint(1.725d, 0.9013594835804194d);
+
+ ParametricUnivariateFunction sif = new SimpleInverseFunction();
+
+ double[] initialguess1 = new double[1];
+ initialguess1[0] = 1.0d;
+ Assert.assertEquals(1, fitter.fit(sif, initialguess1).length);
+
+ double[] initialguess2 = new double[2];
+ initialguess2[0] = 1.0d;
+ initialguess2[1] = .5d;
+ Assert.assertEquals(2, fitter.fit(sif, initialguess2).length);
+ }
+
+ @Test
+ public void testMath304() {
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ CurveFitter fitter = new CurveFitter(optimizer);
+ fitter.addObservedPoint(2.805d, 0.6934785852953367d);
+ fitter.addObservedPoint(2.74333333333333d, 0.6306772025518496d);
+ fitter.addObservedPoint(1.655d, 0.9474675497289684);
+ fitter.addObservedPoint(1.725d, 0.9013594835804194d);
+
+ ParametricUnivariateFunction sif = new SimpleInverseFunction();
+
+ double[] initialguess1 = new double[1];
+ initialguess1[0] = 1.0d;
+ Assert.assertEquals(1.6357215104109237, fitter.fit(sif, initialguess1)[0], 1.0e-14);
+
+ double[] initialguess2 = new double[1];
+ initialguess2[0] = 10.0d;
+ Assert.assertEquals(1.6357215104109237, fitter.fit(sif, initialguess2)[0], 1.0e-14);
+ }
+
+ @Test
+ public void testMath372() {
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ CurveFitter curveFitter = new CurveFitter(optimizer);
+
+ curveFitter.addObservedPoint( 15, 4443);
+ curveFitter.addObservedPoint( 31, 8493);
+ curveFitter.addObservedPoint( 62, 17586);
+ curveFitter.addObservedPoint(125, 30582);
+ curveFitter.addObservedPoint(250, 45087);
+ curveFitter.addObservedPoint(500, 50683);
+
+ ParametricUnivariateFunction f = new ParametricUnivariateFunction() {
+ public double value(double x, double ... parameters) {
+ double a = parameters[0];
+ double b = parameters[1];
+ double c = parameters[2];
+ double d = parameters[3];
+
+ return d + ((a - d) / (1 + FastMath.pow(x / c, b)));
+ }
+
+ public double[] gradient(double x, double ... parameters) {
+ double a = parameters[0];
+ double b = parameters[1];
+ double c = parameters[2];
+ double d = parameters[3];
+
+ double[] gradients = new double[4];
+ double den = 1 + FastMath.pow(x / c, b);
+
+ // derivative with respect to a
+ gradients[0] = 1 / den;
+
+ // derivative with respect to b
+ // in the reported (invalid) issue, there was a sign error here
+ gradients[1] = -((a - d) * FastMath.pow(x / c, b) * FastMath.log(x / c)) / (den * den);
+
+ // derivative with respect to c
+ gradients[2] = (b * FastMath.pow(x / c, b - 1) * (x / (c * c)) * (a - d)) / (den * den);
+
+ // derivative with respect to d
+ gradients[3] = 1 - (1 / den);
+
+ return gradients;
+
+ }
+ };
+
+ double[] initialGuess = new double[] { 1500, 0.95, 65, 35000 };
+ double[] estimatedParameters = curveFitter.fit(f, initialGuess);
+
+ Assert.assertEquals( 2411.00, estimatedParameters[0], 500.00);
+ Assert.assertEquals( 1.62, estimatedParameters[1], 0.04);
+ Assert.assertEquals( 111.22, estimatedParameters[2], 0.30);
+ Assert.assertEquals(55347.47, estimatedParameters[3], 300.00);
+ Assert.assertTrue(optimizer.getRMS() < 600.0);
+ }
+
+ private static class SimpleInverseFunction implements ParametricUnivariateFunction {
+
+ public double value(double x, double ... parameters) {
+ return parameters[0] / x + (parameters.length < 2 ? 0 : parameters[1]);
+ }
+
+ public double[] gradient(double x, double ... doubles) {
+ double[] gradientVector = new double[doubles.length];
+ gradientVector[0] = 1 / x;
+ if (doubles.length >= 2) {
+ gradientVector[1] = 1;
+ }
+ return gradientVector;
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/fitting/GaussianFitterTest.java b/src/test/java/org/apache/commons/math3/fitting/GaussianFitterTest.java
new file mode 100644
index 000000000..8462fe1c6
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/fitting/GaussianFitterTest.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
+import org.apache.commons.math3.exception.MathIllegalArgumentException;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests {@link GaussianFitter}.
+ *
+ * @since 2.2
+ * @version $Id: GaussianFitterTest.java 1349707 2012-06-13 09:30:56Z erans $
+ */
+public class GaussianFitterTest {
+ /** Good data. */
+ protected static final double[][] DATASET1 = new double[][] {
+ {4.0254623, 531026.0},
+ {4.02804905, 664002.0},
+ {4.02934242, 787079.0},
+ {4.03128248, 984167.0},
+ {4.03386923, 1294546.0},
+ {4.03580929, 1560230.0},
+ {4.03839603, 1887233.0},
+ {4.0396894, 2113240.0},
+ {4.04162946, 2375211.0},
+ {4.04421621, 2687152.0},
+ {4.04550958, 2862644.0},
+ {4.04744964, 3078898.0},
+ {4.05003639, 3327238.0},
+ {4.05132976, 3461228.0},
+ {4.05326982, 3580526.0},
+ {4.05585657, 3576946.0},
+ {4.05779662, 3439750.0},
+ {4.06038337, 3220296.0},
+ {4.06167674, 3070073.0},
+ {4.0636168, 2877648.0},
+ {4.06620355, 2595848.0},
+ {4.06749692, 2390157.0},
+ {4.06943698, 2175960.0},
+ {4.07202373, 1895104.0},
+ {4.0733171, 1687576.0},
+ {4.07525716, 1447024.0},
+ {4.0778439, 1130879.0},
+ {4.07978396, 904900.0},
+ {4.08237071, 717104.0},
+ {4.08366408, 620014.0}
+ };
+ /** Poor data: right of peak not symmetric with left of peak. */
+ protected static final double[][] DATASET2 = new double[][] {
+ {-20.15, 1523.0},
+ {-19.65, 1566.0},
+ {-19.15, 1592.0},
+ {-18.65, 1927.0},
+ {-18.15, 3089.0},
+ {-17.65, 6068.0},
+ {-17.15, 14239.0},
+ {-16.65, 34124.0},
+ {-16.15, 64097.0},
+ {-15.65, 110352.0},
+ {-15.15, 164742.0},
+ {-14.65, 209499.0},
+ {-14.15, 267274.0},
+ {-13.65, 283290.0},
+ {-13.15, 275363.0},
+ {-12.65, 258014.0},
+ {-12.15, 225000.0},
+ {-11.65, 200000.0},
+ {-11.15, 190000.0},
+ {-10.65, 185000.0},
+ {-10.15, 180000.0},
+ { -9.65, 179000.0},
+ { -9.15, 178000.0},
+ { -8.65, 177000.0},
+ { -8.15, 176000.0},
+ { -7.65, 175000.0},
+ { -7.15, 174000.0},
+ { -6.65, 173000.0},
+ { -6.15, 172000.0},
+ { -5.65, 171000.0},
+ { -5.15, 170000.0}
+ };
+ /** Poor data: long tails. */
+ protected static final double[][] DATASET3 = new double[][] {
+ {-90.15, 1513.0},
+ {-80.15, 1514.0},
+ {-70.15, 1513.0},
+ {-60.15, 1514.0},
+ {-50.15, 1513.0},
+ {-40.15, 1514.0},
+ {-30.15, 1513.0},
+ {-20.15, 1523.0},
+ {-19.65, 1566.0},
+ {-19.15, 1592.0},
+ {-18.65, 1927.0},
+ {-18.15, 3089.0},
+ {-17.65, 6068.0},
+ {-17.15, 14239.0},
+ {-16.65, 34124.0},
+ {-16.15, 64097.0},
+ {-15.65, 110352.0},
+ {-15.15, 164742.0},
+ {-14.65, 209499.0},
+ {-14.15, 267274.0},
+ {-13.65, 283290.0},
+ {-13.15, 275363.0},
+ {-12.65, 258014.0},
+ {-12.15, 214073.0},
+ {-11.65, 182244.0},
+ {-11.15, 136419.0},
+ {-10.65, 97823.0},
+ {-10.15, 58930.0},
+ { -9.65, 35404.0},
+ { -9.15, 16120.0},
+ { -8.65, 9823.0},
+ { -8.15, 5064.0},
+ { -7.65, 2575.0},
+ { -7.15, 1642.0},
+ { -6.65, 1101.0},
+ { -6.15, 812.0},
+ { -5.65, 690.0},
+ { -5.15, 565.0},
+ { 5.15, 564.0},
+ { 15.15, 565.0},
+ { 25.15, 564.0},
+ { 35.15, 565.0},
+ { 45.15, 564.0},
+ { 55.15, 565.0},
+ { 65.15, 564.0},
+ { 75.15, 565.0}
+ };
+ /** Poor data: right of peak is missing. */
+ protected static final double[][] DATASET4 = new double[][] {
+ {-20.15, 1523.0},
+ {-19.65, 1566.0},
+ {-19.15, 1592.0},
+ {-18.65, 1927.0},
+ {-18.15, 3089.0},
+ {-17.65, 6068.0},
+ {-17.15, 14239.0},
+ {-16.65, 34124.0},
+ {-16.15, 64097.0},
+ {-15.65, 110352.0},
+ {-15.15, 164742.0},
+ {-14.65, 209499.0},
+ {-14.15, 267274.0},
+ {-13.65, 283290.0}
+ };
+ /** Good data, but few points. */
+ protected static final double[][] DATASET5 = new double[][] {
+ {4.0254623, 531026.0},
+ {4.03128248, 984167.0},
+ {4.03839603, 1887233.0},
+ {4.04421621, 2687152.0},
+ {4.05132976, 3461228.0},
+ {4.05326982, 3580526.0},
+ {4.05779662, 3439750.0},
+ {4.0636168, 2877648.0},
+ {4.06943698, 2175960.0},
+ {4.07525716, 1447024.0},
+ {4.08237071, 717104.0},
+ {4.08366408, 620014.0}
+ };
+
+ /**
+ * Basic.
+ */
+ @Test
+ public void testFit01() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(DATASET1, fitter);
+ double[] parameters = fitter.fit();
+
+ Assert.assertEquals(3496978.1837704973, parameters[0], 1e-4);
+ Assert.assertEquals(4.054933085999146, parameters[1], 1e-4);
+ Assert.assertEquals(0.015039355620304326, parameters[2], 1e-4);
+ }
+
+ /**
+ * Zero points is not enough observed points.
+ */
+ @Test(expected=MathIllegalArgumentException.class)
+ public void testFit02() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ fitter.fit();
+ }
+
+ /**
+ * Two points is not enough observed points.
+ */
+ @Test(expected=MathIllegalArgumentException.class)
+ public void testFit03() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(new double[][] {
+ {4.0254623, 531026.0},
+ {4.02804905, 664002.0}},
+ fitter);
+ fitter.fit();
+ }
+
+ /**
+ * Poor data: right of peak not symmetric with left of peak.
+ */
+ @Test
+ public void testFit04() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(DATASET2, fitter);
+ double[] parameters = fitter.fit();
+
+ Assert.assertEquals(233003.2967252038, parameters[0], 1e-4);
+ Assert.assertEquals(-10.654887521095983, parameters[1], 1e-4);
+ Assert.assertEquals(4.335937353196641, parameters[2], 1e-4);
+ }
+
+ /**
+ * Poor data: long tails.
+ */
+ @Test
+ public void testFit05() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(DATASET3, fitter);
+ double[] parameters = fitter.fit();
+
+ Assert.assertEquals(283863.81929180305, parameters[0], 1e-4);
+ Assert.assertEquals(-13.29641995105174, parameters[1], 1e-4);
+ Assert.assertEquals(1.7297330293549908, parameters[2], 1e-4);
+ }
+
+ /**
+ * Poor data: right of peak is missing.
+ */
+ @Test
+ public void testFit06() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(DATASET4, fitter);
+ double[] parameters = fitter.fit();
+
+ Assert.assertEquals(285250.66754309234, parameters[0], 1e-4);
+ Assert.assertEquals(-13.528375695228455, parameters[1], 1e-4);
+ Assert.assertEquals(1.5204344894331614, parameters[2], 1e-4);
+ }
+
+ /**
+ * Basic with smaller dataset.
+ */
+ @Test
+ public void testFit07() {
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ addDatasetToGaussianFitter(DATASET5, fitter);
+ double[] parameters = fitter.fit();
+
+ Assert.assertEquals(3514384.729342235, parameters[0], 1e-4);
+ Assert.assertEquals(4.054970307455625, parameters[1], 1e-4);
+ Assert.assertEquals(0.015029412832160017, parameters[2], 1e-4);
+ }
+
+ @Test
+ public void testMath519() {
+ // The optimizer will try negative sigma values but "GaussianFitter"
+ // will catch the raised exceptions and return NaN values instead.
+
+ final double[] data = {
+ 1.1143831578403364E-29,
+ 4.95281403484594E-28,
+ 1.1171347211930288E-26,
+ 1.7044813962636277E-25,
+ 1.9784716574832164E-24,
+ 1.8630236407866774E-23,
+ 1.4820532905097742E-22,
+ 1.0241963854632831E-21,
+ 6.275077366673128E-21,
+ 3.461808994532493E-20,
+ 1.7407124684715706E-19,
+ 8.056687953553974E-19,
+ 3.460193945992071E-18,
+ 1.3883326374011525E-17,
+ 5.233894983671116E-17,
+ 1.8630791465263745E-16,
+ 6.288759227922111E-16,
+ 2.0204433920597856E-15,
+ 6.198768938576155E-15,
+ 1.821419346860626E-14,
+ 5.139176445538471E-14,
+ 1.3956427429045787E-13,
+ 3.655705706448139E-13,
+ 9.253753324779779E-13,
+ 2.267636001476696E-12,
+ 5.3880460095836855E-12,
+ 1.2431632654852931E-11
+ };
+
+ GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+ for (int i = 0; i < data.length; i++) {
+ fitter.addObservedPoint(i, data[i]);
+ }
+ final double[] p = fitter.fit();
+
+ Assert.assertEquals(53.1572792, p[1], 1e-7);
+ Assert.assertEquals(5.75214622, p[2], 1e-8);
+ }
+
+ @Test
+ public void testMath798() {
+ final GaussianFitter fitter = new GaussianFitter(new LevenbergMarquardtOptimizer());
+
+ // When the data points are not commented out below, the fit stalls.
+ // This is expected however, since the whole dataset hardly looks like
+ // a Gaussian.
+ // When commented out, the fit proceeds fine.
+
+ fitter.addObservedPoint(0.23, 395.0);
+ //fitter.addObservedPoint(0.68, 0.0);
+ fitter.addObservedPoint(1.14, 376.0);
+ //fitter.addObservedPoint(1.59, 0.0);
+ fitter.addObservedPoint(2.05, 163.0);
+ //fitter.addObservedPoint(2.50, 0.0);
+ fitter.addObservedPoint(2.95, 49.0);
+ //fitter.addObservedPoint(3.41, 0.0);
+ fitter.addObservedPoint(3.86, 16.0);
+ //fitter.addObservedPoint(4.32, 0.0);
+ fitter.addObservedPoint(4.77, 1.0);
+
+ final double[] p = fitter.fit();
+
+ // Values are copied from a previous run of this test.
+ Assert.assertEquals(420.8397296167364, p[0], 1e-12);
+ Assert.assertEquals(0.603770729862231, p[1], 1e-15);
+ Assert.assertEquals(1.0786447936766612, p[2], 1e-14);
+ }
+
+ /**
+ * Adds the specified points to specified GaussianFitter
+ * instance.
+ *
+ * @param points data points where first dimension is a point index and
+ * second dimension is an array of length two representing the point
+ * with the first value corresponding to X and the second value
+ * corresponding to Y
+ * @param fitter fitter to which the points in {@code points}
+ * should be
+ * added as observed points
+ */
+ protected static void addDatasetToGaussianFitter(double[][] points,
+ GaussianFitter fitter) {
+ for (int i = 0; i < points.length; i++) {
+ fitter.addObservedPoint(points[i][0], points[i][1]);
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/fitting/HarmonicFitterTest.java b/src/test/java/org/apache/commons/math3/fitting/HarmonicFitterTest.java
new file mode 100644
index 000000000..1c457612f
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/fitting/HarmonicFitterTest.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.util.Random;
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
+import org.apache.commons.math3.analysis.function.HarmonicOscillator;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.MathUtils;
+
+import org.junit.Test;
+import org.junit.Assert;
+
+public class HarmonicFitterTest {
+ @Test(expected=NumberIsTooSmallException.class)
+ public void testPreconditions1() {
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+
+ fitter.fit();
+ }
+
+ @Test
+ public void testNoError() {
+ final double a = 0.2;
+ final double w = 3.4;
+ final double p = 4.1;
+ HarmonicOscillator f = new HarmonicOscillator(a, w, p);
+
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+ for (double x = 0.0; x < 1.3; x += 0.01) {
+ fitter.addObservedPoint(1, x, f.value(x));
+ }
+
+ final double[] fitted = fitter.fit();
+ Assert.assertEquals(a, fitted[0], 1.0e-13);
+ Assert.assertEquals(w, fitted[1], 1.0e-13);
+ Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1e-13);
+
+ HarmonicOscillator ff = new HarmonicOscillator(fitted[0], fitted[1], fitted[2]);
+
+ for (double x = -1.0; x < 1.0; x += 0.01) {
+ Assert.assertTrue(FastMath.abs(f.value(x) - ff.value(x)) < 1e-13);
+ }
+ }
+
+ @Test
+ public void test1PercentError() {
+ Random randomizer = new Random(64925784252l);
+ final double a = 0.2;
+ final double w = 3.4;
+ final double p = 4.1;
+ HarmonicOscillator f = new HarmonicOscillator(a, w, p);
+
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+ for (double x = 0.0; x < 10.0; x += 0.1) {
+ fitter.addObservedPoint(1, x,
+ f.value(x) + 0.01 * randomizer.nextGaussian());
+ }
+
+ final double[] fitted = fitter.fit();
+ Assert.assertEquals(a, fitted[0], 7.6e-4);
+ Assert.assertEquals(w, fitted[1], 2.7e-3);
+ Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.3e-2);
+ }
+
+ @Test
+ public void testTinyVariationsData() {
+ Random randomizer = new Random(64925784252l);
+
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+ for (double x = 0.0; x < 10.0; x += 0.1) {
+ fitter.addObservedPoint(1, x, 1e-7 * randomizer.nextGaussian());
+ }
+
+ fitter.fit();
+ // This test serves to cover the part of the code of "guessAOmega"
+ // when the algorithm using integrals fails.
+ }
+
+ @Test
+ public void testInitialGuess() {
+ Random randomizer = new Random(45314242l);
+ final double a = 0.2;
+ final double w = 3.4;
+ final double p = 4.1;
+ HarmonicOscillator f = new HarmonicOscillator(a, w, p);
+
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+ for (double x = 0.0; x < 10.0; x += 0.1) {
+ fitter.addObservedPoint(1, x,
+ f.value(x) + 0.01 * randomizer.nextGaussian());
+ }
+
+ final double[] fitted = fitter.fit(new double[] { 0.15, 3.6, 4.5 });
+ Assert.assertEquals(a, fitted[0], 1.2e-3);
+ Assert.assertEquals(w, fitted[1], 3.3e-3);
+ Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.7e-2);
+ }
+
+ @Test
+ public void testUnsorted() {
+ Random randomizer = new Random(64925784252l);
+ final double a = 0.2;
+ final double w = 3.4;
+ final double p = 4.1;
+ HarmonicOscillator f = new HarmonicOscillator(a, w, p);
+
+ HarmonicFitter fitter =
+ new HarmonicFitter(new LevenbergMarquardtOptimizer());
+
+ // build a regularly spaced array of measurements
+ int size = 100;
+ double[] xTab = new double[size];
+ double[] yTab = new double[size];
+ for (int i = 0; i < size; ++i) {
+ xTab[i] = 0.1 * i;
+ yTab[i] = f.value(xTab[i]) + 0.01 * randomizer.nextGaussian();
+ }
+
+ // shake it
+ for (int i = 0; i < size; ++i) {
+ int i1 = randomizer.nextInt(size);
+ int i2 = randomizer.nextInt(size);
+ double xTmp = xTab[i1];
+ double yTmp = yTab[i1];
+ xTab[i1] = xTab[i2];
+ yTab[i1] = yTab[i2];
+ xTab[i2] = xTmp;
+ yTab[i2] = yTmp;
+ }
+
+ // pass it to the fitter
+ for (int i = 0; i < size; ++i) {
+ fitter.addObservedPoint(1, xTab[i], yTab[i]);
+ }
+
+ final double[] fitted = fitter.fit();
+ Assert.assertEquals(a, fitted[0], 7.6e-4);
+ Assert.assertEquals(w, fitted[1], 3.5e-3);
+ Assert.assertEquals(p, MathUtils.normalizeAngle(fitted[2], p), 1.5e-2);
+ }
+
+ @Test(expected=MathIllegalStateException.class)
+ public void testMath844() {
+ final double[] y = { 0, 1, 2, 3, 2, 1,
+ 0, -1, -2, -3, -2, -1,
+ 0, 1, 2, 3, 2, 1,
+ 0, -1, -2, -3, -2, -1,
+ 0, 1, 2, 3, 2, 1, 0 };
+ final int len = y.length;
+ final WeightedObservedPoint[] points = new WeightedObservedPoint[len];
+ for (int i = 0; i < len; i++) {
+ points[i] = new WeightedObservedPoint(1, i, y[i]);
+ }
+
+ // The guesser fails because the function is far from an harmonic
+ // function: It is a triangular periodic function with amplitude 3
+ // and period 12, and all sample points are taken at integer abscissae
+ // so function values all belong to the integer subset {-3, -2, -1, 0,
+ // 1, 2, 3}.
+ final HarmonicFitter.ParameterGuesser guesser
+ = new HarmonicFitter.ParameterGuesser(points);
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/fitting/PolynomialFitterTest.java b/src/test/java/org/apache/commons/math3/fitting/PolynomialFitterTest.java
new file mode 100644
index 000000000..b695ee63e
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/fitting/PolynomialFitterTest.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.util.Random;
+import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
+import org.apache.commons.math3.analysis.polynomials.PolynomialFunction.Parametric;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.GaussNewtonOptimizer;
+import org.apache.commons.math3.optim.SimpleVectorValueChecker;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.distribution.RealDistribution;
+import org.apache.commons.math3.distribution.UniformRealDistribution;
+import org.apache.commons.math3.TestUtils;
+import org.junit.Test;
+import org.junit.Assert;
+
+/**
+ * Test for class {@link CurveFitter} where the function to fit is a
+ * polynomial.
+ */
+public class PolynomialFitterTest {
+ @Test
+ public void testFit() {
+ final RealDistribution rng = new UniformRealDistribution(-100, 100);
+ rng.reseedRandomGenerator(64925784252L);
+
+ final LevenbergMarquardtOptimizer optim = new LevenbergMarquardtOptimizer();
+ final PolynomialFitter fitter = new PolynomialFitter(optim);
+ final double[] coeff = { 12.9, -3.4, 2.1 }; // 12.9 - 3.4 x + 2.1 x^2
+ final PolynomialFunction f = new PolynomialFunction(coeff);
+
+ // Collect data from a known polynomial.
+ for (int i = 0; i < 100; i++) {
+ final double x = rng.sample();
+ fitter.addObservedPoint(x, f.value(x));
+ }
+
+ // Start fit from initial guesses that are far from the optimal values.
+ final double[] best = fitter.fit(new double[] { -1e-20, 3e15, -5e25 });
+
+ TestUtils.assertEquals("best != coeff", coeff, best, 1e-12);
+ }
+
+ @Test
+ public void testNoError() {
+ Random randomizer = new Random(64925784252l);
+ for (int degree = 1; degree < 10; ++degree) {
+ PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
+
+ PolynomialFitter fitter = new PolynomialFitter(new LevenbergMarquardtOptimizer());
+ for (int i = 0; i <= degree; ++i) {
+ fitter.addObservedPoint(1.0, i, p.value(i));
+ }
+
+ final double[] init = new double[degree + 1];
+ PolynomialFunction fitted = new PolynomialFunction(fitter.fit(init));
+
+ for (double x = -1.0; x < 1.0; x += 0.01) {
+ double error = FastMath.abs(p.value(x) - fitted.value(x)) /
+ (1.0 + FastMath.abs(p.value(x)));
+ Assert.assertEquals(0.0, error, 1.0e-6);
+ }
+ }
+ }
+
+ @Test
+ public void testSmallError() {
+ Random randomizer = new Random(53882150042l);
+ double maxError = 0;
+ for (int degree = 0; degree < 10; ++degree) {
+ PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
+
+ PolynomialFitter fitter = new PolynomialFitter(new LevenbergMarquardtOptimizer());
+ for (double x = -1.0; x < 1.0; x += 0.01) {
+ fitter.addObservedPoint(1.0, x,
+ p.value(x) + 0.1 * randomizer.nextGaussian());
+ }
+
+ final double[] init = new double[degree + 1];
+ PolynomialFunction fitted = new PolynomialFunction(fitter.fit(init));
+
+ for (double x = -1.0; x < 1.0; x += 0.01) {
+ double error = FastMath.abs(p.value(x) - fitted.value(x)) /
+ (1.0 + FastMath.abs(p.value(x)));
+ maxError = FastMath.max(maxError, error);
+ Assert.assertTrue(FastMath.abs(error) < 0.1);
+ }
+ }
+ Assert.assertTrue(maxError > 0.01);
+ }
+
+ @Test
+ public void testMath798() {
+ final double tol = 1e-14;
+ final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol);
+ final double[] init = new double[] { 0, 0 };
+ final int maxEval = 3;
+
+ final double[] lm = doMath798(new LevenbergMarquardtOptimizer(checker), maxEval, init);
+ final double[] gn = doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
+
+ for (int i = 0; i <= 1; i++) {
+ Assert.assertEquals(lm[i], gn[i], tol);
+ }
+ }
+
+ /**
+ * This test shows that the user can set the maximum number of iterations
+ * to avoid running for too long.
+ * But in the test case, the real problem is that the tolerance is way too
+ * stringent.
+ */
+ @Test(expected=TooManyEvaluationsException.class)
+ public void testMath798WithToleranceTooLow() {
+ final double tol = 1e-100;
+ final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol);
+ final double[] init = new double[] { 0, 0 };
+ final int maxEval = 10000; // Trying hard to fit.
+
+ final double[] gn = doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
+ }
+
+ /**
+ * This test shows that the user can set the maximum number of iterations
+ * to avoid running for too long.
+ * Even if the real problem is that the tolerance is way too stringent, it
+ * is still possible to retrieve the best solution found so far: the checker
+ * returns the current point once the maximum iteration count is reached.
+ */
+ @Test
+ public void testMath798WithToleranceTooLowButNoException() {
+ final double tol = 1e-100;
+ final double[] init = new double[] { 0, 0 };
+ final int maxEval = 10000; // Trying hard to fit.
+ final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(tol, tol, maxEval);
+
+ final double[] lm = doMath798(new LevenbergMarquardtOptimizer(checker), maxEval, init);
+ final double[] gn = doMath798(new GaussNewtonOptimizer(checker), maxEval, init);
+
+ for (int i = 0; i <= 1; i++) {
+ Assert.assertEquals(lm[i], gn[i], 1e-15);
+ }
+ }
+
+ /**
+ * @param optimizer Optimizer.
+ * @param maxEval Maximum number of function evaluations.
+ * @param init First guess.
+ * @return the solution found by the given optimizer.
+ */
+ private double[] doMath798(MultivariateVectorOptimizer optimizer,
+ int maxEval,
+ double[] init) {
+ final CurveFitter fitter = new CurveFitter(optimizer);
+
+ fitter.addObservedPoint(-0.2, -7.12442E-13);
+ fitter.addObservedPoint(-0.199, -4.33397E-13);
+ fitter.addObservedPoint(-0.198, -2.823E-13);
+ fitter.addObservedPoint(-0.197, -1.40405E-13);
+ fitter.addObservedPoint(-0.196, -7.80821E-15);
+ fitter.addObservedPoint(-0.195, 6.20484E-14);
+ fitter.addObservedPoint(-0.194, 7.24673E-14);
+ fitter.addObservedPoint(-0.193, 1.47152E-13);
+ fitter.addObservedPoint(-0.192, 1.9629E-13);
+ fitter.addObservedPoint(-0.191, 2.12038E-13);
+ fitter.addObservedPoint(-0.19, 2.46906E-13);
+ fitter.addObservedPoint(-0.189, 2.77495E-13);
+ fitter.addObservedPoint(-0.188, 2.51281E-13);
+ fitter.addObservedPoint(-0.187, 2.64001E-13);
+ fitter.addObservedPoint(-0.186, 2.8882E-13);
+ fitter.addObservedPoint(-0.185, 3.13604E-13);
+ fitter.addObservedPoint(-0.184, 3.14248E-13);
+ fitter.addObservedPoint(-0.183, 3.1172E-13);
+ fitter.addObservedPoint(-0.182, 3.12912E-13);
+ fitter.addObservedPoint(-0.181, 3.06761E-13);
+ fitter.addObservedPoint(-0.18, 2.8559E-13);
+ fitter.addObservedPoint(-0.179, 2.86806E-13);
+ fitter.addObservedPoint(-0.178, 2.985E-13);
+ fitter.addObservedPoint(-0.177, 2.67148E-13);
+ fitter.addObservedPoint(-0.176, 2.94173E-13);
+ fitter.addObservedPoint(-0.175, 3.27528E-13);
+ fitter.addObservedPoint(-0.174, 3.33858E-13);
+ fitter.addObservedPoint(-0.173, 2.97511E-13);
+ fitter.addObservedPoint(-0.172, 2.8615E-13);
+ fitter.addObservedPoint(-0.171, 2.84624E-13);
+
+ final double[] coeff = fitter.fit(maxEval,
+ new PolynomialFunction.Parametric(),
+ init);
+ return coeff;
+ }
+
+ @Test
+ public void testRedundantSolvable() {
+ // Levenberg-Marquardt should handle redundant information gracefully
+ checkUnsolvableProblem(new LevenbergMarquardtOptimizer(), true);
+ }
+
+ @Test
+ public void testRedundantUnsolvable() {
+ // Gauss-Newton should not be able to solve redundant information
+ checkUnsolvableProblem(new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-15, 1e-15)), false);
+ }
+
+ private void checkUnsolvableProblem(MultivariateVectorOptimizer optimizer,
+ boolean solvable) {
+ Random randomizer = new Random(1248788532l);
+ for (int degree = 0; degree < 10; ++degree) {
+ PolynomialFunction p = buildRandomPolynomial(degree, randomizer);
+
+ PolynomialFitter fitter = new PolynomialFitter(optimizer);
+
+ // Reusing the same point over and over again does not bring any
+ // information; the problem cannot be solved in this case for
+ // degrees greater than 1 (but a single point is sufficient for
+ // degree 0).
+ for (double x = -1.0; x < 1.0; x += 0.01) {
+ fitter.addObservedPoint(1.0, 0.0, p.value(0.0));
+ }
+
+ try {
+ final double[] init = new double[degree + 1];
+ fitter.fit(init);
+ Assert.assertTrue(solvable || (degree == 0));
+ } catch(ConvergenceException e) {
+ Assert.assertTrue((! solvable) && (degree > 0));
+ }
+ }
+ }
+
+ private PolynomialFunction buildRandomPolynomial(int degree, Random randomizer) {
+ final double[] coefficients = new double[degree + 1];
+ for (int i = 0; i <= degree; ++i) {
+ coefficients[i] = randomizer.nextGaussian();
+ }
+ return new PolynomialFunction(coefficients);
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/PointValuePairTest.java b/src/test/java/org/apache/commons/math3/optim/PointValuePairTest.java
new file mode 100644
index 000000000..7146d150d
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/PointValuePairTest.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class PointValuePairTest {
+ @Test
+ public void testSerial() {
+ PointValuePair pv1 = new PointValuePair(new double[] { 1.0, 2.0, 3.0 }, 4.0);
+ PointValuePair pv2 = (PointValuePair) TestUtils.serializeAndRecover(pv1);
+ Assert.assertEquals(pv1.getKey().length, pv2.getKey().length);
+ for (int i = 0; i < pv1.getKey().length; ++i) {
+ Assert.assertEquals(pv1.getKey()[i], pv2.getKey()[i], 1.0e-15);
+ }
+ Assert.assertEquals(pv1.getValue(), pv2.getValue(), 1.0e-15);
+ }
+
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/PointVectorValuePairTest.java b/src/test/java/org/apache/commons/math3/optim/PointVectorValuePairTest.java
new file mode 100644
index 000000000..427bac04f
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/PointVectorValuePairTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class PointVectorValuePairTest {
+ @Test
+ public void testSerial() {
+ PointVectorValuePair pv1 = new PointVectorValuePair(new double[] { 1.0, 2.0, 3.0 },
+ new double[] { 4.0, 5.0 });
+ PointVectorValuePair pv2 = (PointVectorValuePair) TestUtils.serializeAndRecover(pv1);
+ Assert.assertEquals(pv1.getKey().length, pv2.getKey().length);
+ for (int i = 0; i < pv1.getKey().length; ++i) {
+ Assert.assertEquals(pv1.getKey()[i], pv2.getKey()[i], 1.0e-15);
+ }
+ Assert.assertEquals(pv1.getValue().length, pv2.getValue().length);
+ for (int i = 0; i < pv1.getValue().length; ++i) {
+ Assert.assertEquals(pv1.getValue()[i], pv2.getValue()[i], 1.0e-15);
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/SimplePointCheckerTest.java b/src/test/java/org/apache/commons/math3/optim/SimplePointCheckerTest.java
new file mode 100644
index 000000000..f5b057a1d
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/SimplePointCheckerTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class SimplePointCheckerTest {
+ @Test(expected=NotStrictlyPositiveException.class)
+ public void testIterationCheckPrecondition() {
+ new SimplePointChecker(1e-1, 1e-2, 0);
+ }
+
+ @Test
+ public void testIterationCheck() {
+ final int max = 10;
+ final SimplePointChecker checker
+ = new SimplePointChecker(1e-1, 1e-2, max);
+ Assert.assertTrue(checker.converged(max, null, null));
+ Assert.assertTrue(checker.converged(max + 1, null, null));
+ }
+
+ @Test
+ public void testIterationCheckDisabled() {
+ final SimplePointChecker checker
+ = new SimplePointChecker(1e-8, 1e-8);
+
+ final PointValuePair a = new PointValuePair(new double[] { 1d }, 1d);
+ final PointValuePair b = new PointValuePair(new double[] { 10d }, 10d);
+
+ Assert.assertFalse(checker.converged(-1, a, b));
+ Assert.assertFalse(checker.converged(0, a, b));
+ Assert.assertFalse(checker.converged(1000000, a, b));
+
+ Assert.assertTrue(checker.converged(-1, a, a));
+ Assert.assertTrue(checker.converged(-1, b, b));
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/SimpleValueCheckerTest.java b/src/test/java/org/apache/commons/math3/optim/SimpleValueCheckerTest.java
new file mode 100644
index 000000000..f4b7f2f9d
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/SimpleValueCheckerTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class SimpleValueCheckerTest {
+ @Test(expected=NotStrictlyPositiveException.class)
+ public void testIterationCheckPrecondition() {
+ new SimpleValueChecker(1e-1, 1e-2, 0);
+ }
+
+ @Test
+ public void testIterationCheck() {
+ final int max = 10;
+ final SimpleValueChecker checker = new SimpleValueChecker(1e-1, 1e-2, max);
+ Assert.assertTrue(checker.converged(max, null, null));
+ Assert.assertTrue(checker.converged(max + 1, null, null));
+ }
+
+ @Test
+ public void testIterationCheckDisabled() {
+ final SimpleValueChecker checker = new SimpleValueChecker(1e-8, 1e-8);
+
+ final PointValuePair a = new PointValuePair(new double[] { 1d }, 1d);
+ final PointValuePair b = new PointValuePair(new double[] { 10d }, 10d);
+
+ Assert.assertFalse(checker.converged(-1, a, b));
+ Assert.assertFalse(checker.converged(0, a, b));
+ Assert.assertFalse(checker.converged(1000000, a, b));
+
+ Assert.assertTrue(checker.converged(-1, a, a));
+ Assert.assertTrue(checker.converged(-1, b, b));
+ }
+
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/SimpleVectorValueCheckerTest.java b/src/test/java/org/apache/commons/math3/optim/SimpleVectorValueCheckerTest.java
new file mode 100644
index 000000000..c9dcedc78
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/SimpleVectorValueCheckerTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class SimpleVectorValueCheckerTest {
+ @Test(expected=NotStrictlyPositiveException.class)
+ public void testIterationCheckPrecondition() {
+ new SimpleVectorValueChecker(1e-1, 1e-2, 0);
+ }
+
+ @Test
+ public void testIterationCheck() {
+ final int max = 10;
+ final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(1e-1, 1e-2, max);
+ Assert.assertTrue(checker.converged(max, null, null));
+ Assert.assertTrue(checker.converged(max + 1, null, null));
+ }
+
+ @Test
+ public void testIterationCheckDisabled() {
+ final SimpleVectorValueChecker checker = new SimpleVectorValueChecker(1e-8, 1e-8);
+
+ final PointVectorValuePair a = new PointVectorValuePair(new double[] { 1d },
+ new double[] { 1d });
+ final PointVectorValuePair b = new PointVectorValuePair(new double[] { 10d },
+ new double[] { 10d });
+
+ Assert.assertFalse(checker.converged(-1, a, b));
+ Assert.assertFalse(checker.converged(0, a, b));
+ Assert.assertFalse(checker.converged(1000000, a, b));
+
+ Assert.assertTrue(checker.converged(-1, a, a));
+ Assert.assertTrue(checker.converged(-1, b, b));
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/linear/SimplexSolverTest.java b/src/test/java/org/apache/commons/math3/optim/linear/SimplexSolverTest.java
new file mode 100644
index 000000000..8f4031046
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/linear/SimplexSolverTest.java
@@ -0,0 +1,664 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.apache.commons.math3.optim.MaxIter;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.util.Precision;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class SimplexSolverTest {
+ private static final MaxIter DEFAULT_MAX_ITER = new MaxIter(100);
+
+ @Test
+ public void testMath828() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(
+ new double[] { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, 0.0);
+
+ ArrayList constraints = new ArrayList();
+
+ constraints.add(new LinearConstraint(new double[] {0.0, 39.0, 23.0, 96.0, 15.0, 48.0, 9.0, 21.0, 48.0, 36.0, 76.0, 19.0, 88.0, 17.0, 16.0, 36.0,}, Relationship.GEQ, 15.0));
+ constraints.add(new LinearConstraint(new double[] {0.0, 59.0, 93.0, 12.0, 29.0, 78.0, 73.0, 87.0, 32.0, 70.0, 68.0, 24.0, 11.0, 26.0, 65.0, 25.0,}, Relationship.GEQ, 29.0));
+ constraints.add(new LinearConstraint(new double[] {0.0, 74.0, 5.0, 82.0, 6.0, 97.0, 55.0, 44.0, 52.0, 54.0, 5.0, 93.0, 91.0, 8.0, 20.0, 97.0,}, Relationship.GEQ, 6.0));
+ constraints.add(new LinearConstraint(new double[] {8.0, -3.0, -28.0, -72.0, -8.0, -31.0, -31.0, -74.0, -47.0, -59.0, -24.0, -57.0, -56.0, -16.0, -92.0, -59.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {25.0, -7.0, -99.0, -78.0, -25.0, -14.0, -16.0, -89.0, -39.0, -56.0, -53.0, -9.0, -18.0, -26.0, -11.0, -61.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {33.0, -95.0, -15.0, -4.0, -33.0, -3.0, -20.0, -96.0, -27.0, -13.0, -80.0, -24.0, -3.0, -13.0, -57.0, -76.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {7.0, -95.0, -39.0, -93.0, -7.0, -94.0, -94.0, -62.0, -76.0, -26.0, -53.0, -57.0, -31.0, -76.0, -53.0, -52.0,}, Relationship.GEQ, 0.0));
+
+ double epsilon = 1e-6;
+ PointValuePair solution = new SimplexSolver().optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(1.0d, solution.getValue(), epsilon);
+ Assert.assertTrue(validSolution(solution, constraints, epsilon));
+ }
+
+ @Test
+ public void testMath828Cycle() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(
+ new double[] { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, 0.0);
+
+ ArrayList constraints = new ArrayList();
+
+ constraints.add(new LinearConstraint(new double[] {0.0, 16.0, 14.0, 69.0, 1.0, 85.0, 52.0, 43.0, 64.0, 97.0, 14.0, 74.0, 89.0, 28.0, 94.0, 58.0, 13.0, 22.0, 21.0, 17.0, 30.0, 25.0, 1.0, 59.0, 91.0, 78.0, 12.0, 74.0, 56.0, 3.0, 88.0,}, Relationship.GEQ, 91.0));
+ constraints.add(new LinearConstraint(new double[] {0.0, 60.0, 40.0, 81.0, 71.0, 72.0, 46.0, 45.0, 38.0, 48.0, 40.0, 17.0, 33.0, 85.0, 64.0, 32.0, 84.0, 3.0, 54.0, 44.0, 71.0, 67.0, 90.0, 95.0, 54.0, 99.0, 99.0, 29.0, 52.0, 98.0, 9.0,}, Relationship.GEQ, 54.0));
+ constraints.add(new LinearConstraint(new double[] {0.0, 41.0, 12.0, 86.0, 90.0, 61.0, 31.0, 41.0, 23.0, 89.0, 17.0, 74.0, 44.0, 27.0, 16.0, 47.0, 80.0, 32.0, 11.0, 56.0, 68.0, 82.0, 11.0, 62.0, 62.0, 53.0, 39.0, 16.0, 48.0, 1.0, 63.0,}, Relationship.GEQ, 62.0));
+ constraints.add(new LinearConstraint(new double[] {83.0, -76.0, -94.0, -19.0, -15.0, -70.0, -72.0, -57.0, -63.0, -65.0, -22.0, -94.0, -22.0, -88.0, -86.0, -89.0, -72.0, -16.0, -80.0, -49.0, -70.0, -93.0, -95.0, -17.0, -83.0, -97.0, -31.0, -47.0, -31.0, -13.0, -23.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {41.0, -96.0, -41.0, -48.0, -70.0, -43.0, -43.0, -43.0, -97.0, -37.0, -85.0, -70.0, -45.0, -67.0, -87.0, -69.0, -94.0, -54.0, -54.0, -92.0, -79.0, -10.0, -35.0, -20.0, -41.0, -41.0, -65.0, -25.0, -12.0, -8.0, -46.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {27.0, -42.0, -65.0, -49.0, -53.0, -42.0, -17.0, -2.0, -61.0, -31.0, -76.0, -47.0, -8.0, -93.0, -86.0, -62.0, -65.0, -63.0, -22.0, -43.0, -27.0, -23.0, -32.0, -74.0, -27.0, -63.0, -47.0, -78.0, -29.0, -95.0, -73.0,}, Relationship.GEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] {15.0, -46.0, -41.0, -83.0, -98.0, -99.0, -21.0, -35.0, -7.0, -14.0, -80.0, -63.0, -18.0, -42.0, -5.0, -34.0, -56.0, -70.0, -16.0, -18.0, -74.0, -61.0, -47.0, -41.0, -15.0, -79.0, -18.0, -47.0, -88.0, -68.0, -55.0,}, Relationship.GEQ, 0.0));
+
+ double epsilon = 1e-6;
+ PointValuePair solution = new SimplexSolver().optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(1.0d, solution.getValue(), epsilon);
+ Assert.assertTrue(validSolution(solution, constraints, epsilon));
+ }
+
+ @Test
+ public void testMath781() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 2, 6, 7 }, 0);
+
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 2, 1 }, Relationship.LEQ, 2));
+ constraints.add(new LinearConstraint(new double[] { -1, 1, 1 }, Relationship.LEQ, -1));
+ constraints.add(new LinearConstraint(new double[] { 2, -3, 1 }, Relationship.LEQ, -1));
+
+ double epsilon = 1e-6;
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], 0.0d, epsilon) > 0);
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[1], 0.0d, epsilon) > 0);
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[2], 0.0d, epsilon) < 0);
+ Assert.assertEquals(2.0d, solution.getValue(), epsilon);
+ }
+
+ @Test
+ public void testMath713NegativeVariable() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0, 1.0}, 0.0d);
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.EQ, 1));
+
+ double epsilon = 1e-6;
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], 0.0d, epsilon) >= 0);
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[1], 0.0d, epsilon) >= 0);
+ }
+
+ @Test
+ public void testMath434NegativeVariable() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {0.0, 0.0, 1.0}, 0.0d);
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {1, 1, 0}, Relationship.EQ, 5));
+ constraints.add(new LinearConstraint(new double[] {0, 0, 1}, Relationship.GEQ, -10));
+
+ double epsilon = 1e-6;
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(false));
+
+ Assert.assertEquals(5.0, solution.getPoint()[0] + solution.getPoint()[1], epsilon);
+ Assert.assertEquals(-10.0, solution.getPoint()[2], epsilon);
+ Assert.assertEquals(-10.0, solution.getValue(), epsilon);
+
+ }
+
+ @Test(expected = NoFeasibleSolutionException.class)
+ public void testMath434UnfeasibleSolution() {
+ double epsilon = 1e-6;
+
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0, 0.0}, 0.0);
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {epsilon/2, 0.5}, Relationship.EQ, 0));
+ constraints.add(new LinearConstraint(new double[] {1e-3, 0.1}, Relationship.EQ, 10));
+
+ SimplexSolver solver = new SimplexSolver();
+ // allowing only non-negative values, no feasible solution shall be found
+ solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ }
+
+ @Test
+ public void testMath434PivotRowSelection() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {1.0}, 0.0);
+
+ double epsilon = 1e-6;
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {200}, Relationship.GEQ, 1));
+ constraints.add(new LinearConstraint(new double[] {100}, Relationship.GEQ, 0.499900001));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(false));
+
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[0] * 200.d, 1.d, epsilon) >= 0);
+ Assert.assertEquals(0.0050, solution.getValue(), epsilon);
+ }
+
+ @Test
+ public void testMath434PivotRowSelection2() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {0.0d, 1.0d, 1.0d, 0.0d, 0.0d, 0.0d, 0.0d}, 0.0d);
+
+ ArrayList constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {1.0d, -0.1d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.EQ, -0.1d));
+ constraints.add(new LinearConstraint(new double[] {1.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, -1e-18d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 1.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 0.0d, 1.0d, 0.0d, -0.0128588d, 1e-5d}, Relationship.EQ, 0.0d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 0.0d, 0.0d, 1.0d, 1e-5d, -0.0128586d}, Relationship.EQ, 1e-10d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, -1.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 1.0d, 0.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 0.0d, -1.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
+ constraints.add(new LinearConstraint(new double[] {0.0d, 0.0d, 1.0d, 0.0d, 1.0d, 0.0d, 0.0d}, Relationship.GEQ, 0.0d));
+
+ double epsilon = 1e-7;
+ SimplexSolver simplex = new SimplexSolver();
+ PointValuePair solution = simplex.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(false));
+
+ Assert.assertTrue(Precision.compareTo(solution.getPoint()[0], -1e-18d, epsilon) >= 0);
+ Assert.assertEquals(1.0d, solution.getPoint()[1], epsilon);
+ Assert.assertEquals(0.0d, solution.getPoint()[2], epsilon);
+ Assert.assertEquals(1.0d, solution.getValue(), epsilon);
+ }
+
+ @Test
+ public void testMath272() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 2, 2, 1 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 1, 0 }, Relationship.GEQ, 1));
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 1 }, Relationship.GEQ, 1));
+ constraints.add(new LinearConstraint(new double[] { 0, 1, 0 }, Relationship.GEQ, 1));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+
+ Assert.assertEquals(0.0, solution.getPoint()[0], .0000001);
+ Assert.assertEquals(1.0, solution.getPoint()[1], .0000001);
+ Assert.assertEquals(1.0, solution.getPoint()[2], .0000001);
+ Assert.assertEquals(3.0, solution.getValue(), .0000001);
+ }
+
+ @Test
+ public void testMath286() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.6, 0.4 }, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 23.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 23.0));
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 0, 0, 0, 0 }, Relationship.GEQ, 10.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 0, 1, 0, 0, 0 }, Relationship.GEQ, 8.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 0, 0, 0, 1, 0 }, Relationship.GEQ, 5.0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+
+ Assert.assertEquals(25.8, solution.getValue(), .0000001);
+ Assert.assertEquals(23.0, solution.getPoint()[0] + solution.getPoint()[2] + solution.getPoint()[4], 0.0000001);
+ Assert.assertEquals(23.0, solution.getPoint()[1] + solution.getPoint()[3] + solution.getPoint()[5], 0.0000001);
+ Assert.assertTrue(solution.getPoint()[0] >= 10.0 - 0.0000001);
+ Assert.assertTrue(solution.getPoint()[2] >= 8.0 - 0.0000001);
+ Assert.assertTrue(solution.getPoint()[4] >= 5.0 - 0.0000001);
+ }
+
+ @Test
+ public void testDegeneracy() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.7 }, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.LEQ, 18.0));
+ constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.GEQ, 10.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.GEQ, 8.0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(13.6, solution.getValue(), .0000001);
+ }
+
+ @Test
+ public void testMath288() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 7, 3, 0, 0 }, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 3, 0, -5, 0 }, Relationship.LEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] { 2, 0, 0, -5 }, Relationship.LEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 3, 0, -5 }, Relationship.LEQ, 0.0));
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 0, 0 }, Relationship.LEQ, 1.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 0 }, Relationship.LEQ, 1.0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(10.0, solution.getValue(), .0000001);
+ }
+
+ @Test
+ public void testMath290GEQ() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 5 }, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 2, 0 }, Relationship.GEQ, -1.0));
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(0, solution.getValue(), .0000001);
+ Assert.assertEquals(0, solution.getPoint()[0], .0000001);
+ Assert.assertEquals(0, solution.getPoint()[1], .0000001);
+ }
+
+ @Test(expected=NoFeasibleSolutionException.class)
+ public void testMath290LEQ() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 5 }, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 2, 0 }, Relationship.LEQ, -1.0));
+ SimplexSolver solver = new SimplexSolver();
+ solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ }
+
+ @Test
+ public void testMath293() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 30.0));
+ constraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, 10.0));
+ constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, 10.0));
+ constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, 10.0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution1 = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+
+ Assert.assertEquals(15.7143, solution1.getPoint()[0], .0001);
+ Assert.assertEquals(0.0, solution1.getPoint()[1], .0001);
+ Assert.assertEquals(14.2857, solution1.getPoint()[2], .0001);
+ Assert.assertEquals(0.0, solution1.getPoint()[3], .0001);
+ Assert.assertEquals(0.0, solution1.getPoint()[4], .0001);
+ Assert.assertEquals(30.0, solution1.getPoint()[5], .0001);
+ Assert.assertEquals(40.57143, solution1.getValue(), .0001);
+
+ double valA = 0.8 * solution1.getPoint()[0] + 0.2 * solution1.getPoint()[1];
+ double valB = 0.7 * solution1.getPoint()[2] + 0.3 * solution1.getPoint()[3];
+ double valC = 0.4 * solution1.getPoint()[4] + 0.6 * solution1.getPoint()[5];
+
+ f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );
+ constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));
+ constraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 30.0));
+ constraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, valA));
+ constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, valB));
+ constraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, valC));
+
+ PointValuePair solution2 = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(40.57143, solution2.getValue(), .0001);
+ }
+
+ @Test
+ public void testSimplexSolver() {
+ LinearObjectiveFunction f =
+ new LinearObjectiveFunction(new double[] { 15, 10 }, 7);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
+ constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
+ constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.EQ, 4));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(2.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(2.0, solution.getPoint()[1], 0.0);
+ Assert.assertEquals(57.0, solution.getValue(), 0.0);
+ }
+
+ @Test
+ public void testSingleVariableAndConstraint() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 3 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.LEQ, 10));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ Assert.assertEquals(10.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(30.0, solution.getValue(), 0.0);
+ }
+
+ /**
+ * With no artificial variables needed (no equals and no greater than
+ * constraints) we can go straight to Phase 2.
+ */
+ @Test
+ public void testModelWithNoArtificialVars() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15, 10 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.LEQ, 2));
+ constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.LEQ, 3));
+ constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.LEQ, 4));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ Assert.assertEquals(2.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(2.0, solution.getPoint()[1], 0.0);
+ Assert.assertEquals(50.0, solution.getValue(), 0.0);
+ }
+
+ @Test
+ public void testMinimization() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { -2, 1 }, -5);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 2 }, Relationship.LEQ, 6));
+ constraints.add(new LinearConstraint(new double[] { 3, 2 }, Relationship.LEQ, 12));
+ constraints.add(new LinearConstraint(new double[] { 0, 1 }, Relationship.GEQ, 0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(false));
+ Assert.assertEquals(4.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(0.0, solution.getPoint()[1], 0.0);
+ Assert.assertEquals(-13.0, solution.getValue(), 0.0);
+ }
+
+ @Test
+ public void testSolutionWithNegativeDecisionVariable() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { -2, 1 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.GEQ, 6));
+ constraints.add(new LinearConstraint(new double[] { 1, 2 }, Relationship.LEQ, 14));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ Assert.assertEquals(-2.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(8.0, solution.getPoint()[1], 0.0);
+ Assert.assertEquals(12.0, solution.getValue(), 0.0);
+ }
+
+ @Test(expected = NoFeasibleSolutionException.class)
+ public void testInfeasibleSolution() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.LEQ, 1));
+ constraints.add(new LinearConstraint(new double[] { 1 }, Relationship.GEQ, 3));
+
+ SimplexSolver solver = new SimplexSolver();
+ solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ }
+
+ @Test(expected = UnboundedSolutionException.class)
+ public void testUnboundedSolution() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 15, 10 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 0 }, Relationship.EQ, 2));
+
+ SimplexSolver solver = new SimplexSolver();
+ solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ }
+
+ @Test
+ public void testRestrictVariablesToNonNegative() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 409, 523, 70, 204, 339 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 43, 56, 345, 56, 5 }, Relationship.LEQ, 4567456));
+ constraints.add(new LinearConstraint(new double[] { 12, 45, 7, 56, 23 }, Relationship.LEQ, 56454));
+ constraints.add(new LinearConstraint(new double[] { 8, 768, 0, 34, 7456 }, Relationship.LEQ, 1923421));
+ constraints.add(new LinearConstraint(new double[] { 12342, 2342, 34, 678, 2342 }, Relationship.GEQ, 4356));
+ constraints.add(new LinearConstraint(new double[] { 45, 678, 76, 52, 23 }, Relationship.EQ, 456356));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(2902.92783505155, solution.getPoint()[0], .0000001);
+ Assert.assertEquals(480.419243986254, solution.getPoint()[1], .0000001);
+ Assert.assertEquals(0.0, solution.getPoint()[2], .0000001);
+ Assert.assertEquals(0.0, solution.getPoint()[3], .0000001);
+ Assert.assertEquals(0.0, solution.getPoint()[4], .0000001);
+ Assert.assertEquals(1438556.7491409, solution.getValue(), .0000001);
+ }
+
+ @Test
+ public void testEpsilon() {
+ LinearObjectiveFunction f =
+ new LinearObjectiveFunction(new double[] { 10, 5, 1 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 9, 8, 0 }, Relationship.EQ, 17));
+ constraints.add(new LinearConstraint(new double[] { 0, 7, 8 }, Relationship.LEQ, 7));
+ constraints.add(new LinearConstraint(new double[] { 10, 0, 2 }, Relationship.LEQ, 10));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(false));
+ Assert.assertEquals(1.0, solution.getPoint()[0], 0.0);
+ Assert.assertEquals(1.0, solution.getPoint()[1], 0.0);
+ Assert.assertEquals(0.0, solution.getPoint()[2], 0.0);
+ Assert.assertEquals(15.0, solution.getValue(), 0.0);
+ }
+
+ @Test
+ public void testTrivialModel() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 1, 1 }, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] { 1, 1 }, Relationship.EQ, 0));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MAXIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(0, solution.getValue(), .0000001);
+ }
+
+ @Test
+ public void testLargeModel() {
+ double[] objective = new double[] {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 12, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 12, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 12, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 12, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 12, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 12, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1};
+
+ LinearObjectiveFunction f = new LinearObjectiveFunction(objective, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(equationFromString(objective.length, "x0 + x1 + x2 + x3 - x12 = 0"));
+ constraints.add(equationFromString(objective.length, "x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 - x13 = 0"));
+ constraints.add(equationFromString(objective.length, "x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 >= 49"));
+ constraints.add(equationFromString(objective.length, "x0 + x1 + x2 + x3 >= 42"));
+ constraints.add(equationFromString(objective.length, "x14 + x15 + x16 + x17 - x26 = 0"));
+ constraints.add(equationFromString(objective.length, "x18 + x19 + x20 + x21 + x22 + x23 + x24 + x25 - x27 = 0"));
+ constraints.add(equationFromString(objective.length, "x14 + x15 + x16 + x17 - x12 = 0"));
+ constraints.add(equationFromString(objective.length, "x18 + x19 + x20 + x21 + x22 + x23 + x24 + x25 - x13 = 0"));
+ constraints.add(equationFromString(objective.length, "x28 + x29 + x30 + x31 - x40 = 0"));
+ constraints.add(equationFromString(objective.length, "x32 + x33 + x34 + x35 + x36 + x37 + x38 + x39 - x41 = 0"));
+ constraints.add(equationFromString(objective.length, "x32 + x33 + x34 + x35 + x36 + x37 + x38 + x39 >= 49"));
+ constraints.add(equationFromString(objective.length, "x28 + x29 + x30 + x31 >= 42"));
+ constraints.add(equationFromString(objective.length, "x42 + x43 + x44 + x45 - x54 = 0"));
+ constraints.add(equationFromString(objective.length, "x46 + x47 + x48 + x49 + x50 + x51 + x52 + x53 - x55 = 0"));
+ constraints.add(equationFromString(objective.length, "x42 + x43 + x44 + x45 - x40 = 0"));
+ constraints.add(equationFromString(objective.length, "x46 + x47 + x48 + x49 + x50 + x51 + x52 + x53 - x41 = 0"));
+ constraints.add(equationFromString(objective.length, "x56 + x57 + x58 + x59 - x68 = 0"));
+ constraints.add(equationFromString(objective.length, "x60 + x61 + x62 + x63 + x64 + x65 + x66 + x67 - x69 = 0"));
+ constraints.add(equationFromString(objective.length, "x60 + x61 + x62 + x63 + x64 + x65 + x66 + x67 >= 51"));
+ constraints.add(equationFromString(objective.length, "x56 + x57 + x58 + x59 >= 44"));
+ constraints.add(equationFromString(objective.length, "x70 + x71 + x72 + x73 - x82 = 0"));
+ constraints.add(equationFromString(objective.length, "x74 + x75 + x76 + x77 + x78 + x79 + x80 + x81 - x83 = 0"));
+ constraints.add(equationFromString(objective.length, "x70 + x71 + x72 + x73 - x68 = 0"));
+ constraints.add(equationFromString(objective.length, "x74 + x75 + x76 + x77 + x78 + x79 + x80 + x81 - x69 = 0"));
+ constraints.add(equationFromString(objective.length, "x84 + x85 + x86 + x87 - x96 = 0"));
+ constraints.add(equationFromString(objective.length, "x88 + x89 + x90 + x91 + x92 + x93 + x94 + x95 - x97 = 0"));
+ constraints.add(equationFromString(objective.length, "x88 + x89 + x90 + x91 + x92 + x93 + x94 + x95 >= 51"));
+ constraints.add(equationFromString(objective.length, "x84 + x85 + x86 + x87 >= 44"));
+ constraints.add(equationFromString(objective.length, "x98 + x99 + x100 + x101 - x110 = 0"));
+ constraints.add(equationFromString(objective.length, "x102 + x103 + x104 + x105 + x106 + x107 + x108 + x109 - x111 = 0"));
+ constraints.add(equationFromString(objective.length, "x98 + x99 + x100 + x101 - x96 = 0"));
+ constraints.add(equationFromString(objective.length, "x102 + x103 + x104 + x105 + x106 + x107 + x108 + x109 - x97 = 0"));
+ constraints.add(equationFromString(objective.length, "x112 + x113 + x114 + x115 - x124 = 0"));
+ constraints.add(equationFromString(objective.length, "x116 + x117 + x118 + x119 + x120 + x121 + x122 + x123 - x125 = 0"));
+ constraints.add(equationFromString(objective.length, "x116 + x117 + x118 + x119 + x120 + x121 + x122 + x123 >= 49"));
+ constraints.add(equationFromString(objective.length, "x112 + x113 + x114 + x115 >= 42"));
+ constraints.add(equationFromString(objective.length, "x126 + x127 + x128 + x129 - x138 = 0"));
+ constraints.add(equationFromString(objective.length, "x130 + x131 + x132 + x133 + x134 + x135 + x136 + x137 - x139 = 0"));
+ constraints.add(equationFromString(objective.length, "x126 + x127 + x128 + x129 - x124 = 0"));
+ constraints.add(equationFromString(objective.length, "x130 + x131 + x132 + x133 + x134 + x135 + x136 + x137 - x125 = 0"));
+ constraints.add(equationFromString(objective.length, "x140 + x141 + x142 + x143 - x152 = 0"));
+ constraints.add(equationFromString(objective.length, "x144 + x145 + x146 + x147 + x148 + x149 + x150 + x151 - x153 = 0"));
+ constraints.add(equationFromString(objective.length, "x144 + x145 + x146 + x147 + x148 + x149 + x150 + x151 >= 59"));
+ constraints.add(equationFromString(objective.length, "x140 + x141 + x142 + x143 >= 42"));
+ constraints.add(equationFromString(objective.length, "x154 + x155 + x156 + x157 - x166 = 0"));
+ constraints.add(equationFromString(objective.length, "x158 + x159 + x160 + x161 + x162 + x163 + x164 + x165 - x167 = 0"));
+ constraints.add(equationFromString(objective.length, "x154 + x155 + x156 + x157 - x152 = 0"));
+ constraints.add(equationFromString(objective.length, "x158 + x159 + x160 + x161 + x162 + x163 + x164 + x165 - x153 = 0"));
+ constraints.add(equationFromString(objective.length, "x83 + x82 - x168 = 0"));
+ constraints.add(equationFromString(objective.length, "x111 + x110 - x169 = 0"));
+ constraints.add(equationFromString(objective.length, "x170 - x182 = 0"));
+ constraints.add(equationFromString(objective.length, "x171 - x183 = 0"));
+ constraints.add(equationFromString(objective.length, "x172 - x184 = 0"));
+ constraints.add(equationFromString(objective.length, "x173 - x185 = 0"));
+ constraints.add(equationFromString(objective.length, "x174 - x186 = 0"));
+ constraints.add(equationFromString(objective.length, "x175 + x176 - x187 = 0"));
+ constraints.add(equationFromString(objective.length, "x177 - x188 = 0"));
+ constraints.add(equationFromString(objective.length, "x178 - x189 = 0"));
+ constraints.add(equationFromString(objective.length, "x179 - x190 = 0"));
+ constraints.add(equationFromString(objective.length, "x180 - x191 = 0"));
+ constraints.add(equationFromString(objective.length, "x181 - x192 = 0"));
+ constraints.add(equationFromString(objective.length, "x170 - x26 = 0"));
+ constraints.add(equationFromString(objective.length, "x171 - x27 = 0"));
+ constraints.add(equationFromString(objective.length, "x172 - x54 = 0"));
+ constraints.add(equationFromString(objective.length, "x173 - x55 = 0"));
+ constraints.add(equationFromString(objective.length, "x174 - x168 = 0"));
+ constraints.add(equationFromString(objective.length, "x177 - x169 = 0"));
+ constraints.add(equationFromString(objective.length, "x178 - x138 = 0"));
+ constraints.add(equationFromString(objective.length, "x179 - x139 = 0"));
+ constraints.add(equationFromString(objective.length, "x180 - x166 = 0"));
+ constraints.add(equationFromString(objective.length, "x181 - x167 = 0"));
+ constraints.add(equationFromString(objective.length, "x193 - x205 = 0"));
+ constraints.add(equationFromString(objective.length, "x194 - x206 = 0"));
+ constraints.add(equationFromString(objective.length, "x195 - x207 = 0"));
+ constraints.add(equationFromString(objective.length, "x196 - x208 = 0"));
+ constraints.add(equationFromString(objective.length, "x197 - x209 = 0"));
+ constraints.add(equationFromString(objective.length, "x198 + x199 - x210 = 0"));
+ constraints.add(equationFromString(objective.length, "x200 - x211 = 0"));
+ constraints.add(equationFromString(objective.length, "x201 - x212 = 0"));
+ constraints.add(equationFromString(objective.length, "x202 - x213 = 0"));
+ constraints.add(equationFromString(objective.length, "x203 - x214 = 0"));
+ constraints.add(equationFromString(objective.length, "x204 - x215 = 0"));
+ constraints.add(equationFromString(objective.length, "x193 - x182 = 0"));
+ constraints.add(equationFromString(objective.length, "x194 - x183 = 0"));
+ constraints.add(equationFromString(objective.length, "x195 - x184 = 0"));
+ constraints.add(equationFromString(objective.length, "x196 - x185 = 0"));
+ constraints.add(equationFromString(objective.length, "x197 - x186 = 0"));
+ constraints.add(equationFromString(objective.length, "x198 + x199 - x187 = 0"));
+ constraints.add(equationFromString(objective.length, "x200 - x188 = 0"));
+ constraints.add(equationFromString(objective.length, "x201 - x189 = 0"));
+ constraints.add(equationFromString(objective.length, "x202 - x190 = 0"));
+ constraints.add(equationFromString(objective.length, "x203 - x191 = 0"));
+ constraints.add(equationFromString(objective.length, "x204 - x192 = 0"));
+
+ SimplexSolver solver = new SimplexSolver();
+ PointValuePair solution = solver.optimize(DEFAULT_MAX_ITER, f, new LinearConstraintSet(constraints),
+ GoalType.MINIMIZE, new NonNegativeConstraint(true));
+ Assert.assertEquals(7518.0, solution.getValue(), .0000001);
+ }
+
+ /**
+ * Converts a test string to a {@link LinearConstraint}.
+ * Ex: x0 + x1 + x2 + x3 - x12 = 0
+ */
+ private LinearConstraint equationFromString(int numCoefficients, String s) {
+ Relationship relationship;
+ if (s.contains(">=")) {
+ relationship = Relationship.GEQ;
+ } else if (s.contains("<=")) {
+ relationship = Relationship.LEQ;
+ } else if (s.contains("=")) {
+ relationship = Relationship.EQ;
+ } else {
+ throw new IllegalArgumentException();
+ }
+
+ String[] equationParts = s.split("[>|<]?=");
+ double rhs = Double.parseDouble(equationParts[1].trim());
+
+ double[] lhs = new double[numCoefficients];
+ String left = equationParts[0].replaceAll(" ?x", "");
+ String[] coefficients = left.split(" ");
+ for (String coefficient : coefficients) {
+ double value = coefficient.charAt(0) == '-' ? -1 : 1;
+ int index = Integer.parseInt(coefficient.replaceFirst("[+|-]", "").trim());
+ lhs[index] = value;
+ }
+ return new LinearConstraint(lhs, relationship, rhs);
+ }
+
+ private static boolean validSolution(PointValuePair solution, List constraints, double epsilon) {
+ double[] vals = solution.getPoint();
+ for (LinearConstraint c : constraints) {
+ double[] coeffs = c.getCoefficients().toArray();
+ double result = 0.0d;
+ for (int i = 0; i < vals.length; i++) {
+ result += vals[i] * coeffs[i];
+ }
+
+ switch (c.getRelationship()) {
+ case EQ:
+ if (!Precision.equals(result, c.getValue(), epsilon)) {
+ return false;
+ }
+ break;
+
+ case GEQ:
+ if (Precision.compareTo(result, c.getValue(), epsilon) < 0) {
+ return false;
+ }
+ break;
+
+ case LEQ:
+ if (Precision.compareTo(result, c.getValue(), epsilon) > 0) {
+ return false;
+ }
+ break;
+ }
+ }
+
+ return true;
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/linear/SimplexTableauTest.java b/src/test/java/org/apache/commons/math3/optim/linear/SimplexTableauTest.java
new file mode 100644
index 000000000..242a8c170
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/linear/SimplexTableauTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.linear;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import org.apache.commons.math3.TestUtils;
+import org.apache.commons.math3.optim.GoalType;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class SimplexTableauTest {
+
+ @Test
+ public void testInitialization() {
+ LinearObjectiveFunction f = createFunction();
+ Collection constraints = createConstraints();
+ SimplexTableau tableau =
+ new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
+ double[][] expectedInitialTableau = {
+ {-1, 0, -1, -1, 2, 0, 0, 0, -4},
+ { 0, 1, -15, -10, 25, 0, 0, 0, 0},
+ { 0, 0, 1, 0, -1, 1, 0, 0, 2},
+ { 0, 0, 0, 1, -1, 0, 1, 0, 3},
+ { 0, 0, 1, 1, -2, 0, 0, 1, 4}
+ };
+ assertMatrixEquals(expectedInitialTableau, tableau.getData());
+ }
+
+ @Test
+ public void testDropPhase1Objective() {
+ LinearObjectiveFunction f = createFunction();
+ Collection constraints = createConstraints();
+ SimplexTableau tableau =
+ new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
+ double[][] expectedTableau = {
+ { 1, -15, -10, 0, 0, 0, 0},
+ { 0, 1, 0, 1, 0, 0, 2},
+ { 0, 0, 1, 0, 1, 0, 3},
+ { 0, 1, 1, 0, 0, 1, 4}
+ };
+ tableau.dropPhase1Objective();
+ assertMatrixEquals(expectedTableau, tableau.getData());
+ }
+
+ @Test
+ public void testTableauWithNoArtificialVars() {
+ LinearObjectiveFunction f = new LinearObjectiveFunction(new double[] {15, 10}, 0);
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.LEQ, 2));
+ constraints.add(new LinearConstraint(new double[] {0, 1}, Relationship.LEQ, 3));
+ constraints.add(new LinearConstraint(new double[] {1, 1}, Relationship.LEQ, 4));
+ SimplexTableau tableau =
+ new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
+ double[][] initialTableau = {
+ {1, -15, -10, 25, 0, 0, 0, 0},
+ {0, 1, 0, -1, 1, 0, 0, 2},
+ {0, 0, 1, -1, 0, 1, 0, 3},
+ {0, 1, 1, -2, 0, 0, 1, 4}
+ };
+ assertMatrixEquals(initialTableau, tableau.getData());
+ }
+
+ @Test
+ public void testSerial() {
+ LinearObjectiveFunction f = createFunction();
+ Collection constraints = createConstraints();
+ SimplexTableau tableau =
+ new SimplexTableau(f, constraints, GoalType.MAXIMIZE, false, 1.0e-6);
+ Assert.assertEquals(tableau, TestUtils.serializeAndRecover(tableau));
+ }
+
+ private LinearObjectiveFunction createFunction() {
+ return new LinearObjectiveFunction(new double[] {15, 10}, 0);
+ }
+
+ private Collection createConstraints() {
+ Collection constraints = new ArrayList();
+ constraints.add(new LinearConstraint(new double[] {1, 0}, Relationship.LEQ, 2));
+ constraints.add(new LinearConstraint(new double[] {0, 1}, Relationship.LEQ, 3));
+ constraints.add(new LinearConstraint(new double[] {1, 1}, Relationship.EQ, 4));
+ return constraints;
+ }
+
+ private void assertMatrixEquals(double[][] expected, double[][] result) {
+ Assert.assertEquals("Wrong number of rows.", expected.length, result.length);
+ for (int i = 0; i < expected.length; i++) {
+ Assert.assertEquals("Wrong number of columns.", expected[i].length, result[i].length);
+ for (int j = 0; j < expected[i].length; j++) {
+ Assert.assertEquals("Wrong value at position [" + i + "," + j + "]", expected[i][j], result[i][j], 1.0e-15);
+ }
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizerTest.java
new file mode 100644
index 000000000..46e1db613
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultiStartMultivariateOptimizerTest.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.SimpleValueChecker;
+import org.apache.commons.math3.optim.nonlinear.scalar.gradient.CircleScalar;
+import org.apache.commons.math3.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;
+import org.apache.commons.math3.random.GaussianRandomGenerator;
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.apache.commons.math3.random.RandomVectorGenerator;
+import org.apache.commons.math3.random.UncorrelatedRandomVectorGenerator;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MultiStartMultivariateOptimizerTest {
+ @Test
+ public void testCircleFitting() {
+ CircleScalar circle = new CircleScalar();
+ circle.addPoint( 30.0, 68.0);
+ circle.addPoint( 50.0, -6.0);
+ circle.addPoint(110.0, -20.0);
+ circle.addPoint( 35.0, 15.0);
+ circle.addPoint( 45.0, 97.0);
+ // TODO: the wrapper around NonLinearConjugateGradientOptimizer is a temporary hack for
+ // version 3.1 of the library. It should be removed when NonLinearConjugateGradientOptimizer
+ // will officially be declared as implementing MultivariateDifferentiableOptimizer
+ GradientMultivariateOptimizer underlying
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-10, 1e-10));
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(753289573253l);
+ RandomVectorGenerator generator
+ = new UncorrelatedRandomVectorGenerator(new double[] { 50, 50 },
+ new double[] { 10, 10 },
+ new GaussianRandomGenerator(g));
+ MultiStartMultivariateOptimizer optimizer
+ = new MultiStartMultivariateOptimizer(underlying, 10, generator);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ circle.getObjectiveFunction(),
+ circle.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ Assert.assertEquals(200, optimizer.getMaxEvaluations());
+ PointValuePair[] optima = optimizer.getOptima();
+ for (PointValuePair o : optima) {
+ Vector2D center = new Vector2D(o.getPointRef()[0], o.getPointRef()[1]);
+ Assert.assertEquals(69.960161753, circle.getRadius(center), 1e-8);
+ Assert.assertEquals(96.075902096, center.getX(), 1e-8);
+ Assert.assertEquals(48.135167894, center.getY(), 1e-8);
+ }
+ Assert.assertTrue(optimizer.getEvaluations() > 70);
+ Assert.assertTrue(optimizer.getEvaluations() < 90);
+ Assert.assertEquals(3.1267527, optimum.getValue(), 1e-8);
+ }
+
+ @Test
+ public void testRosenbrock() {
+ Rosenbrock rosenbrock = new Rosenbrock();
+ SimplexOptimizer underlying
+ = new SimplexOptimizer(new SimpleValueChecker(-1, 1e-3));
+ NelderMeadSimplex simplex = new NelderMeadSimplex(new double[][] {
+ { -1.2, 1.0 },
+ { 0.9, 1.2 } ,
+ { 3.5, -2.3 }
+ });
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(16069223052l);
+ RandomVectorGenerator generator
+ = new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
+ MultiStartMultivariateOptimizer optimizer
+ = new MultiStartMultivariateOptimizer(underlying, 10, generator);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(1100),
+ new ObjectiveFunction(rosenbrock),
+ GoalType.MINIMIZE,
+ simplex,
+ new InitialGuess(new double[] { -1.2, 1.0 }));
+
+ Assert.assertEquals(rosenbrock.getCount(), optimizer.getEvaluations());
+ Assert.assertTrue(optimizer.getEvaluations() > 900);
+ Assert.assertTrue(optimizer.getEvaluations() < 1200);
+ Assert.assertTrue(optimum.getValue() < 8e-4);
+ }
+
+ private static class Rosenbrock implements MultivariateFunction {
+ private int count;
+
+ public Rosenbrock() {
+ count = 0;
+ }
+
+ public double value(double[] x) {
+ ++count;
+ double a = x[1] - x[0] * x[0];
+ double b = 1 - x[0];
+ return 100 * a * a + b * b;
+ }
+
+ public int getCount() {
+ return count;
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapterTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapterTest.java
new file mode 100644
index 000000000..e47b2a6ee
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionMappingAdapterTest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.SimplePointChecker;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.AbstractSimplex;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MultivariateFunctionMappingAdapterTest {
+ @Test
+ public void testStartSimplexInsideRange() {
+ final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
+ final MultivariateFunctionMappingAdapter wrapped
+ = new MultivariateFunctionMappingAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper());
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[][] {
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
+ wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
+ });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 })));
+ final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
+ }
+
+ @Test
+ public void testOptimumOutsideRange() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0, 1.0, 3.0, 2.0, 3.0);
+ final MultivariateFunctionMappingAdapter wrapped
+ = new MultivariateFunctionMappingAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper());
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[][] {
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
+ wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
+ });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 })));
+ final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
+ }
+
+ @Test
+ public void testUnbounded() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0,
+ Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY,
+ Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ final MultivariateFunctionMappingAdapter wrapped
+ = new MultivariateFunctionMappingAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper());
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[][] {
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
+ wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
+ });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 })));
+ final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 2e-7);
+ }
+
+ @Test
+ public void testHalfBounded() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 4.0,
+ 1.0, Double.POSITIVE_INFINITY,
+ Double.NEGATIVE_INFINITY, 3.0);
+ final MultivariateFunctionMappingAdapter wrapped
+ = new MultivariateFunctionMappingAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper());
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-13, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[][] {
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.75 }),
+ wrapped.boundedToUnbounded(new double[] { 1.5, 2.95 }),
+ wrapped.boundedToUnbounded(new double[] { 1.7, 2.90 })
+ });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(wrapped.boundedToUnbounded(new double[] { 1.5, 2.25 })));
+ final double[] bounded = wrapped.unboundedToBounded(optimum.getPoint());
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), bounded[0], 1e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), bounded[1], 1e-7);
+ }
+
+ private static class BiQuadratic implements MultivariateFunction {
+
+ private final double xOptimum;
+ private final double yOptimum;
+
+ private final double xMin;
+ private final double xMax;
+ private final double yMin;
+ private final double yMax;
+
+ public BiQuadratic(final double xOptimum, final double yOptimum,
+ final double xMin, final double xMax,
+ final double yMin, final double yMax) {
+ this.xOptimum = xOptimum;
+ this.yOptimum = yOptimum;
+ this.xMin = xMin;
+ this.xMax = xMax;
+ this.yMin = yMin;
+ this.yMax = yMax;
+ }
+
+ public double value(double[] point) {
+ // the function should never be called with out of range points
+ Assert.assertTrue(point[0] >= xMin);
+ Assert.assertTrue(point[0] <= xMax);
+ Assert.assertTrue(point[1] >= yMin);
+ Assert.assertTrue(point[1] <= yMax);
+
+ final double dx = point[0] - xOptimum;
+ final double dy = point[1] - yOptimum;
+ return dx * dx + dy * dy;
+
+ }
+
+ public double[] getLower() {
+ return new double[] { xMin, yMin };
+ }
+
+ public double[] getUpper() {
+ return new double[] { xMax, yMax };
+ }
+
+ public double getBoundedXOptimum() {
+ return (xOptimum < xMin) ? xMin : ((xOptimum > xMax) ? xMax : xOptimum);
+ }
+
+ public double getBoundedYOptimum() {
+ return (yOptimum < yMin) ? yMin : ((yOptimum > yMax) ? yMax : yOptimum);
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapterTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapterTest.java
new file mode 100644
index 000000000..e85b0b1f0
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/MultivariateFunctionPenaltyAdapterTest.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.SimplePointChecker;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.AbstractSimplex;
+import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MultivariateFunctionPenaltyAdapterTest {
+ @Test
+ public void testStartSimplexInsideRange() {
+ final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
+ final MultivariateFunctionPenaltyAdapter wrapped
+ = new MultivariateFunctionPenaltyAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper(),
+ 1000.0, new double[] { 100.0, 100.0 });
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[] { 1.0, 0.5 });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 1.5, 2.25 }));
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
+ }
+
+ @Test
+ public void testStartSimplexOutsideRange() {
+ final BiQuadratic biQuadratic = new BiQuadratic(2.0, 2.5, 1.0, 3.0, 2.0, 3.0);
+ final MultivariateFunctionPenaltyAdapter wrapped
+ = new MultivariateFunctionPenaltyAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper(),
+ 1000.0, new double[] { 100.0, 100.0 });
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[] { 1.0, 0.5 });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.5, 4.0 }));
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
+ }
+
+ @Test
+ public void testOptimumOutsideRange() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0, 1.0, 3.0, 2.0, 3.0);
+ final MultivariateFunctionPenaltyAdapter wrapped
+ = new MultivariateFunctionPenaltyAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper(),
+ 1000.0, new double[] { 100.0, 100.0 });
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(new SimplePointChecker(1.0e-11, 1.0e-20));
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[] { 1.0, 0.5 });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(600),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.5, 4.0 }));
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
+ }
+
+ @Test
+ public void testUnbounded() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 0.0,
+ Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY,
+ Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ final MultivariateFunctionPenaltyAdapter wrapped
+ = new MultivariateFunctionPenaltyAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper(),
+ 1000.0, new double[] { 100.0, 100.0 });
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[] { 1.0, 0.5 });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.5, 4.0 }));
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
+ }
+
+ @Test
+ public void testHalfBounded() {
+ final BiQuadratic biQuadratic = new BiQuadratic(4.0, 4.0,
+ 1.0, Double.POSITIVE_INFINITY,
+ Double.NEGATIVE_INFINITY, 3.0);
+ final MultivariateFunctionPenaltyAdapter wrapped
+ = new MultivariateFunctionPenaltyAdapter(biQuadratic,
+ biQuadratic.getLower(),
+ biQuadratic.getUpper(),
+ 1000.0, new double[] { 100.0, 100.0 });
+
+ SimplexOptimizer optimizer = new SimplexOptimizer(new SimplePointChecker(1.0e-10, 1.0e-20));
+ final AbstractSimplex simplex = new NelderMeadSimplex(new double[] { 1.0, 0.5 });
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(400),
+ new ObjectiveFunction(wrapped),
+ simplex,
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.5, 4.0 }));
+
+ Assert.assertEquals(biQuadratic.getBoundedXOptimum(), optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(biQuadratic.getBoundedYOptimum(), optimum.getPoint()[1], 2e-7);
+ }
+
+ private static class BiQuadratic implements MultivariateFunction {
+
+ private final double xOptimum;
+ private final double yOptimum;
+
+ private final double xMin;
+ private final double xMax;
+ private final double yMin;
+ private final double yMax;
+
+ public BiQuadratic(final double xOptimum, final double yOptimum,
+ final double xMin, final double xMax,
+ final double yMin, final double yMax) {
+ this.xOptimum = xOptimum;
+ this.yOptimum = yOptimum;
+ this.xMin = xMin;
+ this.xMax = xMax;
+ this.yMin = yMin;
+ this.yMax = yMax;
+ }
+
+ public double value(double[] point) {
+ // the function should never be called with out of range points
+ Assert.assertTrue(point[0] >= xMin);
+ Assert.assertTrue(point[0] <= xMax);
+ Assert.assertTrue(point[1] >= yMin);
+ Assert.assertTrue(point[1] <= yMax);
+
+ final double dx = point[0] - xOptimum;
+ final double dy = point[1] - yOptimum;
+ return dx * dx + dy * dy;
+
+ }
+
+ public double[] getLower() {
+ return new double[] { xMin, yMin };
+ }
+
+ public double[] getUpper() {
+ return new double[] { xMax, yMax };
+ }
+
+ public double getBoundedXOptimum() {
+ return (xOptimum < xMin) ? xMin : ((xOptimum > xMax) ? xMax : xOptimum);
+ }
+
+ public double getBoundedYOptimum() {
+ return (yOptimum < yMin) ? yMin : ((yOptimum > yMax) ? yMax : yOptimum);
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/CircleScalar.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/CircleScalar.java
new file mode 100644
index 000000000..7bc925f47
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/CircleScalar.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.gradient;
+
+import java.util.ArrayList;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient;
+
+/**
+ * Class used in the tests.
+ */
+public class CircleScalar {
+ private ArrayList<Vector2D> points;
+
+ public CircleScalar() {
+ points = new ArrayList<Vector2D>();
+ }
+
+ public void addPoint(double px, double py) {
+ points.add(new Vector2D(px, py));
+ }
+
+ public double getRadius(Vector2D center) {
+ double r = 0;
+ for (Vector2D point : points) {
+ r += point.distance(center);
+ }
+ return r / points.size();
+ }
+
+ public ObjectiveFunction getObjectiveFunction() {
+ return new ObjectiveFunction(new MultivariateFunction() {
+ public double value(double[] params) {
+ Vector2D center = new Vector2D(params[0], params[1]);
+ double radius = getRadius(center);
+ double sum = 0;
+ for (Vector2D point : points) {
+ double di = point.distance(center) - radius;
+ sum += di * di;
+ }
+ return sum;
+ }
+ });
+ }
+
+ public ObjectiveFunctionGradient getObjectiveFunctionGradient() {
+ return new ObjectiveFunctionGradient(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ Vector2D center = new Vector2D(params[0], params[1]);
+ double radius = getRadius(center);
+ // gradient of the sum of squared residuals
+ double dJdX = 0;
+ double dJdY = 0;
+ for (Vector2D pk : points) {
+ double dk = pk.distance(center);
+ dJdX += (center.getX() - pk.getX()) * (dk - radius) / dk;
+ dJdY += (center.getY() - pk.getY()) * (dk - radius) / dk;
+ }
+ dJdX *= 2;
+ dJdY *= 2;
+
+ return new double[] { dJdX, dJdY };
+ }
+ });
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizerTest.java
new file mode 100644
index 000000000..10a712841
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizerTest.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.gradient;
+
+import java.io.Serializable;
+
+import org.apache.commons.math3.analysis.DifferentiableMultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
+import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableFunction;
+import org.apache.commons.math3.analysis.solvers.BrentSolver;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.MathIllegalArgumentException;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.linear.BlockRealMatrix;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.SimpleValueChecker;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+ *
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ * This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+ *
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class NonLinearConjugateGradientOptimizerTest {
+ @Test
+ public void testTrivial() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0 }));
+ Assert.assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
+ Assert.assertEquals(0.0, optimum.getValue(), 1.0e-10);
+ }
+
+ @Test
+ public void testColumnsPermutation() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 1.0, -1.0 }, { 0.0, 2.0 }, { 1.0, -2.0 } },
+ new double[] { 4.0, 6.0, 1.0 });
+
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 0 }));
+ Assert.assertEquals(7.0, optimum.getPoint()[0], 1.0e-10);
+ Assert.assertEquals(3.0, optimum.getPoint()[1], 1.0e-10);
+ Assert.assertEquals(0.0, optimum.getValue(), 1.0e-10);
+
+ }
+
+ @Test
+ public void testNoDependency() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 0, 0, 0, 0, 0 },
+ { 0, 2, 0, 0, 0, 0 },
+ { 0, 0, 2, 0, 0, 0 },
+ { 0, 0, 0, 2, 0, 0 },
+ { 0, 0, 0, 0, 2, 0 },
+ { 0, 0, 0, 0, 0, 2 }
+ }, new double[] { 0.0, 1.1, 2.2, 3.3, 4.4, 5.5 });
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
+ for (int i = 0; i < problem.target.length; ++i) {
+ Assert.assertEquals(0.55 * i, optimum.getPoint()[i], 1.0e-10);
+ }
+ }
+
+ @Test
+ public void testOneSet() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 0, 0 },
+ { -1, 1, 0 },
+ { 0, -1, 1 }
+ }, new double[] { 1, 1, 1});
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 0, 0 }));
+ Assert.assertEquals(1.0, optimum.getPoint()[0], 1.0e-10);
+ Assert.assertEquals(2.0, optimum.getPoint()[1], 1.0e-10);
+ Assert.assertEquals(3.0, optimum.getPoint()[2], 1.0e-10);
+
+ }
+
+ @Test
+ public void testTwoSets() {
+ final double epsilon = 1.0e-7;
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 1, 0, 4, 0, 0 },
+ { -4, -2, 3, -7, 0, 0 },
+ { 4, 1, -2, 8, 0, 0 },
+ { 0, -3, -12, -1, 0, 0 },
+ { 0, 0, 0, 0, epsilon, 1 },
+ { 0, 0, 0, 0, 1, 1 }
+ }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
+
+ final Preconditioner preconditioner
+ = new Preconditioner() {
+ public double[] precondition(double[] point, double[] r) {
+ double[] d = r.clone();
+ d[0] /= 72.0;
+ d[1] /= 30.0;
+ d[2] /= 314.0;
+ d[3] /= 260.0;
+ d[4] /= 2 * (1 + epsilon * epsilon);
+ d[5] /= 4.0;
+ return d;
+ }
+ };
+
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-13, 1e-13),
+ new BrentSolver(),
+ preconditioner);
+
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
+ Assert.assertEquals( 3.0, optimum.getPoint()[0], 1.0e-10);
+ Assert.assertEquals( 4.0, optimum.getPoint()[1], 1.0e-10);
+ Assert.assertEquals(-1.0, optimum.getPoint()[2], 1.0e-10);
+ Assert.assertEquals(-2.0, optimum.getPoint()[3], 1.0e-10);
+ Assert.assertEquals( 1.0 + epsilon, optimum.getPoint()[4], 1.0e-10);
+ Assert.assertEquals( 1.0 - epsilon, optimum.getPoint()[5], 1.0e-10);
+
+ }
+
+ @Test
+ public void testNonInversible() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 2, -3 },
+ { 2, 1, 3 },
+ { -3, 0, -9 }
+ }, new double[] { 1, 1, 1 });
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 0, 0 }));
+ Assert.assertTrue(optimum.getValue() > 0.5);
+ }
+
+ @Test
+ public void testIllConditioned() {
+ LinearProblem problem1 = new LinearProblem(new double[][] {
+ { 10.0, 7.0, 8.0, 7.0 },
+ { 7.0, 5.0, 6.0, 5.0 },
+ { 8.0, 6.0, 10.0, 9.0 },
+ { 7.0, 5.0, 9.0, 10.0 }
+ }, new double[] { 32, 23, 33, 31 });
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-13, 1e-13),
+ new BrentSolver(1e-15, 1e-15));
+ PointValuePair optimum1
+ = optimizer.optimize(new MaxEval(200),
+ problem1.getObjectiveFunction(),
+ problem1.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 1, 2, 3 }));
+ Assert.assertEquals(1.0, optimum1.getPoint()[0], 1.0e-4);
+ Assert.assertEquals(1.0, optimum1.getPoint()[1], 1.0e-4);
+ Assert.assertEquals(1.0, optimum1.getPoint()[2], 1.0e-4);
+ Assert.assertEquals(1.0, optimum1.getPoint()[3], 1.0e-4);
+
+ LinearProblem problem2 = new LinearProblem(new double[][] {
+ { 10.00, 7.00, 8.10, 7.20 },
+ { 7.08, 5.04, 6.00, 5.00 },
+ { 8.00, 5.98, 9.89, 9.00 },
+ { 6.99, 4.99, 9.00, 9.98 }
+ }, new double[] { 32, 23, 33, 31 });
+ PointValuePair optimum2
+ = optimizer.optimize(new MaxEval(200),
+ problem2.getObjectiveFunction(),
+ problem2.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 0, 1, 2, 3 }));
+ Assert.assertEquals(-81.0, optimum2.getPoint()[0], 1.0e-1);
+ Assert.assertEquals(137.0, optimum2.getPoint()[1], 1.0e-1);
+ Assert.assertEquals(-34.0, optimum2.getPoint()[2], 1.0e-1);
+ Assert.assertEquals( 22.0, optimum2.getPoint()[3], 1.0e-1);
+
+ }
+
+ @Test
+ public void testMoreEstimatedParametersSimple() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 3.0, 2.0, 0.0, 0.0 },
+ { 0.0, 1.0, -1.0, 1.0 },
+ { 2.0, 0.0, 1.0, 0.0 }
+ }, new double[] { 7.0, 3.0, 5.0 });
+
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 7, 6, 5, 4 }));
+ Assert.assertEquals(0, optimum.getValue(), 1.0e-10);
+
+ }
+
+ @Test
+ public void testMoreEstimatedParametersUnsorted() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
+ { 0.0, 0.0, 1.0, 1.0, 1.0, 0.0 },
+ { 0.0, 0.0, 0.0, 0.0, 1.0, -1.0 },
+ { 0.0, 0.0, -1.0, 1.0, 0.0, 1.0 },
+ { 0.0, 0.0, 0.0, -1.0, 1.0, 0.0 }
+ }, new double[] { 3.0, 12.0, -1.0, 7.0, 1.0 });
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 2, 2, 2, 2, 2, 2 }));
+ Assert.assertEquals(0, optimum.getValue(), 1.0e-10);
+ }
+
+ @Test
+ public void testRedundantEquations() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0 },
+ { 1.0, -1.0 },
+ { 1.0, 3.0 }
+ }, new double[] { 3.0, 1.0, 5.0 });
+
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 1, 1 }));
+ Assert.assertEquals(2.0, optimum.getPoint()[0], 1.0e-8);
+ Assert.assertEquals(1.0, optimum.getPoint()[1], 1.0e-8);
+
+ }
+
+ @Test
+ public void testInconsistentEquations() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0 },
+ { 1.0, -1.0 },
+ { 1.0, 3.0 }
+ }, new double[] { 3.0, 1.0, 4.0 });
+
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-6, 1e-6));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 1, 1 }));
+ Assert.assertTrue(optimum.getValue() > 0.1);
+
+ }
+
+ @Test
+ public void testCircleFitting() {
+ CircleScalar problem = new CircleScalar();
+ problem.addPoint( 30.0, 68.0);
+ problem.addPoint( 50.0, -6.0);
+ problem.addPoint(110.0, -20.0);
+ problem.addPoint( 35.0, 15.0);
+ problem.addPoint( 45.0, 97.0);
+ NonLinearConjugateGradientOptimizer optimizer
+ = new NonLinearConjugateGradientOptimizer(NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
+ new SimpleValueChecker(1e-30, 1e-30),
+ new BrentSolver(1e-15, 1e-13));
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getObjectiveFunction(),
+ problem.getObjectiveFunctionGradient(),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
+ Assert.assertEquals(69.960161753, problem.getRadius(center), 1.0e-8);
+ Assert.assertEquals(96.075902096, center.getX(), 1.0e-8);
+ Assert.assertEquals(48.135167894, center.getY(), 1.0e-8);
+ }
+
+ private static class LinearProblem {
+ final RealMatrix factors;
+ final double[] target;
+
+ public LinearProblem(double[][] factors,
+ double[] target) {
+ this.factors = new BlockRealMatrix(factors);
+ this.target = target;
+ }
+
+ public ObjectiveFunction getObjectiveFunction() {
+ return new ObjectiveFunction(new MultivariateFunction() {
+ public double value(double[] point) {
+ double[] y = factors.operate(point);
+ double sum = 0;
+ for (int i = 0; i < y.length; ++i) {
+ double ri = y[i] - target[i];
+ sum += ri * ri;
+ }
+ return sum;
+ }
+ });
+ }
+
+ public ObjectiveFunctionGradient getObjectiveFunctionGradient() {
+ return new ObjectiveFunctionGradient(new MultivariateVectorFunction() {
+ public double[] value(double[] point) {
+ double[] r = factors.operate(point);
+ for (int i = 0; i < r.length; ++i) {
+ r[i] -= target[i];
+ }
+ double[] p = factors.transpose().operate(r);
+ for (int i = 0; i < p.length; ++i) {
+ p[i] *= 2;
+ }
+ return p;
+ }
+ });
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizerTest.java
new file mode 100644
index 000000000..1281ad315
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/BOBYQAOptimizerTest.java
@@ -0,0 +1,627 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Arrays;
+import java.util.Random;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.SimpleBounds;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test for {@link BOBYQAOptimizer}.
+ */
+public class BOBYQAOptimizerTest {
+
+ static final int DIM = 13;
+
+ @Test(expected=NumberIsTooLargeException.class)
+ public void testInitOutOfBounds() {
+ double[] startPoint = point(DIM, 3);
+ double[][] boundaries = boundaries(DIM, -1, 2);
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 2000, null);
+ }
+
+ @Test(expected=DimensionMismatchException.class)
+ public void testBoundariesDimensionMismatch() {
+ double[] startPoint = point(DIM, 0.5);
+ double[][] boundaries = boundaries(DIM + 1, -1, 2);
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 2000, null);
+ }
+
+ @Test(expected=NumberIsTooSmallException.class)
+ public void testProblemDimensionTooSmall() {
+ double[] startPoint = point(1, 0.5);
+ doTest(new Rosen(), startPoint, null,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 2000, null);
+ }
+
+ @Test(expected=TooManyEvaluationsException.class)
+ public void testMaxEvaluations() {
+ final int lowMaxEval = 2;
+ double[] startPoint = point(DIM, 0.1);
+ double[][] boundaries = null;
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, lowMaxEval, null);
+ }
+
+ @Test
+ public void testRosen() {
+ double[] startPoint = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected = new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 2000, expected);
+ }
+
+ @Test
+ public void testMaximize() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected = new PointValuePair(point(DIM,0.0),1.0);
+ doTest(new MinusElli(), startPoint, boundaries,
+ GoalType.MAXIMIZE,
+ 2e-10, 5e-6, 1000, expected);
+ boundaries = boundaries(DIM,-0.3,0.3);
+ startPoint = point(DIM,0.1);
+ doTest(new MinusElli(), startPoint, boundaries,
+ GoalType.MAXIMIZE,
+ 2e-10, 5e-6, 1000, expected);
+ }
+
+ @Test
+ public void testEllipse() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Elli(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 1000, expected);
+ }
+
+ @Test
+ public void testElliRotated() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new ElliRotated(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-12, 1e-6, 10000, expected);
+ }
+
+ @Test
+ public void testCigar() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Cigar(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 100, expected);
+ }
+
+ @Test
+ public void testTwoAxes() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new TwoAxes(), startPoint, boundaries,
+ GoalType.MINIMIZE, 2*
+ 1e-13, 1e-6, 100, expected);
+ }
+
+ @Test
+ public void testCigTab() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new CigTab(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 5e-5, 100, expected);
+ }
+
+ @Test
+ public void testSphere() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Sphere(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 100, expected);
+ }
+
+ @Test
+ public void testTablet() {
+ double[] startPoint = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Tablet(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 100, expected);
+ }
+
+ @Test
+ public void testDiffPow() {
+ double[] startPoint = point(DIM/2,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM/2,0.0),0.0);
+ doTest(new DiffPow(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-8, 1e-1, 12000, expected);
+ }
+
+ @Test
+ public void testSsDiffPow() {
+ double[] startPoint = point(DIM/2,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM/2,0.0),0.0);
+ doTest(new SsDiffPow(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-2, 1.3e-1, 50000, expected);
+ }
+
+ @Test
+ public void testAckley() {
+ double[] startPoint = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Ackley(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-8, 1e-5, 1000, expected);
+ }
+
+ @Test
+ public void testRastrigin() {
+ double[] startPoint = point(DIM,1.0);
+
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Rastrigin(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 1000, expected);
+ }
+
+ @Test
+ public void testConstrainedRosen() {
+ double[] startPoint = point(DIM,0.1);
+
+ double[][] boundaries = boundaries(DIM,-1,2);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-13, 1e-6, 2000, expected);
+ }
+
+ // See MATH-728
+ @Test
+ public void testConstrainedRosenWithMoreInterpolationPoints() {
+ final double[] startPoint = point(DIM, 0.1);
+ final double[][] boundaries = boundaries(DIM, -1, 2);
+ final PointValuePair expected = new PointValuePair(point(DIM, 1.0), 0.0);
+
+ // This should have been 78 because in the code the hard limit is
+ // said to be
+ // ((DIM + 1) * (DIM + 2)) / 2 - (2 * DIM + 1)
+ // i.e. 78 in this case, but the test fails for 48, 59, 62, 63, 64,
+ // 65, 66, ...
+ final int maxAdditionalPoints = 47;
+
+ for (int num = 1; num <= maxAdditionalPoints; num++) {
+ doTest(new Rosen(), startPoint, boundaries,
+ GoalType.MINIMIZE,
+ 1e-12, 1e-6, 2000,
+ num,
+ expected,
+ "num=" + num);
+ }
+ }
+
+ /**
+ * @param func Function to optimize.
+ * @param startPoint Starting point.
+ * @param boundaries Upper / lower point limit.
+ * @param goal Minimization or maximization.
+ * @param fTol Tolerance relative error on the objective function.
+ * @param pointTol Tolerance for checking that the optimum is correct.
+ * @param maxEvaluations Maximum number of evaluations.
+ * @param expected Expected point / value.
+ */
+ private void doTest(MultivariateFunction func,
+ double[] startPoint,
+ double[][] boundaries,
+ GoalType goal,
+ double fTol,
+ double pointTol,
+ int maxEvaluations,
+ PointValuePair expected) {
+ doTest(func,
+ startPoint,
+ boundaries,
+ goal,
+ fTol,
+ pointTol,
+ maxEvaluations,
+ 0,
+ expected,
+ "");
+ }
+
+ /**
+ * @param func Function to optimize.
+ * @param startPoint Starting point.
+ * @param boundaries Upper / lower point limit.
+ * @param goal Minimization or maximization.
+ * @param fTol Tolerance relative error on the objective function.
+ * @param pointTol Tolerance for checking that the optimum is correct.
+ * @param maxEvaluations Maximum number of evaluations.
+ * @param additionalInterpolationPoints Number of interpolation to used
+ * in addition to the default (2 * dim + 1).
+ * @param expected Expected point / value.
+ */
+ private void doTest(MultivariateFunction func,
+ double[] startPoint,
+ double[][] boundaries,
+ GoalType goal,
+ double fTol,
+ double pointTol,
+ int maxEvaluations,
+ int additionalInterpolationPoints,
+ PointValuePair expected,
+ String assertMsg) {
+
+// System.out.println(func.getClass().getName() + " BEGIN"); // XXX
+
+ int dim = startPoint.length;
+ final int numIterpolationPoints = 2 * dim + 1 + additionalInterpolationPoints;
+ BOBYQAOptimizer optim = new BOBYQAOptimizer(numIterpolationPoints);
+ PointValuePair result = boundaries == null ?
+ optim.optimize(new MaxEval(maxEvaluations),
+ new ObjectiveFunction(func),
+ goal,
+ SimpleBounds.unbounded(dim),
+ new InitialGuess(startPoint)) :
+ optim.optimize(new MaxEval(maxEvaluations),
+ new ObjectiveFunction(func),
+ goal,
+ new InitialGuess(startPoint),
+ new SimpleBounds(boundaries[0],
+ boundaries[1]));
+// System.out.println(func.getClass().getName() + " = "
+// + optim.getEvaluations() + " f(");
+// for (double x: result.getPoint()) System.out.print(x + " ");
+// System.out.println(") = " + result.getValue());
+ Assert.assertEquals(assertMsg, expected.getValue(), result.getValue(), fTol);
+ for (int i = 0; i < dim; i++) {
+ Assert.assertEquals(expected.getPoint()[i],
+ result.getPoint()[i], pointTol);
+ }
+
+// System.out.println(func.getClass().getName() + " END"); // XXX
+ }
+
+ private static double[] point(int n, double value) {
+ double[] ds = new double[n];
+ Arrays.fill(ds, value);
+ return ds;
+ }
+
+ private static double[][] boundaries(int dim,
+ double lower, double upper) {
+ double[][] boundaries = new double[2][dim];
+ for (int i = 0; i < dim; i++)
+ boundaries[0][i] = lower;
+ for (int i = 0; i < dim; i++)
+ boundaries[1][i] = upper;
+ return boundaries;
+ }
+
+ private static class Sphere implements MultivariateFunction {
+
+ public double value(double[] x) {
+ double f = 0;
+ for (int i = 0; i < x.length; ++i)
+ f += x[i] * x[i];
+ return f;
+ }
+ }
+
+ private static class Cigar implements MultivariateFunction {
+ private double factor;
+
+ Cigar() {
+ this(1e3);
+ }
+
+ Cigar(double axisratio) {
+ factor = axisratio * axisratio;
+ }
+
+ public double value(double[] x) {
+ double f = x[0] * x[0];
+ for (int i = 1; i < x.length; ++i)
+ f += factor * x[i] * x[i];
+ return f;
+ }
+ }
+
+    /** Tablet function: first axis up-weighted by {@code factor}, the rest unweighted. */
+    private static class Tablet implements MultivariateFunction {
+        /** Weight (axisratio squared) applied to the first axis only. */
+        private final double factor;
+
+        Tablet() {
+            this(1e3);
+        }
+
+        Tablet(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            double sum = factor * x[0] * x[0];
+            for (int i = 1; i < x.length; i++) {
+                sum += x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Cigar-tablet function: first axis down-weighted, last axis up-weighted by {@code factor}. */
+    private static class CigTab implements MultivariateFunction {
+        /** Asymmetric weight: divides the first axis, multiplies the last. */
+        private final double factor;
+
+        CigTab() {
+            this(1e4);
+        }
+
+        CigTab(double axisratio) {
+            factor = axisratio;
+        }
+
+        public double value(double[] x) {
+            final int last = x.length - 1;
+            double sum = x[0] * x[0] / factor + factor * x[last] * x[last];
+            for (int i = 1; i < last; i++) {
+                sum += x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Two-axes function: the first half of the axes are scaled by {@code factor}. */
+    private static class TwoAxes implements MultivariateFunction {
+        /** Weight (axisratio squared) applied to the first half of the axes. */
+        private final double factor;
+
+        TwoAxes() {
+            this(1e6);
+        }
+
+        TwoAxes(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            final int half = x.length / 2;
+            double sum = 0;
+            for (int i = 0; i < x.length; i++) {
+                final double weight = i < half ? factor : 1;
+                sum += weight * x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+ /** Rotated hyper-ellipsoid: {@link Elli} evaluated in a random orthonormal basis. */
+ private static class ElliRotated implements MultivariateFunction {
+ // Lazily generated orthonormal basis shared across evaluations.
+ private Basis B = new Basis();
+ // Axis weighting, axisratio squared.
+ private double factor;
+
+ ElliRotated() {
+ this(1e3);
+ }
+
+ ElliRotated(double axisratio) {
+ factor = axisratio * axisratio;
+ }
+
+ public double value(double[] x) {
+ double f = 0;
+ // Rotate the argument into the cached basis before weighting the axes.
+ x = B.Rotate(x);
+ for (int i = 0; i < x.length; ++i)
+ f += Math.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
+ return f;
+ }
+ }
+
+    /** Hyper-ellipsoid: axis i weighted by factor^(i/(n-1)); minimum 0 at the origin. */
+    private static class Elli implements MultivariateFunction {
+        /** Maximum axis weight, axisratio squared. */
+        private final double factor;
+
+        Elli() {
+            this(1e3);
+        }
+
+        Elli(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            final double lastIndex = x.length - 1.;
+            double sum = 0;
+            for (int i = 0; i < x.length; i++) {
+                sum += Math.pow(factor, i / lastIndex) * x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Negated ellipsoid, 1 - Elli(x); used to exercise the MAXIMIZE goal. */
+    private static class MinusElli implements MultivariateFunction {
+        private final Elli delegate = new Elli();
+
+        public double value(double[] x) {
+            final double elliValue = delegate.value(x);
+            return 1.0 - elliValue;
+        }
+    }
+
+    /** Sum of different powers: |x_i|^(2 + 10*i/(n-1)); minimum 0 at the origin. */
+    private static class DiffPow implements MultivariateFunction {
+        public double value(double[] x) {
+            double f = 0;
+            for (int i = 0; i < x.length; ++i) {
+                // Exponent grows linearly from 2 (first axis) to 12 (last axis).
+                f += Math.pow(Math.abs(x[i]), 2. + 10 * (double) i
+                              / (x.length - 1.));
+            }
+            // Commented-out fcount/System.out debugging code removed.
+            return f;
+        }
+    }
+
+    /** Fourth root of {@link DiffPow}, flattening the landscape near the optimum. */
+    private static class SsDiffPow implements MultivariateFunction {
+        private final DiffPow diffPow = new DiffPow();
+
+        public double value(double[] x) {
+            return Math.pow(diffPow.value(x), 0.25);
+        }
+    }
+
+ /** Rosenbrock function; global minimum 0 at (1, ..., 1). */
+ private static class Rosen implements MultivariateFunction {
+
+ public double value(double[] x) {
+ double f = 0;
+ for (int i = 0; i < x.length - 1; ++i)
+ // 100 * (x_i^2 - x_{i+1})^2 + (x_i - 1)^2
+ f += 1e2 * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1])
+ + (x[i] - 1.) * (x[i] - 1.);
+ return f;
+ }
+ }
+
+ /** Ackley function (optionally axis-scaled); global minimum 0 at the origin. */
+ private static class Ackley implements MultivariateFunction {
+ // Per-axis scaling base; 1 gives the standard (unscaled) Ackley function.
+ private double axisratio;
+
+ Ackley(double axra) {
+ axisratio = axra;
+ }
+
+ public Ackley() {
+ this(1);
+ }
+
+ public double value(double[] x) {
+ double f = 0;
+ double res2 = 0;
+ double fac = 0;
+ for (int i = 0; i < x.length; ++i) {
+ // NOTE(review): the exponent uses (i - 1.), not i, so the first axis
+ // gets weight axisratio^(-1/(n-1)) -- presumably copied from the
+ // reference benchmark code; confirm before changing.
+ fac = Math.pow(axisratio, (i - 1.) / (x.length - 1.));
+ f += fac * fac * x[i] * x[i];
+ res2 += Math.cos(2. * Math.PI * fac * x[i]);
+ }
+ // Standard Ackley form: 20 - 20*exp(-0.2*sqrt(mean(x^2))) + e - exp(mean(cos(2*pi*x))).
+ f = (20. - 20. * Math.exp(-0.2 * Math.sqrt(f / x.length))
+ + Math.exp(1.) - Math.exp(res2 / x.length));
+ return f;
+ }
+ }
+
+    /** Rastrigin function: quadratic bowl plus cosine ripples; minimum 0 at the origin. */
+    private static class Rastrigin implements MultivariateFunction {
+        /** Per-axis scaling base; 1 gives the standard Rastrigin function. */
+        private final double axisratio;
+        /** Amplitude of the cosine ripple term. */
+        private final double amplitude;
+
+        Rastrigin() {
+            this(1, 10);
+        }
+
+        Rastrigin(double axisratio, double amplitude) {
+            this.axisratio = axisratio;
+            this.amplitude = amplitude;
+        }
+
+        public double value(double[] x) {
+            double f = 0;
+            for (int i = 0; i < x.length; ++i) {
+                // NOTE(review): exponent uses (i - 1.), mirroring the reference
+                // benchmark code -- confirm before "fixing" to plain i.
+                final double fac = Math.pow(axisratio, (i - 1.) / (x.length - 1.));
+                // Dead code removed: the original "if (i == 0 && x[i] < 0) fac *= 1.;"
+                // multiplied by exactly 1 and therefore had no effect.
+                f += fac * fac * x[i] * x[i] + amplitude
+                    * (1. - Math.cos(2. * Math.PI * fac * x[i]));
+            }
+            return f;
+        }
+    }
+
+    /** Generates and caches a random orthonormal basis via Gram-Schmidt. */
+    private static class Basis {
+        /** Cached basis vectors (rows); rebuilt when the dimension changes. */
+        double[][] basis;
+        // Fixed seed: the "random" basis is deliberately reproducible across runs.
+        Random rand = new Random(2);
+
+        /** Returns {@code x} expressed in the cached orthonormal basis. */
+        double[] Rotate(double[] x) {
+            GenBasis(x.length);
+            double[] y = new double[x.length];
+            for (int i = 0; i < x.length; ++i) {
+                y[i] = 0;
+                for (int j = 0; j < x.length; ++j) {
+                    y[i] += basis[i][j] * x[j];
+                }
+            }
+            return y;
+        }
+
+        /** Lazily (re)builds an orthonormal basis of the given dimension. */
+        void GenBasis(int DIM) {
+            // Was "basis != null ? basis.length == DIM : false" -- plain && is clearer.
+            if (basis != null && basis.length == DIM) {
+                return; // cached basis already has the right dimension
+            }
+
+            /* generate orthogonal basis by Gram-Schmidt orthonormalization */
+            basis = new double[DIM][DIM];
+            for (int i = 0; i < DIM; ++i) {
+                /* sample components from a Gaussian */
+                for (int j = 0; j < DIM; ++j) {
+                    basis[i][j] = rand.nextGaussian();
+                }
+                /* subtract projections onto the previous vectors */
+                for (int j = i - 1; j >= 0; --j) {
+                    double sp = 0;
+                    for (int k = 0; k < DIM; ++k) {
+                        sp += basis[i][k] * basis[j][k]; /* scalar product */
+                    }
+                    for (int k = 0; k < DIM; ++k) {
+                        basis[i][k] -= sp * basis[j][k];
+                    }
+                }
+                /* normalize */
+                double norm2 = 0;
+                for (int k = 0; k < DIM; ++k) {
+                    norm2 += basis[i][k] * basis[i][k]; /* squared norm */
+                }
+                final double norm = Math.sqrt(norm2);
+                for (int k = 0; k < DIM; ++k) {
+                    basis[i][k] /= norm;
+                }
+            }
+        }
+    }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizerTest.java
new file mode 100644
index 000000000..4e8c4dc89
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizerTest.java
@@ -0,0 +1,794 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import java.util.Arrays;
+import java.util.Random;
+import org.apache.commons.math3.Retry;
+import org.apache.commons.math3.RetryRunner;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.MathUnsupportedOperationException;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NotPositiveException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.SimpleBounds;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.random.MersenneTwister;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.Ignore;
+import org.junit.runner.RunWith;
+
+/**
+ * Test for {@link CMAESOptimizer}.
+ */
+@RunWith(RetryRunner.class)
+public class CMAESOptimizerTest {
+
+ /** Dimension used by all multi-dimensional test problems. */
+ static final int DIM = 13;
+ /** Default CMA-ES offspring population size: 4 + floor(3*ln(DIM)). */
+ static final int LAMBDA = 4 + (int)(3.*Math.log(DIM));
+
+ /** A start point above the upper bound must raise NumberIsTooLargeException. */
+ @Test(expected = NumberIsTooLargeException.class)
+ public void testInitOutofbounds1() {
+ double[] startPoint = point(DIM,3);
+ double[] insigma = point(DIM, 0.3);
+ double[][] boundaries = boundaries(DIM,-1,2);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+ /** A start point below the lower bound must raise NumberIsTooSmallException. */
+ @Test(expected = NumberIsTooSmallException.class)
+ public void testInitOutofbounds2() {
+ double[] startPoint = point(DIM, -2);
+ double[] insigma = point(DIM, 0.3);
+ double[][] boundaries = boundaries(DIM,-1,2);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Bounds of a different dimension than the start point must be rejected. */
+ @Test(expected = DimensionMismatchException.class)
+ public void testBoundariesDimensionMismatch() {
+ double[] startPoint = point(DIM,0.5);
+ double[] insigma = point(DIM, 0.3);
+ double[][] boundaries = boundaries(DIM+1,-1,2);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** A negative input sigma must raise NotPositiveException. */
+ @Test(expected = NotPositiveException.class)
+ public void testInputSigmaNegative() {
+ double[] startPoint = point(DIM,0.5);
+ double[] insigma = point(DIM,-0.5);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** A sigma larger than the bounded search interval must raise OutOfRangeException. */
+ @Test(expected = OutOfRangeException.class)
+ public void testInputSigmaOutOfRange() {
+ double[] startPoint = point(DIM,0.5);
+ double[] insigma = point(DIM, 1.1);
+ double[][] boundaries = boundaries(DIM,-0.5,0.5);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** A sigma vector of the wrong dimension must be rejected. */
+ @Test(expected = DimensionMismatchException.class)
+ public void testInputSigmaDimensionMismatch() {
+ double[] startPoint = point(DIM,0.5);
+ double[] insigma = point(DIM + 1, 0.5);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Unbounded Rosenbrock, with active and passive covariance update. */
+ @Test
+ @Retry(3)
+ public void testRosen() {
+ double[] startPoint = point(DIM,0.1);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** MAXIMIZE goal on the negated ellipsoid, without and then with bounds. */
+ @Test
+ @Retry(3)
+ public void testMaximize() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),1.0);
+ doTest(new MinusElli(), startPoint, insigma, boundaries,
+ GoalType.MAXIMIZE, LAMBDA, true, 0, 1.0-1e-13,
+ 2e-10, 5e-6, 100000, expected);
+ doTest(new MinusElli(), startPoint, insigma, boundaries,
+ GoalType.MAXIMIZE, LAMBDA, false, 0, 1.0-1e-13,
+ 2e-10, 5e-6, 100000, expected);
+ boundaries = boundaries(DIM,-0.3,0.3);
+ startPoint = point(DIM,0.1);
+ doTest(new MinusElli(), startPoint, insigma, boundaries,
+ GoalType.MAXIMIZE, LAMBDA, true, 0, 1.0-1e-13,
+ 2e-10, 5e-6, 100000, expected);
+ }
+
+ /** Unbounded hyper-ellipsoid, active and passive covariance update. */
+ @Test
+ public void testEllipse() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Elli(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new Elli(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Rotated hyper-ellipsoid, active and passive covariance update. */
+ @Test
+ public void testElliRotated() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new ElliRotated(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new ElliRotated(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Unbounded cigar function, active and passive covariance update. */
+ @Test
+ public void testCigar() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Cigar(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 200000, expected);
+ doTest(new Cigar(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Cigar function with a huge finite lower bound and an infinite upper bound. */
+ @Test
+ public void testCigarWithBoundaries() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = boundaries(DIM, -1e100, Double.POSITIVE_INFINITY);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Cigar(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 200000, expected);
+ doTest(new Cigar(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Two-axes function with doubled population; passive update gets looser tolerances. */
+ @Test
+ public void testTwoAxes() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new TwoAxes(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 200000, expected);
+ doTest(new TwoAxes(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
+ 1e-8, 1e-3, 200000, expected);
+ }
+
+ /** Cigar-tablet function, active and passive covariance update. */
+ @Test
+ public void testCigTab() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.3);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new CigTab(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 5e-5, 100000, expected);
+ doTest(new CigTab(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 5e-5, 100000, expected);
+ }
+
+ /** Sphere function, active and passive covariance update. */
+ @Test
+ public void testSphere() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Sphere(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new Sphere(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Tablet function, active and passive covariance update. */
+ @Test
+ public void testTablet() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Tablet(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new Tablet(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Sum-of-different-powers function with a fixed population of 10. */
+ @Test
+ public void testDiffPow() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new DiffPow(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 10, true, 0, 1e-13,
+ 1e-8, 1e-1, 100000, expected);
+ doTest(new DiffPow(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 10, false, 0, 1e-13,
+ 1e-8, 2e-1, 100000, expected);
+ }
+
+ /** Fourth-root of DiffPow (flatter near the optimum), population of 10. */
+ @Test
+ public void testSsDiffPow() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new SsDiffPow(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 10, true, 0, 1e-13,
+ 1e-4, 1e-1, 200000, expected);
+ doTest(new SsDiffPow(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 10, false, 0, 1e-13,
+ 1e-4, 1e-1, 200000, expected);
+ }
+
+ /** Multimodal Ackley function with doubled population. */
+ @Test
+ public void testAckley() {
+ double[] startPoint = point(DIM,1.0);
+ double[] insigma = point(DIM,1.0);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Ackley(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
+ 1e-9, 1e-5, 100000, expected);
+ doTest(new Ackley(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
+ 1e-9, 1e-5, 100000, expected);
+ }
+
+ /** Multimodal Rastrigin function with a large, dimension-scaled population. */
+ @Test
+ public void testRastrigin() {
+ double[] startPoint = point(DIM,0.1);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,0.0),0.0);
+ doTest(new Rastrigin(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, (int)(200*Math.sqrt(DIM)), true, 0, 1e-13,
+ 1e-13, 1e-6, 200000, expected);
+ doTest(new Rastrigin(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, (int)(200*Math.sqrt(DIM)), false, 0, 1e-13,
+ 1e-13, 1e-6, 200000, expected);
+ }
+
+ /** Rosenbrock inside a [-1, 2] box with doubled population. */
+ @Test
+ public void testConstrainedRosen() {
+ double[] startPoint = point(DIM, 0.1);
+ double[] insigma = point(DIM, 0.1);
+ double[][] boundaries = boundaries(DIM, -1, 2);
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, true, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, 2*LAMBDA, false, 0, 1e-13,
+ 1e-13, 1e-6, 100000, expected);
+ }
+
+ /** Rosenbrock using the diagonal-only covariance update (diagonalOnly = 1). */
+ @Test
+ public void testDiagonalRosen() {
+ double[] startPoint = point(DIM,0.1);
+ double[] insigma = point(DIM,0.1);
+ double[][] boundaries = null;
+ PointValuePair expected =
+ new PointValuePair(point(DIM,1.0),0.0);
+ doTest(new Rosen(), startPoint, insigma, boundaries,
+ GoalType.MINIMIZE, LAMBDA, false, 1, 1e-13,
+ 1e-10, 1e-4, 1000000, expected);
+ }
+
+ /** Cf. MATH-864: the returned point must not lie outside the declared bounds. */
+ @Test
+ public void testMath864() {
+ final CMAESOptimizer optimizer
+ = new CMAESOptimizer(30000, 0, true, 10,
+ 0, new MersenneTwister(), false, null);
+ // Quadratic with its minimum (target = 1) inside the bounded interval.
+ final MultivariateFunction fitnessFunction = new MultivariateFunction() {
+ public double value(double[] parameters) {
+ final double target = 1;
+ final double error = target - parameters[0];
+ return error * error;
+ }
+ };
+
+ final double[] start = { 0 };
+ final double[] lower = { -1e6 };
+ final double[] upper = { 1.5 };
+ final double[] sigma = { 1e-1 };
+ final double[] result = optimizer.optimize(new MaxEval(10000),
+ new ObjectiveFunction(fitnessFunction),
+ GoalType.MINIMIZE,
+ new CMAESOptimizer.PopulationSize(5),
+ new CMAESOptimizer.Sigma(sigma),
+ new InitialGuess(start),
+ new SimpleBounds(lower, upper)).getPoint();
+ Assert.assertTrue("Out of bounds (" + result[0] + " > " + upper[0] + ")",
+ result[0] <= upper[0]);
+ }
+
+ /**
+ * Cf. MATH-867
+ */
+ @Test
+ public void testFitAccuracyDependsOnBoundary() {
+ final CMAESOptimizer optimizer
+ = new CMAESOptimizer(30000, 0, true, 10,
+ 0, new MersenneTwister(), false, null);
+ // Quadratic with its minimum at target = 11.1.
+ final MultivariateFunction fitnessFunction = new MultivariateFunction() {
+ public double value(double[] parameters) {
+ final double target = 11.1;
+ final double error = target - parameters[0];
+ return error * error;
+ }
+ };
+
+ final double[] start = { 1 };
+
+ // No bounds.
+ PointValuePair result = optimizer.optimize(new MaxEval(100000),
+ new ObjectiveFunction(fitnessFunction),
+ GoalType.MINIMIZE,
+ SimpleBounds.unbounded(1),
+ new CMAESOptimizer.PopulationSize(5),
+ new CMAESOptimizer.Sigma(new double[] { 1e-1 }),
+ new InitialGuess(start));
+ final double resNoBound = result.getPoint()[0];
+
+ // Optimum is near the lower bound.
+ final double[] lower = { -20 };
+ final double[] upper = { 5e16 };
+ final double[] sigma = { 10 };
+ result = optimizer.optimize(new MaxEval(100000),
+ new ObjectiveFunction(fitnessFunction),
+ GoalType.MINIMIZE,
+ new CMAESOptimizer.PopulationSize(5),
+ new CMAESOptimizer.Sigma(sigma),
+ new InitialGuess(start),
+ new SimpleBounds(lower, upper));
+ final double resNearLo = result.getPoint()[0];
+
+ // Optimum is near the upper bound.
+ lower[0] = -5e16;
+ upper[0] = 20;
+ result = optimizer.optimize(new MaxEval(100000),
+ new ObjectiveFunction(fitnessFunction),
+ GoalType.MINIMIZE,
+ new CMAESOptimizer.PopulationSize(5),
+ new CMAESOptimizer.Sigma(sigma),
+ new InitialGuess(start),
+ new SimpleBounds(lower, upper));
+ final double resNearHi = result.getPoint()[0];
+
+ // System.out.println("resNoBound=" + resNoBound +
+ // " resNearLo=" + resNearLo +
+ // " resNearHi=" + resNearHi);
+
+ // The two values currently differ by a substantial amount, indicating that
+ // the bounds definition can prevent reaching the optimum.
+ // NOTE(review): the sentence above appears stale -- the assertions below
+ // require the bounded and unbounded results to agree to within 1e-3.
+ Assert.assertEquals(resNoBound, resNearLo, 1e-3);
+ Assert.assertEquals(resNoBound, resNearHi, 1e-3);
+ }
+
+ /**
+ * @param func Function to optimize.
+ * @param startPoint Starting point.
+ * @param inSigma Individual input sigma.
+ * @param boundaries Upper / lower point limit.
+ * @param goal Minimization or maximization.
+ * @param lambda Population size used for offspring.
+ * @param isActive Covariance update mechanism.
+ * @param diagonalOnly Simplified covariance update.
+ * @param stopValue Termination criteria for optimization.
+ * @param fTol Tolerance relative error on the objective function.
+ * @param pointTol Tolerance for checking that the optimum is correct.
+ * @param maxEvaluations Maximum number of evaluations.
+ * @param expected Expected point / value.
+ */
+ private void doTest(MultivariateFunction func,
+ double[] startPoint,
+ double[] inSigma,
+ double[][] boundaries,
+ GoalType goal,
+ int lambda,
+ boolean isActive,
+ int diagonalOnly,
+ double stopValue,
+ double fTol,
+ double pointTol,
+ int maxEvaluations,
+ PointValuePair expected) {
+ int dim = startPoint.length;
+ // test diagonalOnly = 0 - slow but normally fewer feval#
+ CMAESOptimizer optim = new CMAESOptimizer(30000, stopValue, isActive, diagonalOnly,
+ 0, new MersenneTwister(), false, null);
+ // Unconstrained problems pass SimpleBounds.unbounded; constrained ones
+ // pass the given lower/upper rows as a SimpleBounds box.
+ PointValuePair result = boundaries == null ?
+ optim.optimize(new MaxEval(maxEvaluations),
+ new ObjectiveFunction(func),
+ goal,
+ new InitialGuess(startPoint),
+ SimpleBounds.unbounded(dim),
+ new CMAESOptimizer.Sigma(inSigma),
+ new CMAESOptimizer.PopulationSize(lambda)) :
+ optim.optimize(new MaxEval(maxEvaluations),
+ new ObjectiveFunction(func),
+ goal,
+ new SimpleBounds(boundaries[0],
+ boundaries[1]),
+ new InitialGuess(startPoint),
+ new CMAESOptimizer.Sigma(inSigma),
+ new CMAESOptimizer.PopulationSize(lambda));
+
+ // System.out.println("sol=" + Arrays.toString(result.getPoint()));
+ // Check the objective value, then every coordinate of the optimum.
+ Assert.assertEquals(expected.getValue(), result.getValue(), fTol);
+ for (int i = 0; i < dim; i++) {
+ Assert.assertEquals(expected.getPoint()[i], result.getPoint()[i], pointTol);
+ }
+ }
+
+ private static double[] point(int n, double value) {
+ double[] ds = new double[n];
+ Arrays.fill(ds, value);
+ return ds;
+ }
+
+ private static double[][] boundaries(int dim,
+ double lower, double upper) {
+ double[][] boundaries = new double[2][dim];
+ for (int i = 0; i < dim; i++)
+ boundaries[0][i] = lower;
+ for (int i = 0; i < dim; i++)
+ boundaries[1][i] = upper;
+ return boundaries;
+ }
+
+    /** Sphere function: f(x) = sum of x_i^2; global minimum 0 at the origin. */
+    private static class Sphere implements MultivariateFunction {
+
+        public double value(double[] x) {
+            double sum = 0;
+            for (final double xi : x) {
+                sum += xi * xi;
+            }
+            return sum;
+        }
+    }
+
+    /** Cigar function: x_0^2 plus a heavily weighted sum of the remaining squares. */
+    private static class Cigar implements MultivariateFunction {
+        /** Weight (axisratio squared) applied to all axes except the first. */
+        private final double factor;
+
+        Cigar() {
+            this(1e3);
+        }
+
+        Cigar(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            double sum = x[0] * x[0];
+            for (int i = 1; i < x.length; i++) {
+                sum += factor * x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Tablet function: first axis up-weighted by {@code factor}, the rest unweighted. */
+    private static class Tablet implements MultivariateFunction {
+        /** Weight (axisratio squared) applied to the first axis only. */
+        private final double factor;
+
+        Tablet() {
+            this(1e3);
+        }
+
+        Tablet(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            double sum = factor * x[0] * x[0];
+            for (int i = 1; i < x.length; i++) {
+                sum += x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Cigar-tablet function: first axis down-weighted, last axis up-weighted by {@code factor}. */
+    private static class CigTab implements MultivariateFunction {
+        /** Asymmetric weight: divides the first axis, multiplies the last. */
+        private final double factor;
+
+        CigTab() {
+            this(1e4);
+        }
+
+        CigTab(double axisratio) {
+            factor = axisratio;
+        }
+
+        public double value(double[] x) {
+            final int last = x.length - 1;
+            double sum = x[0] * x[0] / factor + factor * x[last] * x[last];
+            for (int i = 1; i < last; i++) {
+                sum += x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Two-axes function: the first half of the axes are scaled by {@code factor}. */
+    private static class TwoAxes implements MultivariateFunction {
+        /** Weight (axisratio squared) applied to the first half of the axes. */
+        private final double factor;
+
+        TwoAxes() {
+            this(1e6);
+        }
+
+        TwoAxes(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            final int half = x.length / 2;
+            double sum = 0;
+            for (int i = 0; i < x.length; i++) {
+                final double weight = i < half ? factor : 1;
+                sum += weight * x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+ /** Rotated hyper-ellipsoid: {@link Elli} evaluated in a random orthonormal basis. */
+ private static class ElliRotated implements MultivariateFunction {
+ // Lazily generated orthonormal basis shared across evaluations.
+ private Basis B = new Basis();
+ // Axis weighting, axisratio squared.
+ private double factor;
+
+ ElliRotated() {
+ this(1e3);
+ }
+
+ ElliRotated(double axisratio) {
+ factor = axisratio * axisratio;
+ }
+
+ public double value(double[] x) {
+ double f = 0;
+ // Rotate the argument into the cached basis before weighting the axes.
+ x = B.Rotate(x);
+ for (int i = 0; i < x.length; ++i)
+ f += Math.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
+ return f;
+ }
+ }
+
+    /** Hyper-ellipsoid: axis i weighted by factor^(i/(n-1)); minimum 0 at the origin. */
+    private static class Elli implements MultivariateFunction {
+        /** Maximum axis weight, axisratio squared. */
+        private final double factor;
+
+        Elli() {
+            this(1e3);
+        }
+
+        Elli(double axisratio) {
+            factor = axisratio * axisratio;
+        }
+
+        public double value(double[] x) {
+            final double lastIndex = x.length - 1.;
+            double sum = 0;
+            for (int i = 0; i < x.length; i++) {
+                sum += Math.pow(factor, i / lastIndex) * x[i] * x[i];
+            }
+            return sum;
+        }
+    }
+
+    /** Negated ellipsoid, 1 - Elli(x); used to exercise the MAXIMIZE goal. */
+    private static class MinusElli implements MultivariateFunction {
+        // Cache the (stateless) delegate instead of allocating one per
+        // evaluation; matches the sibling test class's implementation.
+        private final Elli elli = new Elli();
+
+        public double value(double[] x) {
+            return 1.0 - elli.value(x);
+        }
+    }
+
+    /** Sum of different powers: |x_i|^(2 + 10*i/(n-1)); minimum 0 at the origin. */
+    private static class DiffPow implements MultivariateFunction {
+
+        public double value(double[] x) {
+            final double lastIndex = x.length - 1.;
+            double sum = 0;
+            for (int i = 0; i < x.length; i++) {
+                // Exponent grows linearly from 2 (first axis) to 12 (last axis).
+                sum += Math.pow(Math.abs(x[i]), 2. + 10 * (double) i / lastIndex);
+            }
+            return sum;
+        }
+    }
+
+    /** Fourth root of {@link DiffPow}, flattening the landscape near the optimum. */
+    private static class SsDiffPow implements MultivariateFunction {
+        private final DiffPow diffPow = new DiffPow();
+
+        public double value(double[] x) {
+            return Math.pow(diffPow.value(x), 0.25);
+        }
+    }
+
+ /** Rosenbrock function; global minimum 0 at (1, ..., 1). */
+ private static class Rosen implements MultivariateFunction {
+
+ public double value(double[] x) {
+ double f = 0;
+ for (int i = 0; i < x.length - 1; ++i)
+ // 100 * (x_i^2 - x_{i+1})^2 + (x_i - 1)^2
+ f += 1e2 * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1])
+ + (x[i] - 1.) * (x[i] - 1.);
+ return f;
+ }
+ }
+
+ /** Ackley function (optionally axis-scaled); global minimum 0 at the origin. */
+ private static class Ackley implements MultivariateFunction {
+ // Per-axis scaling base; 1 gives the standard (unscaled) Ackley function.
+ private double axisratio;
+
+ Ackley(double axra) {
+ axisratio = axra;
+ }
+
+ public Ackley() {
+ this(1);
+ }
+
+ public double value(double[] x) {
+ double f = 0;
+ double res2 = 0;
+ double fac = 0;
+ for (int i = 0; i < x.length; ++i) {
+ // NOTE(review): the exponent uses (i - 1.), not i, so the first axis
+ // gets weight axisratio^(-1/(n-1)) -- presumably copied from the
+ // reference benchmark code; confirm before changing.
+ fac = Math.pow(axisratio, (i - 1.) / (x.length - 1.));
+ f += fac * fac * x[i] * x[i];
+ res2 += Math.cos(2. * Math.PI * fac * x[i]);
+ }
+ // Standard Ackley form: 20 - 20*exp(-0.2*sqrt(mean(x^2))) + e - exp(mean(cos(2*pi*x))).
+ f = (20. - 20. * Math.exp(-0.2 * Math.sqrt(f / x.length))
+ + Math.exp(1.) - Math.exp(res2 / x.length));
+ return f;
+ }
+ }
+
+    /** Rastrigin function: quadratic bowl plus cosine ripples; minimum 0 at the origin. */
+    private static class Rastrigin implements MultivariateFunction {
+        /** Per-axis scaling base; 1 gives the standard Rastrigin function. */
+        private final double axisratio;
+        /** Amplitude of the cosine ripple term. */
+        private final double amplitude;
+
+        Rastrigin() {
+            this(1, 10);
+        }
+
+        Rastrigin(double axisratio, double amplitude) {
+            this.axisratio = axisratio;
+            this.amplitude = amplitude;
+        }
+
+        public double value(double[] x) {
+            double f = 0;
+            for (int i = 0; i < x.length; ++i) {
+                // NOTE(review): exponent uses (i - 1.), mirroring the reference
+                // benchmark code -- confirm before "fixing" to plain i.
+                final double fac = Math.pow(axisratio, (i - 1.) / (x.length - 1.));
+                // Dead code removed: the original "if (i == 0 && x[i] < 0) fac *= 1.;"
+                // multiplied by exactly 1 and therefore had no effect.
+                f += fac * fac * x[i] * x[i] + amplitude
+                    * (1. - Math.cos(2. * Math.PI * fac * x[i]));
+            }
+            return f;
+        }
+    }
+
+    /** Generates and caches a random orthonormal basis via Gram-Schmidt. */
+    private static class Basis {
+        /** Cached basis vectors (rows); rebuilt when the dimension changes. */
+        double[][] basis;
+        // Fixed seed: the "random" basis is deliberately reproducible across runs.
+        Random rand = new Random(2);
+
+        /** Returns {@code x} expressed in the cached orthonormal basis. */
+        double[] Rotate(double[] x) {
+            GenBasis(x.length);
+            double[] y = new double[x.length];
+            for (int i = 0; i < x.length; ++i) {
+                y[i] = 0;
+                for (int j = 0; j < x.length; ++j) {
+                    y[i] += basis[i][j] * x[j];
+                }
+            }
+            return y;
+        }
+
+        /** Lazily (re)builds an orthonormal basis of the given dimension. */
+        void GenBasis(int DIM) {
+            // Was "basis != null ? basis.length == DIM : false" -- plain && is clearer.
+            if (basis != null && basis.length == DIM) {
+                return; // cached basis already has the right dimension
+            }
+
+            /* generate orthogonal basis by Gram-Schmidt orthonormalization */
+            basis = new double[DIM][DIM];
+            for (int i = 0; i < DIM; ++i) {
+                /* sample components from a Gaussian */
+                for (int j = 0; j < DIM; ++j) {
+                    basis[i][j] = rand.nextGaussian();
+                }
+                /* subtract projections onto the previous vectors */
+                for (int j = i - 1; j >= 0; --j) {
+                    double sp = 0;
+                    for (int k = 0; k < DIM; ++k) {
+                        sp += basis[i][k] * basis[j][k]; /* scalar product */
+                    }
+                    for (int k = 0; k < DIM; ++k) {
+                        basis[i][k] -= sp * basis[j][k];
+                    }
+                }
+                /* normalize */
+                double norm2 = 0;
+                for (int k = 0; k < DIM; ++k) {
+                    norm2 += basis[i][k] * basis[i][k]; /* squared norm */
+                }
+                final double norm = Math.sqrt(norm2);
+                for (int k = 0; k < DIM; ++k) {
+                    basis[i][k] /= norm;
+                }
+            }
+        }
+    }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizerTest.java
new file mode 100644
index 000000000..296ec0c95
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizerTest.java
@@ -0,0 +1,251 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.SumSincFunction;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test for {@link PowellOptimizer}.
+ */
+public class PowellOptimizerTest {
+
+ @Test
+ public void testSumSinc() {
+ final MultivariateFunction func = new SumSincFunction(-1);
+
+ int dim = 2;
+ final double[] minPoint = new double[dim];
+ for (int i = 0; i < dim; i++) {
+ minPoint[i] = 0;
+ }
+
+ double[] init = new double[dim];
+
+ // Initial is minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = minPoint[i];
+ }
+ doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-9);
+
+ // Initial is far from minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = minPoint[i] + 3;
+ }
+ doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-5);
+ // More stringent line search tolerance enhances the precision
+ // of the result.
+ doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-9, 1e-7);
+ }
+
+ @Test
+ public void testQuadratic() {
+ final MultivariateFunction func = new MultivariateFunction() {
+ public double value(double[] x) {
+ final double a = x[0] - 1;
+ final double b = x[1] - 1;
+ return a * a + b * b + 1;
+ }
+ };
+
+ int dim = 2;
+ final double[] minPoint = new double[dim];
+ for (int i = 0; i < dim; i++) {
+ minPoint[i] = 1;
+ }
+
+ double[] init = new double[dim];
+
+ // Initial is minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = minPoint[i];
+ }
+ doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-8);
+
+ // Initial is far from minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = minPoint[i] - 20;
+ }
+ doTest(func, minPoint, init, GoalType.MINIMIZE, 1e-9, 1e-8);
+ }
+
+ @Test
+ public void testMaximizeQuadratic() {
+ final MultivariateFunction func = new MultivariateFunction() {
+ public double value(double[] x) {
+ final double a = x[0] - 1;
+ final double b = x[1] - 1;
+ return -a * a - b * b + 1;
+ }
+ };
+
+ int dim = 2;
+ final double[] maxPoint = new double[dim];
+ for (int i = 0; i < dim; i++) {
+ maxPoint[i] = 1;
+ }
+
+ double[] init = new double[dim];
+
+ // Initial is minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = maxPoint[i];
+ }
+ doTest(func, maxPoint, init, GoalType.MAXIMIZE, 1e-9, 1e-8);
+
+ // Initial is far from minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = maxPoint[i] - 20;
+ }
+ doTest(func, maxPoint, init, GoalType.MAXIMIZE, 1e-9, 1e-8);
+ }
+
+ /**
+ * Ensure that we do not increase the number of function evaluations when
+ * the function values are scaled up.
+ * Note that the tolerances parameters passed to the constructor must
+ * still hold sensible values because they are used to set the line search
+ * tolerances.
+ */
+ @Test
+ public void testRelativeToleranceOnScaledValues() {
+ final MultivariateFunction func = new MultivariateFunction() {
+ public double value(double[] x) {
+ final double a = x[0] - 1;
+ final double b = x[1] - 1;
+ return a * a * FastMath.sqrt(FastMath.abs(a)) + b * b + 1;
+ }
+ };
+
+ int dim = 2;
+ final double[] minPoint = new double[dim];
+ for (int i = 0; i < dim; i++) {
+ minPoint[i] = 1;
+ }
+
+ double[] init = new double[dim];
+ // Initial is far from minimum.
+ for (int i = 0; i < dim; i++) {
+ init[i] = minPoint[i] - 20;
+ }
+
+ final double relTol = 1e-10;
+
+ final int maxEval = 1000;
+ // Very small absolute tolerance to rely solely on the relative
+ // tolerance as a stopping criterion
+ final PowellOptimizer optim = new PowellOptimizer(relTol, 1e-100);
+
+ final PointValuePair funcResult = optim.optimize(new MaxEval(maxEval),
+ new ObjectiveFunction(func),
+ GoalType.MINIMIZE,
+ new InitialGuess(init));
+ final double funcValue = func.value(funcResult.getPoint());
+ final int funcEvaluations = optim.getEvaluations();
+
+ final double scale = 1e10;
+ final MultivariateFunction funcScaled = new MultivariateFunction() {
+ public double value(double[] x) {
+ return scale * func.value(x);
+ }
+ };
+
+ final PointValuePair funcScaledResult = optim.optimize(new MaxEval(maxEval),
+ new ObjectiveFunction(funcScaled),
+ GoalType.MINIMIZE,
+ new InitialGuess(init));
+ final double funcScaledValue = funcScaled.value(funcScaledResult.getPoint());
+ final int funcScaledEvaluations = optim.getEvaluations();
+
+ // Check that both minima provide the same objective function values,
+ // within the relative function tolerance.
+ Assert.assertEquals(1, funcScaledValue / (scale * funcValue), relTol);
+
+ // Check that the numbers of evaluations are the same.
+ Assert.assertEquals(funcEvaluations, funcScaledEvaluations);
+ }
+
+ /**
+ * @param func Function to optimize.
+ * @param optimum Expected optimum.
+ * @param init Starting point.
+ * @param goal Minimization or maximization.
+ * @param fTol Tolerance (relative error on the objective function) for
+ * "Powell" algorithm.
+ * @param pointTol Tolerance for checking that the optimum is correct.
+ */
+ private void doTest(MultivariateFunction func,
+ double[] optimum,
+ double[] init,
+ GoalType goal,
+ double fTol,
+ double pointTol) {
+ final PowellOptimizer optim = new PowellOptimizer(fTol, Math.ulp(1d));
+
+ final PointValuePair result = optim.optimize(new MaxEval(1000),
+ new ObjectiveFunction(func),
+ goal,
+ new InitialGuess(init));
+ final double[] point = result.getPoint();
+
+ for (int i = 0, dim = optimum.length; i < dim; i++) {
+ Assert.assertEquals("found[" + i + "]=" + point[i] + " value=" + result.getValue(),
+ optimum[i], point[i], pointTol);
+ }
+ }
+
+ /**
+ * @param func Function to optimize.
+ * @param optimum Expected optimum.
+ * @param init Starting point.
+ * @param goal Minimization or maximization.
+ * @param fTol Tolerance (relative error on the objective function) for
+ * "Powell" algorithm.
+ * @param fLineTol Tolerance (relative error on the objective function)
+ * for the internal line search algorithm.
+ * @param pointTol Tolerance for checking that the optimum is correct.
+ */
+ private void doTest(MultivariateFunction func,
+ double[] optimum,
+ double[] init,
+ GoalType goal,
+ double fTol,
+ double fLineTol,
+ double pointTol) {
+ final PowellOptimizer optim = new PowellOptimizer(fTol, Math.ulp(1d),
+ fLineTol, Math.ulp(1d));
+
+ final PointValuePair result = optim.optimize(new MaxEval(1000),
+ new ObjectiveFunction(func),
+ goal,
+ new InitialGuess(init));
+ final double[] point = result.getPoint();
+
+ for (int i = 0, dim = optimum.length; i < dim; i++) {
+ Assert.assertEquals("found[" + i + "]=" + point[i] + " value=" + result.getValue(),
+ optimum[i], point[i], pointTol);
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerMultiDirectionalTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerMultiDirectionalTest.java
new file mode 100644
index 000000000..8113a177d
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerMultiDirectionalTest.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.SimpleValueChecker;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class SimplexOptimizerMultiDirectionalTest {
+ @Test
+ public void testMinimize1() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -3, 0 }),
+ new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 4e-6);
+ Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 3e-6);
+ Assert.assertEquals(fourExtrema.valueXmYp, optimum.getValue(), 8e-13);
+ Assert.assertTrue(optimizer.getEvaluations() > 120);
+ Assert.assertTrue(optimizer.getEvaluations() < 150);
+ }
+
+ @Test
+ public void testMinimize2() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 1, 0 }),
+ new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 2e-8);
+ Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-6);
+ Assert.assertEquals(fourExtrema.valueXpYm, optimum.getValue(), 2e-12);
+ Assert.assertTrue(optimizer.getEvaluations() > 120);
+ Assert.assertTrue(optimizer.getEvaluations() < 150);
+ }
+
+ @Test
+ public void testMaximize1() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-11, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MAXIMIZE,
+ new InitialGuess(new double[] { -3.0, 0.0 }),
+ new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 7e-7);
+ Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-7);
+ Assert.assertEquals(fourExtrema.valueXmYm, optimum.getValue(), 2e-14);
+ Assert.assertTrue(optimizer.getEvaluations() > 120);
+ Assert.assertTrue(optimizer.getEvaluations() < 150);
+ }
+
+ @Test
+ public void testMaximize2() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(new SimpleValueChecker(1e-15, 1e-30));
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MAXIMIZE,
+ new InitialGuess(new double[] { 1, 0 }),
+ new MultiDirectionalSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 2e-8);
+ Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 3e-6);
+ Assert.assertEquals(fourExtrema.valueXpYp, optimum.getValue(), 2e-12);
+ Assert.assertTrue(optimizer.getEvaluations() > 180);
+ Assert.assertTrue(optimizer.getEvaluations() < 220);
+ }
+
+ @Test
+ public void testRosenbrock() {
+ MultivariateFunction rosenbrock
+ = new MultivariateFunction() {
+ public double value(double[] x) {
+ ++count;
+ double a = x[1] - x[0] * x[0];
+ double b = 1.0 - x[0];
+ return 100 * a * a + b * b;
+ }
+ };
+
+ count = 0;
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(rosenbrock),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.2, 1 }),
+ new MultiDirectionalSimplex(new double[][] {
+ { -1.2, 1.0 },
+ { 0.9, 1.2 },
+ { 3.5, -2.3 } }));
+
+ Assert.assertEquals(count, optimizer.getEvaluations());
+ Assert.assertTrue(optimizer.getEvaluations() > 50);
+ Assert.assertTrue(optimizer.getEvaluations() < 100);
+ Assert.assertTrue(optimum.getValue() > 1e-2);
+ }
+
+ @Test
+ public void testPowell() {
+ MultivariateFunction powell
+ = new MultivariateFunction() {
+ public double value(double[] x) {
+ ++count;
+ double a = x[0] + 10 * x[1];
+ double b = x[2] - x[3];
+ double c = x[1] - 2 * x[2];
+ double d = x[0] - x[3];
+ return a * a + 5 * b * b + c * c * c * c + 10 * d * d * d * d;
+ }
+ };
+
+ count = 0;
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(1000),
+ new ObjectiveFunction(powell),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 3, -1, 0, 1 }),
+ new MultiDirectionalSimplex(4));
+ Assert.assertEquals(count, optimizer.getEvaluations());
+ Assert.assertTrue(optimizer.getEvaluations() > 800);
+ Assert.assertTrue(optimizer.getEvaluations() < 900);
+ Assert.assertTrue(optimum.getValue() > 1e-2);
+ }
+
+ @Test
+ public void testMath283() {
+ // fails because MultiDirectional.iterateSimplex is looping forever
+ // the while(true) should be replaced with a convergence check
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-14, 1e-14);
+ final Gaussian2D function = new Gaussian2D(0, 0, 1);
+ PointValuePair estimate = optimizer.optimize(new MaxEval(1000),
+ new ObjectiveFunction(function),
+ GoalType.MAXIMIZE,
+ new InitialGuess(function.getMaximumPosition()),
+ new MultiDirectionalSimplex(2));
+ final double EPSILON = 1e-5;
+ final double expectedMaximum = function.getMaximum();
+ final double actualMaximum = estimate.getValue();
+ Assert.assertEquals(expectedMaximum, actualMaximum, EPSILON);
+
+ final double[] expectedPosition = function.getMaximumPosition();
+ final double[] actualPosition = estimate.getPoint();
+ Assert.assertEquals(expectedPosition[0], actualPosition[0], EPSILON );
+ Assert.assertEquals(expectedPosition[1], actualPosition[1], EPSILON );
+ }
+
+ private static class FourExtrema implements MultivariateFunction {
+ // The following function has 4 local extrema.
+ final double xM = -3.841947088256863675365;
+ final double yM = -1.391745200270734924416;
+ final double xP = 0.2286682237349059125691;
+ final double yP = -yM;
+ final double valueXmYm = 0.2373295333134216789769; // Local maximum.
+ final double valueXmYp = -valueXmYm; // Local minimum.
+ final double valueXpYm = -0.7290400707055187115322; // Global minimum.
+ final double valueXpYp = -valueXpYm; // Global maximum.
+
+ public double value(double[] variables) {
+ final double x = variables[0];
+ final double y = variables[1];
+ return (x == 0 || y == 0) ? 0 :
+ FastMath.atan(x) * FastMath.atan(x + 2) * FastMath.atan(y) * FastMath.atan(y) / (x * y);
+ }
+ }
+
+ private static class Gaussian2D implements MultivariateFunction {
+ private final double[] maximumPosition;
+ private final double std;
+
+ public Gaussian2D(double xOpt, double yOpt, double std) {
+ maximumPosition = new double[] { xOpt, yOpt };
+ this.std = std;
+ }
+
+ public double getMaximum() {
+ return value(maximumPosition);
+ }
+
+ public double[] getMaximumPosition() {
+ return maximumPosition.clone();
+ }
+
+ public double value(double[] point) {
+ final double x = point[0], y = point[1];
+ final double twoS2 = 2.0 * std * std;
+ return 1.0 / (twoS2 * FastMath.PI) * FastMath.exp(-(x * x + y * y) / twoS2);
+ }
+ }
+
+ private int count;
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerNelderMeadTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerNelderMeadTest.java
new file mode 100644
index 000000000..ab728beed
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizerNelderMeadTest.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;
+
+
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.analysis.MultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.linear.Array2DRowRealMatrix;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.ObjectiveFunction;
+import org.apache.commons.math3.optim.PointValuePair;
+import org.apache.commons.math3.optim.nonlinear.scalar.LeastSquaresConverter;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class SimplexOptimizerNelderMeadTest {
+ @Test
+ public void testMinimize1() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -3, 0 }),
+ new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 2e-7);
+ Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 2e-5);
+ Assert.assertEquals(fourExtrema.valueXmYp, optimum.getValue(), 6e-12);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 90);
+ }
+
+ @Test
+ public void testMinimize2() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 1, 0 }),
+ new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 5e-6);
+ Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 6e-6);
+ Assert.assertEquals(fourExtrema.valueXpYm, optimum.getValue(), 1e-11);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 90);
+ }
+
+ @Test
+ public void testMaximize1() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MAXIMIZE,
+ new InitialGuess(new double[] { -3, 0 }),
+ new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xM, optimum.getPoint()[0], 1e-5);
+ Assert.assertEquals(fourExtrema.yM, optimum.getPoint()[1], 3e-6);
+ Assert.assertEquals(fourExtrema.valueXmYm, optimum.getValue(), 3e-12);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 90);
+ }
+
+ @Test
+ public void testMaximize2() {
+ SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-30);
+ final FourExtrema fourExtrema = new FourExtrema();
+
+ final PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(fourExtrema),
+ GoalType.MAXIMIZE,
+ new InitialGuess(new double[] { 1, 0 }),
+ new NelderMeadSimplex(new double[] { 0.2, 0.2 }));
+ Assert.assertEquals(fourExtrema.xP, optimum.getPoint()[0], 4e-6);
+ Assert.assertEquals(fourExtrema.yP, optimum.getPoint()[1], 5e-6);
+ Assert.assertEquals(fourExtrema.valueXpYp, optimum.getValue(), 7e-12);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 90);
+ }
+
+ @Test
+ public void testRosenbrock() {
+
+ Rosenbrock rosenbrock = new Rosenbrock();
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ new ObjectiveFunction(rosenbrock),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { -1.2, 1 }),
+ new NelderMeadSimplex(new double[][] {
+ { -1.2, 1 },
+ { 0.9, 1.2 },
+ { 3.5, -2.3 } }));
+
+ Assert.assertEquals(rosenbrock.getCount(), optimizer.getEvaluations());
+ Assert.assertTrue(optimizer.getEvaluations() > 40);
+ Assert.assertTrue(optimizer.getEvaluations() < 50);
+ Assert.assertTrue(optimum.getValue() < 8e-4);
+ }
+
+ @Test
+ public void testPowell() {
+ Powell powell = new Powell();
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
+ PointValuePair optimum =
+ optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(powell),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 3, -1, 0, 1 }),
+ new NelderMeadSimplex(4));
+ Assert.assertEquals(powell.getCount(), optimizer.getEvaluations());
+ Assert.assertTrue(optimizer.getEvaluations() > 110);
+ Assert.assertTrue(optimizer.getEvaluations() < 130);
+ Assert.assertTrue(optimum.getValue() < 2e-3);
+ }
+
+ @Test
+ public void testLeastSquares1() {
+ final RealMatrix factors
+ = new Array2DRowRealMatrix(new double[][] {
+ { 1, 0 },
+ { 0, 1 }
+ }, false);
+ LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
+ public double[] value(double[] variables) {
+ return factors.operate(variables);
+ }
+ }, new double[] { 2.0, -3.0 });
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
+ PointValuePair optimum =
+ optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(ls),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 10, 10 }),
+ new NelderMeadSimplex(2));
+ Assert.assertEquals( 2, optimum.getPointRef()[0], 3e-5);
+ Assert.assertEquals(-3, optimum.getPointRef()[1], 4e-4);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 80);
+ Assert.assertTrue(optimum.getValue() < 1.0e-6);
+ }
+
+ @Test
+ public void testLeastSquares2() {
+ final RealMatrix factors
+ = new Array2DRowRealMatrix(new double[][] {
+ { 1, 0 },
+ { 0, 1 }
+ }, false);
+ LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
+ public double[] value(double[] variables) {
+ return factors.operate(variables);
+ }
+ }, new double[] { 2, -3 }, new double[] { 10, 0.1 });
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
+ PointValuePair optimum =
+ optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(ls),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 10, 10 }),
+ new NelderMeadSimplex(2));
+ Assert.assertEquals( 2, optimum.getPointRef()[0], 5e-5);
+ Assert.assertEquals(-3, optimum.getPointRef()[1], 8e-4);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 80);
+ Assert.assertTrue(optimum.getValue() < 1e-6);
+ }
+
+ @Test
+ public void testLeastSquares3() {
+ final RealMatrix factors =
+ new Array2DRowRealMatrix(new double[][] {
+ { 1, 0 },
+ { 0, 1 }
+ }, false);
+ LeastSquaresConverter ls = new LeastSquaresConverter(new MultivariateVectorFunction() {
+ public double[] value(double[] variables) {
+ return factors.operate(variables);
+ }
+ }, new double[] { 2, -3 }, new Array2DRowRealMatrix(new double [][] {
+ { 1, 1.2 }, { 1.2, 2 }
+ }));
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-6);
+ PointValuePair optimum
+ = optimizer.optimize(new MaxEval(200),
+ new ObjectiveFunction(ls),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 10, 10 }),
+ new NelderMeadSimplex(2));
+ Assert.assertEquals( 2, optimum.getPointRef()[0], 2e-3);
+ Assert.assertEquals(-3, optimum.getPointRef()[1], 8e-4);
+ Assert.assertTrue(optimizer.getEvaluations() > 60);
+ Assert.assertTrue(optimizer.getEvaluations() < 80);
+ Assert.assertTrue(optimum.getValue() < 1e-6);
+ }
+
+ @Test(expected=TooManyEvaluationsException.class)
+ public void testMaxIterations() {
+ Powell powell = new Powell();
+ SimplexOptimizer optimizer = new SimplexOptimizer(-1, 1e-3);
+ optimizer.optimize(new MaxEval(20),
+ new ObjectiveFunction(powell),
+ GoalType.MINIMIZE,
+ new InitialGuess(new double[] { 3, -1, 0, 1 }),
+ new NelderMeadSimplex(4));
+ }
+
+ private static class FourExtrema implements MultivariateFunction {
+ // The following function has 4 local extrema.
+ final double xM = -3.841947088256863675365;
+ final double yM = -1.391745200270734924416;
+ final double xP = 0.2286682237349059125691;
+ final double yP = -yM;
+ final double valueXmYm = 0.2373295333134216789769; // Local maximum.
+ final double valueXmYp = -valueXmYm; // Local minimum.
+ final double valueXpYm = -0.7290400707055187115322; // Global minimum.
+ final double valueXpYp = -valueXpYm; // Global maximum.
+
+ public double value(double[] variables) {
+ final double x = variables[0];
+ final double y = variables[1];
+ return (x == 0 || y == 0) ? 0 :
+ FastMath.atan(x) * FastMath.atan(x + 2) * FastMath.atan(y) * FastMath.atan(y) / (x * y);
+ }
+ }
+
+ private static class Rosenbrock implements MultivariateFunction {
+ private int count;
+
+ public Rosenbrock() {
+ count = 0;
+ }
+
+ public double value(double[] x) {
+ ++count;
+ double a = x[1] - x[0] * x[0];
+ double b = 1.0 - x[0];
+ return 100 * a * a + b * b;
+ }
+
+ public int getCount() {
+ return count;
+ }
+ }
+
+ private static class Powell implements MultivariateFunction {
+ private int count;
+
+ public Powell() {
+ count = 0;
+ }
+
+ public double value(double[] x) {
+ ++count;
+ double a = x[0] + 10 * x[1];
+ double b = x[2] - x[3];
+ double c = x[1] - 2 * x[2];
+ double d = x[0] - x[3];
+ return a * a + 5 * b * b + c * c * c * c + 10 * d * d * d * d;
+ }
+
+ public int getCount() {
+ return count;
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizerTest.java
new file mode 100644
index 000000000..e92091f27
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/MultiStartMultivariateVectorOptimizerTest.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.linear.BlockRealMatrix;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.SimpleVectorValueChecker;
+import org.apache.commons.math3.optim.nonlinear.vector.jacobian.GaussNewtonOptimizer;
+import org.apache.commons.math3.random.GaussianRandomGenerator;
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.apache.commons.math3.random.RandomVectorGenerator;
+import org.apache.commons.math3.random.UncorrelatedRandomVectorGenerator;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+ *
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ *     This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+ *
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class MultiStartMultivariateVectorOptimizerTest {
+ @Test(expected=NullPointerException.class)
+ public void testGetOptimaBeforeOptimize() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
+ JacobianMultivariateVectorOptimizer underlyingOptimizer
+ = new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(16069223052l);
+ RandomVectorGenerator generator
+ = new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
+ MultiStartMultivariateVectorOptimizer optimizer
+ = new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
+
+ optimizer.getOptima();
+ }
+
+ @Test
+ public void testTrivial() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
+ JacobianMultivariateVectorOptimizer underlyingOptimizer
+ = new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(16069223052l);
+ RandomVectorGenerator generator
+ = new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
+ MultiStartMultivariateVectorOptimizer optimizer
+ = new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
+
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1 }),
+ new InitialGuess(new double[] { 0 }));
+ Assert.assertEquals(1.5, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(3.0, optimum.getValue()[0], 1e-10);
+ PointVectorValuePair[] optima = optimizer.getOptima();
+ Assert.assertEquals(10, optima.length);
+ for (int i = 0; i < optima.length; i++) {
+ Assert.assertEquals(1.5, optima[i].getPoint()[0], 1e-10);
+ Assert.assertEquals(3.0, optima[i].getValue()[0], 1e-10);
+ }
+ Assert.assertTrue(optimizer.getEvaluations() > 20);
+ Assert.assertTrue(optimizer.getEvaluations() < 50);
+ Assert.assertEquals(100, optimizer.getMaxEvaluations());
+ }
+
+ /**
+ * Test demonstrating that the user exception is finally thrown if none
+ * of the runs succeed.
+ */
+ @Test(expected=TestException.class)
+ public void testNoOptimum() {
+ JacobianMultivariateVectorOptimizer underlyingOptimizer
+ = new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(12373523445l);
+ RandomVectorGenerator generator
+ = new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
+ MultiStartMultivariateVectorOptimizer optimizer
+ = new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
+ optimizer.optimize(new MaxEval(100),
+ new Target(new double[] { 0 }),
+ new Weight(new double[] { 1 }),
+ new InitialGuess(new double[] { 0 }),
+ new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] point) {
+ throw new TestException();
+ }
+ }));
+ }
+
+ private static class TestException extends RuntimeException {}
+
+ private static class LinearProblem {
+ private final RealMatrix factors;
+ private final double[] target;
+
+ public LinearProblem(double[][] factors,
+ double[] target) {
+ this.factors = new BlockRealMatrix(factors);
+ this.target = target;
+ }
+
+ public Target getTarget() {
+ return new Target(target);
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] variables) {
+ return factors.operate(variables);
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] point) {
+ return factors.getData();
+ }
+ });
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerAbstractTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerAbstractTest.java
new file mode 100644
index 000000000..cc86ae0f4
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerAbstractTest.java
@@ -0,0 +1,617 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.linear.BlockRealMatrix;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ *     This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ * @version $Id: AbstractLeastSquaresOptimizerAbstractTest.java 1407467 2012-11-09 14:30:49Z erans $
+ */
+public abstract class AbstractLeastSquaresOptimizerAbstractTest {
+
+ public abstract AbstractLeastSquaresOptimizer createOptimizer();
+
+ @Test
+ public void testTrivial() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1 }),
+ new InitialGuess(new double[] { 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(1.5, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(3.0, optimum.getValue()[0], 1e-10);
+ }
+
+ @Test
+ public void testQRColumnsPermutation() {
+
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 1, -1 }, { 0, 2 }, { 1, -2 } },
+ new double[] { 4, 6, 1 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(7, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(3, optimum.getPoint()[1], 1e-10);
+ Assert.assertEquals(4, optimum.getValue()[0], 1e-10);
+ Assert.assertEquals(6, optimum.getValue()[1], 1e-10);
+ Assert.assertEquals(1, optimum.getValue()[2], 1e-10);
+ }
+
+ @Test
+ public void testNoDependency() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 0, 0, 0, 0, 0 },
+ { 0, 2, 0, 0, 0, 0 },
+ { 0, 0, 2, 0, 0, 0 },
+ { 0, 0, 0, 2, 0, 0 },
+ { 0, 0, 0, 0, 2, 0 },
+ { 0, 0, 0, 0, 0, 2 }
+ }, new double[] { 0, 1.1, 2.2, 3.3, 4.4, 5.5 });
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ for (int i = 0; i < problem.target.length; ++i) {
+ Assert.assertEquals(0.55 * i, optimum.getPoint()[i], 1e-10);
+ }
+ }
+
+ @Test
+ public void testOneSet() {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 0, 0 },
+ { -1, 1, 0 },
+ { 0, -1, 1 }
+ }, new double[] { 1, 1, 1});
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(1, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(2, optimum.getPoint()[1], 1e-10);
+ Assert.assertEquals(3, optimum.getPoint()[2], 1e-10);
+ }
+
+ @Test
+ public void testTwoSets() {
+ double epsilon = 1e-7;
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 1, 0, 4, 0, 0 },
+ { -4, -2, 3, -7, 0, 0 },
+ { 4, 1, -2, 8, 0, 0 },
+ { 0, -3, -12, -1, 0, 0 },
+ { 0, 0, 0, 0, epsilon, 1 },
+ { 0, 0, 0, 0, 1, 1 }
+ }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(3, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(4, optimum.getPoint()[1], 1e-10);
+ Assert.assertEquals(-1, optimum.getPoint()[2], 1e-10);
+ Assert.assertEquals(-2, optimum.getPoint()[3], 1e-10);
+ Assert.assertEquals(1 + epsilon, optimum.getPoint()[4], 1e-10);
+ Assert.assertEquals(1 - epsilon, optimum.getPoint()[5], 1e-10);
+ }
+
+ @Test(expected=ConvergenceException.class)
+ public void testNonInvertible() throws Exception {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 2, -3 },
+ { 2, 1, 3 },
+ { -3, 0, -9 }
+ }, new double[] { 1, 1, 1 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0, 0 }));
+ }
+
+ @Test
+ public void testIllConditioned() {
+ LinearProblem problem1 = new LinearProblem(new double[][] {
+ { 10, 7, 8, 7 },
+ { 7, 5, 6, 5 },
+ { 8, 6, 10, 9 },
+ { 7, 5, 9, 10 }
+ }, new double[] { 32, 23, 33, 31 });
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum1 =
+ optimizer.optimize(new MaxEval(100),
+ problem1.getModelFunction(),
+ problem1.getModelFunctionJacobian(),
+ problem1.getTarget(),
+ new Weight(new double[] { 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 1, 2, 3 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(1, optimum1.getPoint()[0], 1e-10);
+ Assert.assertEquals(1, optimum1.getPoint()[1], 1e-10);
+ Assert.assertEquals(1, optimum1.getPoint()[2], 1e-10);
+ Assert.assertEquals(1, optimum1.getPoint()[3], 1e-10);
+
+ LinearProblem problem2 = new LinearProblem(new double[][] {
+ { 10.00, 7.00, 8.10, 7.20 },
+ { 7.08, 5.04, 6.00, 5.00 },
+ { 8.00, 5.98, 9.89, 9.00 },
+ { 6.99, 4.99, 9.00, 9.98 }
+ }, new double[] { 32, 23, 33, 31 });
+ PointVectorValuePair optimum2 =
+ optimizer.optimize(new MaxEval(100),
+ problem2.getModelFunction(),
+ problem2.getModelFunctionJacobian(),
+ problem2.getTarget(),
+ new Weight(new double[] { 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 1, 2, 3 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(-81, optimum2.getPoint()[0], 1e-8);
+ Assert.assertEquals(137, optimum2.getPoint()[1], 1e-8);
+ Assert.assertEquals(-34, optimum2.getPoint()[2], 1e-8);
+ Assert.assertEquals( 22, optimum2.getPoint()[3], 1e-8);
+ }
+
+ @Test
+ public void testMoreEstimatedParametersSimple() {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 3, 2, 0, 0 },
+ { 0, 1, -1, 1 },
+ { 2, 0, 1, 0 }
+ }, new double[] { 7, 3, 5 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 7, 6, 5, 4 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ }
+
+ @Test
+ public void testMoreEstimatedParametersUnsorted() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 1, 0, 0, 0, 0 },
+ { 0, 0, 1, 1, 1, 0 },
+ { 0, 0, 0, 0, 1, -1 },
+ { 0, 0, -1, 1, 0, 1 },
+ { 0, 0, 0, -1, 1, 0 }
+ }, new double[] { 3, 12, -1, 7, 1 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 2, 2, 2, 2, 2, 2 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(3, optimum.getPointRef()[2], 1e-10);
+ Assert.assertEquals(4, optimum.getPointRef()[3], 1e-10);
+ Assert.assertEquals(5, optimum.getPointRef()[4], 1e-10);
+ Assert.assertEquals(6, optimum.getPointRef()[5], 1e-10);
+ }
+
+ @Test
+ public void testRedundantEquations() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 1 },
+ { 1, -1 },
+ { 1, 3 }
+ }, new double[] { 3, 1, 5 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 1, 1 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(2, optimum.getPointRef()[0], 1e-10);
+ Assert.assertEquals(1, optimum.getPointRef()[1], 1e-10);
+ }
+
+ @Test
+ public void testInconsistentEquations() {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 1 },
+ { 1, -1 },
+ { 1, 3 }
+ }, new double[] { 3, 1, 4 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 1, 1 }));
+ Assert.assertTrue(optimizer.getRMS() > 0.1);
+ }
+
+ @Test(expected=DimensionMismatchException.class)
+ public void testInconsistentSizes1() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } },
+ new double[] { -1, 1 });
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1 }),
+ new InitialGuess(new double[] { 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(-1, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(1, optimum.getPoint()[1], 1e-10);
+
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1 }),
+ new InitialGuess(new double[] { 0, 0 }));
+ }
+
+ @Test(expected=DimensionMismatchException.class)
+ public void testInconsistentSizes2() {
+ LinearProblem problem
+ = new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } },
+ new double[] { -1, 1 });
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1 }),
+ new InitialGuess(new double[] { 0, 0 }));
+ Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
+ Assert.assertEquals(-1, optimum.getPoint()[0], 1e-10);
+ Assert.assertEquals(1, optimum.getPoint()[1], 1e-10);
+
+ optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(new double[] { 1 }),
+ new Weight(new double[] { 1 }),
+ new InitialGuess(new double[] { 0, 0 }));
+ }
+
+ @Test
+ public void testCircleFitting() {
+ CircleVectorial circle = new CircleVectorial();
+ circle.addPoint( 30, 68);
+ circle.addPoint( 50, -6);
+ circle.addPoint(110, -20);
+ circle.addPoint( 35, 15);
+ circle.addPoint( 45, 97);
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(new double[] { 0, 0, 0, 0, 0 }),
+ new Weight(new double[] { 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ Assert.assertTrue(optimizer.getEvaluations() < 10);
+ double rms = optimizer.getRMS();
+ Assert.assertEquals(1.768262623567235, FastMath.sqrt(circle.getN()) * rms, 1e-10);
+ Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
+ Assert.assertEquals(69.96016176931406, circle.getRadius(center), 1e-6);
+ Assert.assertEquals(96.07590211815305, center.getX(), 1e-6);
+ Assert.assertEquals(48.13516790438953, center.getY(), 1e-6);
+ double[][] cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
+ Assert.assertEquals(1.839, cov[0][0], 0.001);
+ Assert.assertEquals(0.731, cov[0][1], 0.001);
+ Assert.assertEquals(cov[0][1], cov[1][0], 1e-14);
+ Assert.assertEquals(0.786, cov[1][1], 0.001);
+
+ // add perfect measurements and check errors are reduced
+ double r = circle.getRadius(center);
+ for (double d= 0; d < 2 * FastMath.PI; d += 0.01) {
+ circle.addPoint(center.getX() + r * FastMath.cos(d), center.getY() + r * FastMath.sin(d));
+ }
+ double[] target = new double[circle.getN()];
+ Arrays.fill(target, 0);
+ double[] weights = new double[circle.getN()];
+ Arrays.fill(weights, 2);
+ optimum = optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
+ Assert.assertEquals(0.0016, cov[0][0], 0.001);
+ Assert.assertEquals(3.2e-7, cov[0][1], 1e-9);
+ Assert.assertEquals(cov[0][1], cov[1][0], 1e-14);
+ Assert.assertEquals(0.0016, cov[1][1], 0.001);
+ }
+
+ @Test
+ public void testCircleFittingBadInit() {
+ CircleVectorial circle = new CircleVectorial();
+ double[][] points = circlePoints;
+ double[] target = new double[points.length];
+ Arrays.fill(target, 0);
+ double[] weights = new double[points.length];
+ Arrays.fill(weights, 2);
+ for (int i = 0; i < points.length; ++i) {
+ circle.addPoint(points[i][0], points[i][1]);
+ }
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(new double[] { -12, -12 }));
+ Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
+ Assert.assertTrue(optimizer.getEvaluations() < 25);
+ Assert.assertEquals( 0.043, optimizer.getRMS(), 1e-3);
+ Assert.assertEquals( 0.292235, circle.getRadius(center), 1e-6);
+ Assert.assertEquals(-0.151738, center.getX(), 1e-6);
+ Assert.assertEquals( 0.2075001, center.getY(), 1e-6);
+ }
+
+ @Test
+ public void testCircleFittingGoodInit() {
+ CircleVectorial circle = new CircleVectorial();
+ double[][] points = circlePoints;
+ double[] target = new double[points.length];
+ Arrays.fill(target, 0);
+ double[] weights = new double[points.length];
+ Arrays.fill(weights, 2);
+ for (int i = 0; i < points.length; ++i) {
+ circle.addPoint(points[i][0], points[i][1]);
+ }
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum =
+ optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(new double[] { 0, 0 }));
+ Assert.assertEquals(-0.1517383071957963, optimum.getPointRef()[0], 1e-6);
+ Assert.assertEquals(0.2074999736353867, optimum.getPointRef()[1], 1e-6);
+ Assert.assertEquals(0.04268731682389561, optimizer.getRMS(), 1e-8);
+ }
+
+ private final double[][] circlePoints = new double[][] {
+ {-0.312967, 0.072366}, {-0.339248, 0.132965}, {-0.379780, 0.202724},
+ {-0.390426, 0.260487}, {-0.361212, 0.328325}, {-0.346039, 0.392619},
+ {-0.280579, 0.444306}, {-0.216035, 0.470009}, {-0.149127, 0.493832},
+ {-0.075133, 0.483271}, {-0.007759, 0.452680}, { 0.060071, 0.410235},
+ { 0.103037, 0.341076}, { 0.118438, 0.273884}, { 0.131293, 0.192201},
+ { 0.115869, 0.129797}, { 0.072223, 0.058396}, { 0.022884, 0.000718},
+ {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
+ {-0.278592, -0.005008}, {-0.337655, 0.056658}, {-0.385899, 0.112526},
+ {-0.405517, 0.186957}, {-0.415374, 0.262071}, {-0.387482, 0.343398},
+ {-0.347322, 0.397943}, {-0.287623, 0.458425}, {-0.223502, 0.475513},
+ {-0.135352, 0.478186}, {-0.061221, 0.483371}, { 0.003711, 0.422737},
+ { 0.065054, 0.375830}, { 0.108108, 0.297099}, { 0.123882, 0.222850},
+ { 0.117729, 0.134382}, { 0.085195, 0.056820}, { 0.029800, -0.019138},
+ {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
+ {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561, 0.014926},
+ {-0.471036, 0.074716}, {-0.488638, 0.182508}, {-0.485990, 0.254068},
+ {-0.463943, 0.338438}, {-0.406453, 0.404704}, {-0.334287, 0.466119},
+ {-0.254244, 0.503188}, {-0.161548, 0.495769}, {-0.075733, 0.495560},
+ { 0.001375, 0.434937}, { 0.082787, 0.385806}, { 0.115490, 0.323807},
+ { 0.141089, 0.223450}, { 0.138693, 0.131703}, { 0.126415, 0.049174},
+ { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
+ {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
+ {-0.405195, -0.000895}, {-0.444937, 0.085456}, {-0.484357, 0.175597},
+ {-0.472453, 0.248681}, {-0.438580, 0.347463}, {-0.402304, 0.422428},
+ {-0.326777, 0.479438}, {-0.247797, 0.505581}, {-0.152676, 0.519380},
+ {-0.071754, 0.516264}, { 0.015942, 0.472802}, { 0.076608, 0.419077},
+ { 0.127673, 0.330264}, { 0.159951, 0.262150}, { 0.153530, 0.172681},
+ { 0.140653, 0.089229}, { 0.078666, 0.024981}, { 0.023807, -0.037022},
+ {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
+ };
+
+ public void doTestStRD(final StatisticalReferenceDataset dataset,
+ final double errParams,
+ final double errParamsSd) {
+ final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ final double[] w = new double[dataset.getNumObservations()];
+ Arrays.fill(w, 1);
+
+ final double[][] data = dataset.getData();
+ final double[] initial = dataset.getStartingPoint(0);
+ final StatisticalReferenceDataset.LeastSquaresProblem problem = dataset.getLeastSquaresProblem();
+ final PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(data[1]),
+ new Weight(w),
+ new InitialGuess(initial));
+
+ final double[] actual = optimum.getPoint();
+ for (int i = 0; i < actual.length; i++) {
+ double expected = dataset.getParameter(i);
+ double delta = FastMath.abs(errParams * expected);
+ Assert.assertEquals(dataset.getName() + ", param #" + i,
+ expected, actual[i], delta);
+ }
+ }
+
+ @Test
+ public void testKirby2() throws IOException {
+ doTestStRD(StatisticalReferenceDatasetFactory.createKirby2(), 1E-7, 1E-7);
+ }
+
+ @Test
+ public void testHahn1() throws IOException {
+ doTestStRD(StatisticalReferenceDatasetFactory.createHahn1(), 1E-7, 1E-4);
+ }
+
+ static class LinearProblem {
+ private final RealMatrix factors;
+ private final double[] target;
+
+ public LinearProblem(double[][] factors, double[] target) {
+ this.factors = new BlockRealMatrix(factors);
+ this.target = target;
+ }
+
+ public Target getTarget() {
+ return new Target(target);
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ return factors.operate(params);
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] params) {
+ return factors.getData();
+ }
+ });
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTest.java
new file mode 100644
index 000000000..719761631
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
+ * or agreed to in writing, software distributed under the License is
+ * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.IOException;
+import java.util.Arrays;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class AbstractLeastSquaresOptimizerTest {
+
+ public static AbstractLeastSquaresOptimizer createOptimizer() {
+ return new AbstractLeastSquaresOptimizer(null) {
+
+ @Override
+ protected PointVectorValuePair doOptimize() {
+ final double[] params = getStartPoint();
+ final double[] res = computeResiduals(computeObjectiveValue(params));
+ setCost(computeCost(res));
+ return new PointVectorValuePair(params, null);
+ }
+ };
+ }
+
+ @Test
+ public void testGetChiSquare() throws IOException {
+ final StatisticalReferenceDataset dataset
+ = StatisticalReferenceDatasetFactory.createKirby2();
+ final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ final double[] a = dataset.getParameters();
+ final double[] y = dataset.getData()[1];
+ final double[] w = new double[y.length];
+ Arrays.fill(w, 1.0);
+
+ StatisticalReferenceDataset.LeastSquaresProblem problem
+ = dataset.getLeastSquaresProblem();
+
+ optimizer.optimize(new MaxEval(1),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(y),
+ new Weight(w),
+ new InitialGuess(a));
+ final double expected = dataset.getResidualSumOfSquares();
+ final double actual = optimizer.getChiSquare();
+ Assert.assertEquals(dataset.getName(), expected, actual,
+ 1E-11 * expected);
+ }
+
+ @Test
+ public void testGetRMS() throws IOException {
+ final StatisticalReferenceDataset dataset
+ = StatisticalReferenceDatasetFactory.createKirby2();
+ final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ final double[] a = dataset.getParameters();
+ final double[] y = dataset.getData()[1];
+ final double[] w = new double[y.length];
+ Arrays.fill(w, 1);
+
+ StatisticalReferenceDataset.LeastSquaresProblem problem
+ = dataset.getLeastSquaresProblem();
+
+ optimizer.optimize(new MaxEval(1),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(y),
+ new Weight(w),
+ new InitialGuess(a));
+
+ final double expected = FastMath
+ .sqrt(dataset.getResidualSumOfSquares() /
+ dataset.getNumObservations());
+ final double actual = optimizer.getRMS();
+ Assert.assertEquals(dataset.getName(), expected, actual,
+ 1E-11 * expected);
+ }
+
+ @Test
+ public void testComputeSigma() throws IOException {
+ final StatisticalReferenceDataset dataset
+ = StatisticalReferenceDatasetFactory.createKirby2();
+ final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ final double[] a = dataset.getParameters();
+ final double[] y = dataset.getData()[1];
+ final double[] w = new double[y.length];
+ Arrays.fill(w, 1);
+
+ StatisticalReferenceDataset.LeastSquaresProblem problem
+ = dataset.getLeastSquaresProblem();
+
+ final PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(1),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(y),
+ new Weight(w),
+ new InitialGuess(a));
+
+ final double[] sig = optimizer.computeSigma(optimum.getPoint(), 1e-14);
+
+ final int dof = y.length - a.length;
+ final double[] expected = dataset.getParametersStandardDeviations();
+ for (int i = 0; i < sig.length; i++) {
+ final double actual = FastMath.sqrt(optimizer.getChiSquare() / dof) * sig[i];
+ Assert.assertEquals(dataset.getName() + ", parameter #" + i,
+ expected[i], actual, 1e-6 * expected[i]);
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTestValidation.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTestValidation.java
new file mode 100644
index 000000000..d9aa76141
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizerTestValidation.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
+ * or agreed to in writing, software distributed under the License is
+ * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.ArrayList;
+import java.awt.geom.Point2D;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
+import org.apache.commons.math3.stat.descriptive.StatisticalSummary;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Test;
+import org.junit.Assert;
+
+/**
+ * This class demonstrates the main functionality of the
+ * {@link AbstractLeastSquaresOptimizer}, common to the
+ * optimizer implementations in package
+ * {@link org.apache.commons.math3.optim.nonlinear.vector.jacobian}.
+ *
+ * Not enabled by default, as the class name does not end with "Test".
+ *
+ * Invoke by running
+ *
+ * mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation
+ *
+ * or by running
+ *
+ * mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation -DargLine="-DmcRuns=1234 -server"
+ *
+ */
+public class AbstractLeastSquaresOptimizerTestValidation {
+ private static final int MONTE_CARLO_RUNS = Integer.parseInt(System.getProperty("mcRuns",
+ "100"));
+
+ /**
+ * Using a Monte-Carlo procedure, this test checks the error estimations
+ * as provided by the square-root of the diagonal elements of the
+ * covariance matrix.
+ *
+ * The test generates sets of observations, each sampled from
+ * a Gaussian distribution.
+ *
+ * The optimization problem solved is defined in class
+ * {@link StraightLineProblem}.
+ *
+ * The output (on stdout) will be a table summarizing the distribution
+ * of parameters generated by the Monte-Carlo process and by the direct
+ * estimation provided by the diagonal elements of the covariance matrix.
+ */
+ @Test
+ public void testParametersErrorMonteCarloObservations() {
+ // Error on the observations.
+ final double yError = 15;
+
+ // True values of the parameters.
+ final double slope = 123.456;
+ final double offset = -98.765;
+
+ // Samples generator.
+ final RandomStraightLinePointGenerator lineGenerator
+ = new RandomStraightLinePointGenerator(slope, offset,
+ yError,
+ -1e3, 1e4,
+ 138577L);
+
+ // Number of observations.
+ final int numObs = 100; // XXX Should be a command-line option.
+ // number of parameters.
+ final int numParams = 2;
+
+ // Parameters found for each of Monte-Carlo run.
+ final SummaryStatistics[] paramsFoundByDirectSolution = new SummaryStatistics[numParams];
+ // Sigma estimations (square-root of the diagonal elements of the
+ // covariance matrix), for each Monte-Carlo run.
+ final SummaryStatistics[] sigmaEstimate = new SummaryStatistics[numParams];
+
+ // Initialize statistics accumulators.
+ for (int i = 0; i < numParams; i++) {
+ paramsFoundByDirectSolution[i] = new SummaryStatistics();
+ sigmaEstimate[i] = new SummaryStatistics();
+ }
+
+ // Dummy optimizer (to compute the covariance matrix).
+ final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
+ final double[] init = { slope, offset };
+
+ // Monte-Carlo (generates many sets of observations).
+ final int mcRepeat = MONTE_CARLO_RUNS;
+ int mcCount = 0;
+ while (mcCount < mcRepeat) {
+ // Observations.
+ final Point2D.Double[] obs = lineGenerator.generate(numObs);
+
+ final StraightLineProblem problem = new StraightLineProblem(yError);
+ for (int i = 0; i < numObs; i++) {
+ final Point2D.Double p = obs[i];
+ problem.addPoint(p.x, p.y);
+ }
+
+ // Direct solution (using simple regression).
+ final double[] regress = problem.solve();
+
+ // Estimation of the standard deviation (diagonal elements of the
+ // covariance matrix).
+ final PointVectorValuePair optimum
+ = optim.optimize(new MaxEval(Integer.MAX_VALUE),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(problem.target()),
+ new Weight(problem.weight()),
+ new InitialGuess(init));
+ final double[] sigma = optim.computeSigma(optimum.getPoint(), 1e-14);
+
+ // Accumulate statistics.
+ for (int i = 0; i < numParams; i++) {
+ paramsFoundByDirectSolution[i].addValue(regress[i]);
+ sigmaEstimate[i].addValue(sigma[i]);
+ }
+
+ // Next Monte-Carlo.
+ ++mcCount;
+ }
+
+ // Print statistics.
+ final String line = "--------------------------------------------------------------";
+ System.out.println(" True value Mean Std deviation");
+ for (int i = 0; i < numParams; i++) {
+ System.out.println(line);
+ System.out.println("Parameter #" + i);
+
+ StatisticalSummary s = paramsFoundByDirectSolution[i].getSummary();
+ System.out.printf(" %+.6e %+.6e %+.6e\n",
+ init[i],
+ s.getMean(),
+ s.getStandardDeviation());
+
+ s = sigmaEstimate[i].getSummary();
+ System.out.printf("sigma: %+.6e (%+.6e)\n",
+ s.getMean(),
+ s.getStandardDeviation());
+ }
+ System.out.println(line);
+
+ // Check the error estimation.
+ for (int i = 0; i < numParams; i++) {
+ Assert.assertEquals(paramsFoundByDirectSolution[i].getSummary().getStandardDeviation(),
+ sigmaEstimate[i].getSummary().getMean(),
+ 8e-2);
+ }
+ }
+
+ /**
+ * In this test, the set of observations is fixed.
+ * Using a Monte-Carlo procedure, it generates sets of parameters,
+ * and determine the parameter change that will result in the
+ * normalized chi-square becoming larger by one than the value from
+ * the best fit solution.
+ *
+ * The optimization problem solved is defined in class
+ * {@link StraightLineProblem}.
+ *
+ * The output (on stdout) will be a list of lines containing:
+ *
+ * - slope of the straight line,
+ * - intercept of the straight line,
+ * - chi-square of the solution defined by the above two values.
+ *
+ * The output is separated into two blocks (with a blank line between
+ * them); the first block will contain all parameter sets for which
+ * {@code chi2 < chi2_b + 1}
+ * and the second block, all sets for which
+ * {@code chi2 >= chi2_b + 1}
+ * where {@code chi2_b} is the lowest chi-square (corresponding to the
+ * best solution).
+ */
+ @Test
+ public void testParametersErrorMonteCarloParameters() {
+ // Error on the observations.
+ final double yError = 15;
+
+ // True values of the parameters.
+ final double slope = 123.456;
+ final double offset = -98.765;
+
+ // Samples generator.
+ final RandomStraightLinePointGenerator lineGenerator
+ = new RandomStraightLinePointGenerator(slope, offset,
+ yError,
+ -1e3, 1e4,
+ 13839013L);
+
+ // Number of observations.
+ final int numObs = 10;
+ // number of parameters.
+ final int numParams = 2;
+
+ // Create a single set of observations.
+ final Point2D.Double[] obs = lineGenerator.generate(numObs);
+
+ final StraightLineProblem problem = new StraightLineProblem(yError);
+ for (int i = 0; i < numObs; i++) {
+ final Point2D.Double p = obs[i];
+ problem.addPoint(p.x, p.y);
+ }
+
+ // Direct solution (using simple regression).
+ final double[] regress = problem.solve();
+
+ // Dummy optimizer (to compute the chi-square).
+ final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
+ final double[] init = { slope, offset };
+ // Get chi-square of the best parameters set for the given set of
+ // observations.
+ final double bestChi2N = getChi2N(optim, problem, regress);
+ final double[] sigma = optim.computeSigma(regress, 1e-14);
+
+ // Monte-Carlo (generates a grid of parameters).
+ final int mcRepeat = MONTE_CARLO_RUNS;
+ final int gridSize = (int) FastMath.sqrt(mcRepeat);
+
+ // Parameters found for each of Monte-Carlo run.
+ // Index 0 = slope
+ // Index 1 = offset
+ // Index 2 = normalized chi2
+ final List paramsAndChi2 = new ArrayList(gridSize * gridSize);
+
+ final double slopeRange = 10 * sigma[0];
+ final double offsetRange = 10 * sigma[1];
+ final double minSlope = slope - 0.5 * slopeRange;
+ final double minOffset = offset - 0.5 * offsetRange;
+ final double deltaSlope = slopeRange/ gridSize;
+ final double deltaOffset = offsetRange / gridSize;
+ for (int i = 0; i < gridSize; i++) {
+ final double s = minSlope + i * deltaSlope;
+ for (int j = 0; j < gridSize; j++) {
+ final double o = minOffset + j * deltaOffset;
+ final double chi2N = getChi2N(optim, problem, new double[] {s, o});
+
+ paramsAndChi2.add(new double[] {s, o, chi2N});
+ }
+ }
+
+ // Output (for use with "gnuplot").
+
+ // Some info.
+
+ // For plotting separately sets of parameters that have a large chi2.
+ final double chi2NPlusOne = bestChi2N + 1;
+ int numLarger = 0;
+
+ final String lineFmt = "%+.10e %+.10e %.8e\n";
+
+ // Point with smallest chi-square.
+ System.out.printf(lineFmt, regress[0], regress[1], bestChi2N);
+ System.out.println(); // Empty line.
+
+ // Points within the confidence interval.
+ for (double[] d : paramsAndChi2) {
+ if (d[2] <= chi2NPlusOne) {
+ System.out.printf(lineFmt, d[0], d[1], d[2]);
+ }
+ }
+ System.out.println(); // Empty line.
+
+ // Points outside the confidence interval.
+ for (double[] d : paramsAndChi2) {
+ if (d[2] > chi2NPlusOne) {
+ ++numLarger;
+ System.out.printf(lineFmt, d[0], d[1], d[2]);
+ }
+ }
+ System.out.println(); // Empty line.
+
+ System.out.println("# sigma=" + Arrays.toString(sigma));
+ System.out.println("# " + numLarger + " sets filtered out");
+ }
+
+ /**
+ * @return the normalized chi-square.
+ */
+ private double getChi2N(AbstractLeastSquaresOptimizer optim,
+ StraightLineProblem problem,
+ double[] params) {
+ final double[] t = problem.target();
+ final double[] w = problem.weight();
+
+ optim.optimize(new MaxEval(Integer.MAX_VALUE),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(t),
+ new Weight(w),
+ new InitialGuess(params));
+
+ return optim.getChiSquare() / (t.length - params.length);
+ }
+}
+
+/**
+ * A dummy optimizer.
+ * Used for computing the covariance matrix.
+ */
+class DummyOptimizer extends AbstractLeastSquaresOptimizer {
+ public DummyOptimizer() {
+ super(null);
+ }
+
+ /**
+ * This method does nothing and returns a dummy value.
+ */
+ @Override
+ public PointVectorValuePair doOptimize() {
+ final double[] params = getStartPoint();
+ final double[] res = computeResiduals(computeObjectiveValue(params));
+ setCost(computeCost(res));
+ return new PointVectorValuePair(params, null);
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleProblem.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleProblem.java
new file mode 100644
index 000000000..41414fbda
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleProblem.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.ArrayList;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.util.MathUtils;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+
+/**
+ * Class that models a circle.
+ * The parameters of problem are:
+ *
+ * - the x-coordinate of the circle center,
+ * - the y-coordinate of the circle center,
+ * - the radius of the circle.
+ *
+ * The model functions are:
+ *
+ * - for each triplet (cx, cy, r), the (x, y) coordinates of a point on the
+ * corresponding circle.
+ *
+ */
+class CircleProblem {
+ /** Cloud of points assumed to be fitted by a circle. */
+ private final ArrayList points;
+ /** Error on the x-coordinate of the points. */
+ private final double xSigma;
+ /** Error on the y-coordinate of the points. */
+ private final double ySigma;
+    /** Number of points on the circumference (when searching which
+        model point is closest to a given "observation"). */
+ private final int resolution;
+
+ /**
+ * @param xError Assumed error for the x-coordinate of the circle points.
+ * @param yError Assumed error for the y-coordinate of the circle points.
+ * @param searchResolution Number of points to try when searching the one
+ * that is closest to a given "observed" point.
+ */
+ public CircleProblem(double xError,
+ double yError,
+ int searchResolution) {
+ points = new ArrayList();
+ xSigma = xError;
+ ySigma = yError;
+ resolution = searchResolution;
+ }
+
+ /**
+ * @param xError Assumed error for the x-coordinate of the circle points.
+ * @param yError Assumed error for the y-coordinate of the circle points.
+ */
+ public CircleProblem(double xError,
+ double yError) {
+ this(xError, yError, 500);
+ }
+
+ public void addPoint(double px, double py) {
+ points.add(new double[] { px, py });
+ }
+
+ public double[] target() {
+ final double[] t = new double[points.size() * 2];
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ final int index = i * 2;
+ t[index] = p[0];
+ t[index + 1] = p[1];
+ }
+
+ return t;
+ }
+
+ public double[] weight() {
+ final double wX = 1 / (xSigma * xSigma);
+ final double wY = 1 / (ySigma * ySigma);
+ final double[] w = new double[points.size() * 2];
+ for (int i = 0; i < points.size(); i++) {
+ final int index = i * 2;
+ w[index] = wX;
+ w[index + 1] = wY;
+ }
+
+ return w;
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ final double cx = params[0];
+ final double cy = params[1];
+ final double r = params[2];
+
+ final double[] model = new double[points.size() * 2];
+
+ final double deltaTheta = MathUtils.TWO_PI / resolution;
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ final double px = p[0];
+ final double py = p[1];
+
+ double bestX = 0;
+ double bestY = 0;
+ double dMin = Double.POSITIVE_INFINITY;
+
+                    // Find the angle for which the circle passes closest to the
+                    // current point (sampling the configured "resolution" number
+                    // of points along the circumference).
+ for (double theta = 0; theta <= MathUtils.TWO_PI; theta += deltaTheta) {
+ final double currentX = cx + r * FastMath.cos(theta);
+ final double currentY = cy + r * FastMath.sin(theta);
+ final double dX = currentX - px;
+ final double dY = currentY - py;
+ final double d = dX * dX + dY * dY;
+ if (d < dMin) {
+ dMin = d;
+ bestX = currentX;
+ bestY = currentY;
+ }
+ }
+
+ final int index = i * 2;
+ model[index] = bestX;
+ model[index + 1] = bestY;
+ }
+
+ return model;
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] point) {
+ return jacobian(point);
+ }
+ });
+ }
+
+ private double[][] jacobian(double[] params) {
+ final double[][] jacobian = new double[points.size() * 2][3];
+
+ for (int i = 0; i < points.size(); i++) {
+ final int index = i * 2;
+ // Partial derivative wrt x-coordinate of center.
+ jacobian[index][0] = 1;
+ jacobian[index + 1][0] = 0;
+ // Partial derivative wrt y-coordinate of center.
+ jacobian[index][1] = 0;
+ jacobian[index + 1][1] = 1;
+ // Partial derivative wrt radius.
+ final double[] p = points.get(i);
+ jacobian[index][2] = (p[0] - params[0]) / params[2];
+ jacobian[index + 1][2] = (p[1] - params[1]) / params[2];
+ }
+
+ return jacobian;
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleVectorial.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleVectorial.java
new file mode 100644
index 000000000..0248f9e06
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/CircleVectorial.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.ArrayList;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+
+/**
+ * Class used in the tests.
+ */
+class CircleVectorial {
+ private ArrayList points;
+
+ public CircleVectorial() {
+ points = new ArrayList();
+ }
+
+ public void addPoint(double px, double py) {
+ points.add(new Vector2D(px, py));
+ }
+
+ public int getN() {
+ return points.size();
+ }
+
+ public double getRadius(Vector2D center) {
+ double r = 0;
+ for (Vector2D point : points) {
+ r += point.distance(center);
+ }
+ return r / points.size();
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ Vector2D center = new Vector2D(params[0], params[1]);
+ double radius = getRadius(center);
+ double[] residuals = new double[points.size()];
+ for (int i = 0; i < residuals.length; i++) {
+ residuals[i] = points.get(i).distance(center) - radius;
+ }
+
+ return residuals;
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] params) {
+ final int n = points.size();
+ final Vector2D center = new Vector2D(params[0], params[1]);
+
+ double dRdX = 0;
+ double dRdY = 0;
+ for (Vector2D pk : points) {
+ double dk = pk.distance(center);
+ dRdX += (center.getX() - pk.getX()) / dk;
+ dRdY += (center.getY() - pk.getY()) / dk;
+ }
+ dRdX /= n;
+ dRdY /= n;
+
+ // Jacobian of the radius residuals.
+ double[][] jacobian = new double[n][2];
+ for (int i = 0; i < n; i++) {
+ final Vector2D pi = points.get(i);
+ final double di = pi.distance(center);
+ jacobian[i][0] = (center.getX() - pi.getX()) / di - dRdX;
+ jacobian[i][1] = (center.getY() - pi.getY()) / di - dRdY;
+ }
+
+ return jacobian;
+ }
+ });
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizerTest.java
new file mode 100644
index 000000000..af23e3f88
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizerTest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.IOException;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.optim.SimpleVectorValueChecker;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.junit.Test;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ *     This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class GaussNewtonOptimizerTest
+ extends AbstractLeastSquaresOptimizerAbstractTest {
+
+ @Override
+ public AbstractLeastSquaresOptimizer createOptimizer() {
+ return new GaussNewtonOptimizer(new SimpleVectorValueChecker(1.0e-6, 1.0e-6));
+ }
+
+ @Override
+ @Test(expected = ConvergenceException.class)
+ public void testMoreEstimatedParametersSimple() {
+ /*
+ * Exception is expected with this optimizer
+ */
+ super.testMoreEstimatedParametersSimple();
+ }
+
+ @Override
+ @Test(expected=ConvergenceException.class)
+ public void testMoreEstimatedParametersUnsorted() {
+ /*
+ * Exception is expected with this optimizer
+ */
+ super.testMoreEstimatedParametersUnsorted();
+ }
+
+ @Test(expected=TooManyEvaluationsException.class)
+ public void testMaxEvaluations() throws Exception {
+ CircleVectorial circle = new CircleVectorial();
+ circle.addPoint( 30.0, 68.0);
+ circle.addPoint( 50.0, -6.0);
+ circle.addPoint(110.0, -20.0);
+ circle.addPoint( 35.0, 15.0);
+ circle.addPoint( 45.0, 97.0);
+
+ GaussNewtonOptimizer optimizer
+ = new GaussNewtonOptimizer(new SimpleVectorValueChecker(1e-30, 1e-30));
+
+ optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(new double[] { 0, 0, 0, 0, 0 }),
+ new Weight(new double[] { 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ }
+
+ @Override
+ @Test(expected=ConvergenceException.class)
+ public void testCircleFittingBadInit() {
+ /*
+ * This test does not converge with this optimizer.
+ */
+ super.testCircleFittingBadInit();
+ }
+
+ @Override
+ @Test(expected = ConvergenceException.class)
+ public void testHahn1()
+ throws IOException {
+ /*
+ * TODO This test leads to a singular problem with the Gauss-Newton
+ * optimizer. This should be inquired.
+ */
+ super.testHahn1();
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizerTest.java
new file mode 100644
index 000000000..20a81ffb2
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizerTest.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+import org.apache.commons.math3.linear.SingularMatrixException;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.Precision;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.Ignore;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ * This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class LevenbergMarquardtOptimizerTest
+ extends AbstractLeastSquaresOptimizerAbstractTest {
+ @Override
+ public AbstractLeastSquaresOptimizer createOptimizer() {
+ return new LevenbergMarquardtOptimizer();
+ }
+
+ @Override
+ @Test(expected=SingularMatrixException.class)
+ public void testNonInvertible() {
+ /*
+ * Overrides the method from parent class, since the default singularity
+ * threshold (1e-14) does not trigger the expected exception.
+ */
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 2, -3 },
+ { 2, 1, 3 },
+ { -3, 0, -9 }
+ }, new double[] { 1, 1, 1 });
+
+ AbstractLeastSquaresOptimizer optimizer = createOptimizer();
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ problem.getTarget(),
+ new Weight(new double[] { 1, 1, 1 }),
+ new InitialGuess(new double[] { 0, 0, 0 }));
+ Assert.assertTrue(FastMath.sqrt(optimizer.getTargetSize()) * optimizer.getRMS() > 0.6);
+
+ optimizer.computeCovariances(optimum.getPoint(), 1.5e-14);
+ }
+
+ @Test
+ public void testControlParameters() {
+ CircleVectorial circle = new CircleVectorial();
+ circle.addPoint( 30.0, 68.0);
+ circle.addPoint( 50.0, -6.0);
+ circle.addPoint(110.0, -20.0);
+ circle.addPoint( 35.0, 15.0);
+ circle.addPoint( 45.0, 97.0);
+ checkEstimate(circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ 0.1, 10, 1.0e-14, 1.0e-16, 1.0e-10, false);
+ checkEstimate(circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ 0.1, 10, 1.0e-15, 1.0e-17, 1.0e-10, true);
+ checkEstimate(circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ 0.1, 5, 1.0e-15, 1.0e-16, 1.0e-10, true);
+ circle.addPoint(300, -300);
+ checkEstimate(circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ 0.1, 20, 1.0e-18, 1.0e-16, 1.0e-10, true);
+ }
+
+ private void checkEstimate(ModelFunction problem,
+ ModelFunctionJacobian problemJacobian,
+ double initialStepBoundFactor, int maxCostEval,
+ double costRelativeTolerance, double parRelativeTolerance,
+ double orthoTolerance, boolean shouldFail) {
+ try {
+ LevenbergMarquardtOptimizer optimizer
+ = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
+ costRelativeTolerance,
+ parRelativeTolerance,
+ orthoTolerance,
+ Precision.SAFE_MIN);
+ optimizer.optimize(new MaxEval(maxCostEval),
+ problem,
+ problemJacobian,
+ new Target(new double[] { 0, 0, 0, 0, 0 }),
+ new Weight(new double[] { 1, 1, 1, 1, 1 }),
+ new InitialGuess(new double[] { 98.680, 47.345 }));
+ Assert.assertTrue(!shouldFail);
+ } catch (DimensionMismatchException ee) {
+ Assert.assertTrue(shouldFail);
+ } catch (TooManyEvaluationsException ee) {
+ Assert.assertTrue(shouldFail);
+ }
+ }
+
+ /**
+ * Non-linear test case: fitting of decay curve (from Chapter 8 of
+ * Bevington's textbook, "Data reduction and analysis for the physical sciences").
+ * XXX The expected ("reference") values may not be accurate and the tolerance too
+ * relaxed for this test to be currently really useful (the issue is under
+ * investigation).
+ */
+ @Test
+ public void testBevington() {
+ final double[][] dataPoints = {
+ // column 1 = times
+ { 15, 30, 45, 60, 75, 90, 105, 120, 135, 150,
+ 165, 180, 195, 210, 225, 240, 255, 270, 285, 300,
+ 315, 330, 345, 360, 375, 390, 405, 420, 435, 450,
+ 465, 480, 495, 510, 525, 540, 555, 570, 585, 600,
+ 615, 630, 645, 660, 675, 690, 705, 720, 735, 750,
+ 765, 780, 795, 810, 825, 840, 855, 870, 885, },
+ // column 2 = measured counts
+ { 775, 479, 380, 302, 185, 157, 137, 119, 110, 89,
+ 74, 61, 66, 68, 48, 54, 51, 46, 55, 29,
+ 28, 37, 49, 26, 35, 29, 31, 24, 25, 35,
+ 24, 30, 26, 28, 21, 18, 20, 27, 17, 17,
+ 14, 17, 24, 11, 22, 17, 12, 10, 13, 16,
+ 9, 9, 14, 21, 17, 13, 12, 18, 10, },
+ };
+
+ final BevingtonProblem problem = new BevingtonProblem();
+
+ final int len = dataPoints[0].length;
+ final double[] weights = new double[len];
+ for (int i = 0; i < len; i++) {
+ problem.addPoint(dataPoints[0][i],
+ dataPoints[1][i]);
+
+ weights[i] = 1 / dataPoints[1][i];
+ }
+
+ final LevenbergMarquardtOptimizer optimizer
+ = new LevenbergMarquardtOptimizer();
+
+ final PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(100),
+ problem.getModelFunction(),
+ problem.getModelFunctionJacobian(),
+ new Target(dataPoints[1]),
+ new Weight(weights),
+ new InitialGuess(new double[] { 10, 900, 80, 27, 225 }));
+
+ final double[] solution = optimum.getPoint();
+ final double[] expectedSolution = { 10.4, 958.3, 131.4, 33.9, 205.0 };
+
+ final double[][] covarMatrix = optimizer.computeCovariances(solution, 1e-14);
+ final double[][] expectedCovarMatrix = {
+ { 3.38, -3.69, 27.98, -2.34, -49.24 },
+ { -3.69, 2492.26, 81.89, -69.21, -8.9 },
+ { 27.98, 81.89, 468.99, -44.22, -615.44 },
+ { -2.34, -69.21, -44.22, 6.39, 53.80 },
+ { -49.24, -8.9, -615.44, 53.8, 929.45 }
+ };
+
+ final int numParams = expectedSolution.length;
+
+ // Check that the computed solution is within the reference error range.
+ for (int i = 0; i < numParams; i++) {
+ final double error = FastMath.sqrt(expectedCovarMatrix[i][i]);
+ Assert.assertEquals("Parameter " + i, expectedSolution[i], solution[i], error);
+ }
+
+ // Check that each entry of the computed covariance matrix is within 10%
+ // of the reference matrix entry.
+ for (int i = 0; i < numParams; i++) {
+ for (int j = 0; j < numParams; j++) {
+ Assert.assertEquals("Covariance matrix [" + i + "][" + j + "]",
+ expectedCovarMatrix[i][j],
+ covarMatrix[i][j],
+ FastMath.abs(0.1 * expectedCovarMatrix[i][j]));
+ }
+ }
+ }
+
+ @Test
+ public void testCircleFitting2() {
+ final double xCenter = 123.456;
+ final double yCenter = 654.321;
+ final double xSigma = 10;
+ final double ySigma = 15;
+ final double radius = 111.111;
+ // The test is extremely sensitive to the seed.
+ final long seed = 59421061L;
+ final RandomCirclePointGenerator factory
+ = new RandomCirclePointGenerator(xCenter, yCenter, radius,
+ xSigma, ySigma,
+ seed);
+ final CircleProblem circle = new CircleProblem(xSigma, ySigma);
+
+ final int numPoints = 10;
+ for (Vector2D p : factory.generate(numPoints)) {
+ circle.addPoint(p.getX(), p.getY());
+ }
+
+ // First guess for the center's coordinates and radius.
+ final double[] init = { 90, 659, 115 };
+
+ final LevenbergMarquardtOptimizer optimizer
+ = new LevenbergMarquardtOptimizer();
+ final PointVectorValuePair optimum = optimizer.optimize(new MaxEval(100),
+ circle.getModelFunction(),
+ circle.getModelFunctionJacobian(),
+ new Target(circle.target()),
+ new Weight(circle.weight()),
+ new InitialGuess(init));
+
+ final double[] paramFound = optimum.getPoint();
+
+ // Retrieve errors estimation.
+ final double[] asymptoticStandardErrorFound = optimizer.computeSigma(paramFound, 1e-14);
+
+ // Check that the parameters are found within the assumed error bars.
+ Assert.assertEquals(xCenter, paramFound[0], asymptoticStandardErrorFound[0]);
+ Assert.assertEquals(yCenter, paramFound[1], asymptoticStandardErrorFound[1]);
+ Assert.assertEquals(radius, paramFound[2], asymptoticStandardErrorFound[2]);
+ }
+
+ private static class QuadraticProblem {
+ private List x;
+ private List y;
+
+ public QuadraticProblem() {
+ x = new ArrayList();
+ y = new ArrayList();
+ }
+
+ public void addPoint(double x, double y) {
+ this.x.add(x);
+ this.y.add(y);
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] variables) {
+ double[] values = new double[x.size()];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = (variables[0] * x.get(i) + variables[1]) * x.get(i) + variables[2];
+ }
+ return values;
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] params) {
+ double[][] jacobian = new double[x.size()][3];
+ for (int i = 0; i < jacobian.length; ++i) {
+ jacobian[i][0] = x.get(i) * x.get(i);
+ jacobian[i][1] = x.get(i);
+ jacobian[i][2] = 1.0;
+ }
+ return jacobian;
+ }
+ });
+ }
+ }
+
+ private static class BevingtonProblem {
+ private List time;
+ private List count;
+
+ public BevingtonProblem() {
+ time = new ArrayList();
+ count = new ArrayList();
+ }
+
+ public void addPoint(double t, double c) {
+ time.add(t);
+ count.add(c);
+ }
+
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ double[] values = new double[time.size()];
+ for (int i = 0; i < values.length; ++i) {
+ final double t = time.get(i);
+ values[i] = params[0] +
+ params[1] * Math.exp(-t / params[3]) +
+ params[2] * Math.exp(-t / params[4]);
+ }
+ return values;
+ }
+ });
+ }
+
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] params) {
+ double[][] jacobian = new double[time.size()][5];
+
+ for (int i = 0; i < jacobian.length; ++i) {
+ final double t = time.get(i);
+ jacobian[i][0] = 1;
+
+ final double p3 = params[3];
+ final double p4 = params[4];
+ final double tOp3 = t / p3;
+ final double tOp4 = t / p4;
+ jacobian[i][1] = Math.exp(-tOp3);
+ jacobian[i][2] = Math.exp(-tOp4);
+ jacobian[i][3] = params[1] * Math.exp(-tOp3) * tOp3 / p3;
+ jacobian[i][4] = params[2] * Math.exp(-tOp4) * tOp4 / p4;
+ }
+ return jacobian;
+ }
+ });
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/MinpackTest.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/MinpackTest.java
new file mode 100644
index 000000000..18e1ac6f5
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/MinpackTest.java
@@ -0,0 +1,1499 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is available here, for
+ * convenience, it is reproduced below.
+
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * |
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ * This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ * |
+ *
+
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class MinpackTest {
+
+ @Test
+ public void testMinpackLinearFullRank() {
+ minpackTest(new LinearFullRankFunction(10, 5, 1.0,
+ 5.0, 2.23606797749979), false);
+ minpackTest(new LinearFullRankFunction(50, 5, 1.0,
+ 8.06225774829855, 6.70820393249937), false);
+ }
+
+ @Test
+ public void testMinpackLinearRank1() {
+ minpackTest(new LinearRank1Function(10, 5, 1.0,
+ 291.521868819476, 1.4638501094228), false);
+ minpackTest(new LinearRank1Function(50, 5, 1.0,
+ 3101.60039334535, 3.48263016573496), false);
+ }
+
+ @Test
+ public void testMinpackLinearRank1ZeroColsAndRows() {
+ minpackTest(new LinearRank1ZeroColsAndRowsFunction(10, 5, 1.0), false);
+ minpackTest(new LinearRank1ZeroColsAndRowsFunction(50, 5, 1.0), false);
+ }
+
+ @Test
+ public void testMinpackRosenbrok() {
+ minpackTest(new RosenbrockFunction(new double[] { -1.2, 1.0 },
+ FastMath.sqrt(24.2)), false);
+ minpackTest(new RosenbrockFunction(new double[] { -12.0, 10.0 },
+ FastMath.sqrt(1795769.0)), false);
+ minpackTest(new RosenbrockFunction(new double[] { -120.0, 100.0 },
+ 11.0 * FastMath.sqrt(169000121.0)), false);
+ }
+
+ @Test
+ public void testMinpackHelicalValley() {
+ minpackTest(new HelicalValleyFunction(new double[] { -1.0, 0.0, 0.0 },
+ 50.0), false);
+ minpackTest(new HelicalValleyFunction(new double[] { -10.0, 0.0, 0.0 },
+ 102.95630140987), false);
+ minpackTest(new HelicalValleyFunction(new double[] { -100.0, 0.0, 0.0},
+ 991.261822123701), false);
+ }
+
+ @Test
+ public void testMinpackPowellSingular() {
+ minpackTest(new PowellSingularFunction(new double[] { 3.0, -1.0, 0.0, 1.0 },
+ 14.6628782986152), false);
+ minpackTest(new PowellSingularFunction(new double[] { 30.0, -10.0, 0.0, 10.0 },
+ 1270.9838708654), false);
+ minpackTest(new PowellSingularFunction(new double[] { 300.0, -100.0, 0.0, 100.0 },
+ 126887.903284750), false);
+ }
+
+ @Test
+ public void testMinpackFreudensteinRoth() {
+ minpackTest(new FreudensteinRothFunction(new double[] { 0.5, -2.0 },
+ 20.0124960961895, 6.99887517584575,
+ new double[] {
+ 11.4124844654993,
+ -0.896827913731509
+ }), false);
+ minpackTest(new FreudensteinRothFunction(new double[] { 5.0, -20.0 },
+ 12432.833948863, 6.9988751744895,
+ new double[] {
+ 11.41300466147456,
+ -0.896796038685959
+ }), false);
+ minpackTest(new FreudensteinRothFunction(new double[] { 50.0, -200.0 },
+ 11426454.595762, 6.99887517242903,
+ new double[] {
+ 11.412781785788564,
+ -0.8968051074920405
+ }), false);
+ }
+
+ @Test
+ public void testMinpackBard() {
+ minpackTest(new BardFunction(1.0, 6.45613629515967, 0.0906359603390466,
+ new double[] {
+ 0.0824105765758334,
+ 1.1330366534715,
+ 2.34369463894115
+ }), false);
+ minpackTest(new BardFunction(10.0, 36.1418531596785, 4.17476870138539,
+ new double[] {
+ 0.840666673818329,
+ -158848033.259565,
+ -164378671.653535
+ }), false);
+ minpackTest(new BardFunction(100.0, 384.114678637399, 4.17476870135969,
+ new double[] {
+ 0.840666673867645,
+ -158946167.205518,
+ -164464906.857771
+ }), false);
+ }
+
+ @Test
+ public void testMinpackKowalikOsborne() {
+ minpackTest(new KowalikOsborneFunction(new double[] { 0.25, 0.39, 0.415, 0.39 },
+ 0.0728915102882945,
+ 0.017535837721129,
+ new double[] {
+ 0.192807810476249,
+ 0.191262653354071,
+ 0.123052801046931,
+ 0.136053221150517
+ }), false);
+ minpackTest(new KowalikOsborneFunction(new double[] { 2.5, 3.9, 4.15, 3.9 },
+ 2.97937007555202,
+ 0.032052192917937,
+ new double[] {
+ 728675.473768287,
+ -14.0758803129393,
+ -32977797.7841797,
+ -20571594.1977912
+ }), false);
+ minpackTest(new KowalikOsborneFunction(new double[] { 25.0, 39.0, 41.5, 39.0 },
+ 29.9590617016037,
+ 0.0175364017658228,
+ new double[] {
+ 0.192948328597594,
+ 0.188053165007911,
+ 0.122430604321144,
+ 0.134575665392506
+ }), false);
+ }
+
+ @Test
+ public void testMinpackMeyer() {
+ minpackTest(new MeyerFunction(new double[] { 0.02, 4000.0, 250.0 },
+ 41153.4665543031, 9.37794514651874,
+ new double[] {
+ 0.00560963647102661,
+ 6181.34634628659,
+ 345.223634624144
+ }), false);
+ minpackTest(new MeyerFunction(new double[] { 0.2, 40000.0, 2500.0 },
+ 4168216.89130846, 792.917871779501,
+ new double[] {
+ 1.42367074157994e-11,
+ 33695.7133432541,
+ 901.268527953801
+ }), true);
+ }
+
+ @Test
+ public void testMinpackWatson() {
+ minpackTest(new WatsonFunction(6, 0.0,
+ 5.47722557505166, 0.0478295939097601,
+ new double[] {
+ -0.0157249615083782, 1.01243488232965,
+ -0.232991722387673, 1.26043101102818,
+ -1.51373031394421, 0.99299727291842
+ }), false);
+ minpackTest(new WatsonFunction(6, 10.0,
+ 6433.12578950026, 0.0478295939096951,
+ new double[] {
+ -0.0157251901386677, 1.01243485860105,
+ -0.232991545843829, 1.26042932089163,
+ -1.51372776706575, 0.99299573426328
+ }), false);
+ minpackTest(new WatsonFunction(6, 100.0,
+ 674256.040605213, 0.047829593911544,
+ new double[] {
+ -0.0157247019712586, 1.01243490925658,
+ -0.232991922761641, 1.26043292929555,
+ -1.51373320452707, 0.99299901922322
+ }), false);
+ minpackTest(new WatsonFunction(9, 0.0,
+ 5.47722557505166, 0.00118311459212420,
+ new double[] {
+ -0.153070644166722e-4, 0.999789703934597,
+ 0.0147639634910978, 0.146342330145992,
+ 1.00082109454817, -2.61773112070507,
+ 4.10440313943354, -3.14361226236241,
+ 1.05262640378759
+ }), false);
+ minpackTest(new WatsonFunction(9, 10.0,
+ 12088.127069307, 0.00118311459212513,
+ new double[] {
+ -0.153071334849279e-4, 0.999789703941234,
+ 0.0147639629786217, 0.146342334818836,
+ 1.00082107321386, -2.61773107084722,
+ 4.10440307655564, -3.14361222178686,
+ 1.05262639322589
+ }), false);
+ minpackTest(new WatsonFunction(9, 100.0,
+ 1269109.29043834, 0.00118311459212384,
+ new double[] {
+ -0.153069523352176e-4, 0.999789703958371,
+ 0.0147639625185392, 0.146342341096326,
+ 1.00082104729164, -2.61773101573645,
+ 4.10440301427286, -3.14361218602503,
+ 1.05262638516774
+ }), false);
+ minpackTest(new WatsonFunction(12, 0.0,
+ 5.47722557505166, 0.217310402535861e-4,
+ new double[] {
+ -0.660266001396382e-8, 1.00000164411833,
+ -0.000563932146980154, 0.347820540050756,
+ -0.156731500244233, 1.05281515825593,
+ -3.24727109519451, 7.2884347837505,
+ -10.271848098614, 9.07411353715783,
+ -4.54137541918194, 1.01201187975044
+ }), false);
+ minpackTest(new WatsonFunction(12, 10.0,
+ 19220.7589790951, 0.217310402518509e-4,
+ new double[] {
+ -0.663710223017410e-8, 1.00000164411787,
+ -0.000563932208347327, 0.347820540486998,
+ -0.156731503955652, 1.05281517654573,
+ -3.2472711515214, 7.28843489430665,
+ -10.2718482369638, 9.07411364383733,
+ -4.54137546533666, 1.01201188830857
+ }), false);
+ minpackTest(new WatsonFunction(12, 100.0,
+ 2018918.04462367, 0.217310402539845e-4,
+ new double[] {
+ -0.663806046485249e-8, 1.00000164411786,
+ -0.000563932210324959, 0.347820540503588,
+ -0.156731504091375, 1.05281517718031,
+ -3.24727115337025, 7.28843489775302,
+ -10.2718482410813, 9.07411364688464,
+ -4.54137546660822, 1.0120118885369
+ }), false);
+ }
+
+ @Test
+ public void testMinpackBox3Dimensional() {
+ minpackTest(new Box3DimensionalFunction(10, new double[] { 0.0, 10.0, 20.0 },
+ 32.1115837449572), false);
+ }
+
+ @Test
+ public void testMinpackJennrichSampson() {
+ minpackTest(new JennrichSampsonFunction(10, new double[] { 0.3, 0.4 },
+ 64.5856498144943, 11.1517793413499,
+ new double[] {
+// 0.2578330049, 0.257829976764542
+ 0.2578199266368004, 0.25782997676455244
+ }), false);
+ }
+
+ @Test
+ public void testMinpackBrownDennis() {
+ minpackTest(new BrownDennisFunction(20,
+ new double[] { 25.0, 5.0, -5.0, -1.0 },
+ 2815.43839161816, 292.954288244866,
+ new double[] {
+ -11.59125141003, 13.2024883984741,
+ -0.403574643314272, 0.236736269844604
+ }), false);
+ minpackTest(new BrownDennisFunction(20,
+ new double[] { 250.0, 50.0, -50.0, -10.0 },
+ 555073.354173069, 292.954270581415,
+ new double[] {
+ -11.5959274272203, 13.2041866926242,
+ -0.403417362841545, 0.236771143410386
+ }), false);
+ minpackTest(new BrownDennisFunction(20,
+ new double[] { 2500.0, 500.0, -500.0, -100.0 },
+ 61211252.2338581, 292.954306151134,
+ new double[] {
+ -11.5902596937374, 13.2020628854665,
+ -0.403688070279258, 0.236665033746463
+ }), false);
+ }
+
+ @Test
+ public void testMinpackChebyquad() {
+ minpackTest(new ChebyquadFunction(1, 8, 1.0,
+ 1.88623796907732, 1.88623796907732,
+ new double[] { 0.5 }), false);
+ minpackTest(new ChebyquadFunction(1, 8, 10.0,
+ 5383344372.34005, 1.88424820499951,
+ new double[] { 0.9817314924684 }), false);
+ minpackTest(new ChebyquadFunction(1, 8, 100.0,
+ 0.118088726698392e19, 1.88424820499347,
+ new double[] { 0.9817314852934 }), false);
+ minpackTest(new ChebyquadFunction(8, 8, 1.0,
+ 0.196513862833975, 0.0593032355046727,
+ new double[] {
+ 0.0431536648587336, 0.193091637843267,
+ 0.266328593812698, 0.499999334628884,
+ 0.500000665371116, 0.733671406187302,
+ 0.806908362156733, 0.956846335141266
+ }), false);
+ minpackTest(new ChebyquadFunction(9, 9, 1.0,
+ 0.16994993465202, 0.0,
+ new double[] {
+ 0.0442053461357828, 0.199490672309881,
+ 0.23561910847106, 0.416046907892598,
+ 0.5, 0.583953092107402,
+ 0.764380891528940, 0.800509327690119,
+ 0.955794653864217
+ }), false);
+ minpackTest(new ChebyquadFunction(10, 10, 1.0,
+ 0.183747831178711, 0.0806471004038253,
+ new double[] {
+ 0.0596202671753563, 0.166708783805937,
+ 0.239171018813509, 0.398885290346268,
+ 0.398883667870681, 0.601116332129320,
+ 0.60111470965373, 0.760828981186491,
+ 0.833291216194063, 0.940379732824644
+ }), false);
+ }
+
+ @Test
+ public void testMinpackBrownAlmostLinear() {
+ minpackTest(new BrownAlmostLinearFunction(10, 0.5,
+ 16.5302162063499, 0.0,
+ new double[] {
+ 0.979430303349862, 0.979430303349862,
+ 0.979430303349862, 0.979430303349862,
+ 0.979430303349862, 0.979430303349862,
+ 0.979430303349862, 0.979430303349862,
+ 0.979430303349862, 1.20569696650138
+ }), false);
+ minpackTest(new BrownAlmostLinearFunction(10, 5.0,
+ 9765624.00089211, 0.0,
+ new double[] {
+ 0.979430303349865, 0.979430303349865,
+ 0.979430303349865, 0.979430303349865,
+ 0.979430303349865, 0.979430303349865,
+ 0.979430303349865, 0.979430303349865,
+ 0.979430303349865, 1.20569696650135
+ }), false);
+ minpackTest(new BrownAlmostLinearFunction(10, 50.0,
+ 0.9765625e17, 0.0,
+ new double[] {
+ 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0
+ }), false);
+ minpackTest(new BrownAlmostLinearFunction(30, 0.5,
+ 83.476044467848, 0.0,
+ new double[] {
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 0.997754216442807,
+ 0.997754216442807, 1.06737350671578
+ }), false);
+ minpackTest(new BrownAlmostLinearFunction(40, 0.5,
+ 128.026364472323, 0.0,
+ new double[] {
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 1.00000000000002, 1.00000000000002,
+ 0.999999999999121
+ }), false);
+ }
+
+ @Test
+ public void testMinpackOsborne1() {
+ minpackTest(new Osborne1Function(new double[] { 0.5, 1.5, -1.0, 0.01, 0.02, },
+ 0.937564021037838, 0.00739249260904843,
+ new double[] {
+ 0.375410049244025, 1.93584654543108,
+ -1.46468676748716, 0.0128675339110439,
+ 0.0221227011813076
+ }), false);
+ }
+
+ // MINPACK "Osborne 2" sum-of-Gaussians fitting problem: 11 parameters, 65 data points.
+ // Arguments: start point, theoretical start cost, theoretical minimal cost, optimal parameters.
+ @Test
+ public void testMinpackOsborne2() {
+ minpackTest(new Osborne2Function(new double[] {
+ 1.3, 0.65, 0.65, 0.7, 0.6,
+ 3.0, 5.0, 7.0, 2.0, 4.5, 5.5
+ },
+ 1.44686540984712, 0.20034404483314,
+ new double[] {
+ 1.30997663810096, 0.43155248076,
+ 0.633661261602859, 0.599428560991695,
+ 0.754179768272449, 0.904300082378518,
+ 1.36579949521007, 4.82373199748107,
+ 2.39868475104871, 4.56887554791452,
+ 5.67534206273052
+ }), false);
+ }
+
+ /**
+ * Runs one MINPACK test case: optimizes {@code function} from its standard
+ * starting point against a zero target with unit weights, then checks the
+ * achieved cost and parameters against the theoretical minimum.
+ *
+ * @param function test problem to optimize.
+ * @param exceptionExpected if {@code true} the evaluation budget must be
+ * exhausted ({@code TooManyEvaluationsException}); otherwise the optimum
+ * must be reached within the budget.
+ */
+ private void minpackTest(MinpackFunction function, boolean exceptionExpected) {
+ // Tolerances are sqrt(eps) / eps with eps = 2.22e-16 (the double machine
+ // epsilon) — presumably mirroring the original MINPACK driver settings.
+ LevenbergMarquardtOptimizer optimizer
+ = new LevenbergMarquardtOptimizer(FastMath.sqrt(2.22044604926e-16),
+ FastMath.sqrt(2.22044604926e-16),
+ 2.22044604926e-16);
+ try {
+ // Evaluation budget: 400 * (n + 1), with n the number of parameters.
+ PointVectorValuePair optimum
+ = optimizer.optimize(new MaxEval(400 * (function.getN() + 1)),
+ function.getModelFunction(),
+ function.getModelFunctionJacobian(),
+ new Target(function.getTarget()),
+ new Weight(function.getWeight()),
+ new InitialGuess(function.getStartPoint()));
+ // Reaching this line means the optimizer converged within the budget.
+ Assert.assertFalse(exceptionExpected);
+ function.checkTheoreticalMinCost(optimizer.getRMS());
+ function.checkTheoreticalMinParams(optimum);
+ } catch (TooManyEvaluationsException e) {
+ Assert.assertTrue(exceptionExpected);
+ }
+ }
+
+ /**
+ * Base class for the MINPACK least-squares test problems (Moré, Garbow
+ * and Hillstrom, "Testing Unconstrained Optimization Software").
+ * Subclasses supply the residual vector and its Jacobian; this class holds
+ * the standard starting point and the theoretical minimum used to check
+ * the optimizer's result.
+ */
+ private static abstract class MinpackFunction {
+ /** Number of parameters (dimension of the search space). */
+ protected int n;
+ /** Number of residuals (equations). */
+ protected int m;
+ /** Standard starting point (private clone of the caller's array). */
+ protected double[] startParams;
+ /** Cost at the theoretical optimum. */
+ protected double theoreticalMinCost;
+ /** Theoretical optimal parameters, or null when they are not checked. */
+ protected double[] theoreticalMinParams;
+ /** Relative tolerance used when checking the minimal cost. */
+ protected double costAccuracy;
+ /** Relative tolerance used when checking the optimal parameters. */
+ protected double paramsAccuracy;
+
+ /**
+ * @param m number of residuals.
+ * @param startParams starting point (defensively cloned).
+ * @param theoreticalMinCost cost at the theoretical optimum.
+ * @param theoreticalMinParams theoretical optimum, or null to skip the check.
+ */
+ protected MinpackFunction(int m, double[] startParams,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ this.m = m;
+ this.n = startParams.length;
+ this.startParams = startParams.clone();
+ this.theoreticalMinCost = theoreticalMinCost;
+ this.theoreticalMinParams = theoreticalMinParams;
+ this.costAccuracy = 1.0e-8;
+ this.paramsAccuracy = 1.0e-5;
+ }
+
+ /** @return a new array of length {@code n} filled with {@code x}. */
+ protected static double[] buildArray(int n, double x) {
+ double[] array = new double[n];
+ Arrays.fill(array, x);
+ return array;
+ }
+
+ /** @return a zero target vector, so the raw residuals are minimized. */
+ public double[] getTarget() {
+ return buildArray(m, 0.0);
+ }
+
+ /** @return unit weights for all residuals. */
+ public double[] getWeight() {
+ return buildArray(m, 1.0);
+ }
+
+ /** @return a defensive copy of the standard starting point. */
+ public double[] getStartPoint() {
+ return startParams.clone();
+ }
+
+ /** Overrides the default cost-check tolerance. */
+ protected void setCostAccuracy(double costAccuracy) {
+ this.costAccuracy = costAccuracy;
+ }
+
+ /** Overrides the default parameters-check tolerance. */
+ protected void setParamsAccuracy(double paramsAccuracy) {
+ this.paramsAccuracy = paramsAccuracy;
+ }
+
+ /** @return the number of parameters. */
+ public int getN() {
+ // Consistency fix: return the cached dimension instead of re-reading
+ // startParams.length (same value — startParams is cloned at
+ // construction and never resized).
+ return n;
+ }
+
+ /**
+ * Checks the achieved cost against the theoretical minimum.
+ * The optimizer reports an RMS; the MINPACK reference values are costs,
+ * i.e. sqrt(m) times the RMS.
+ */
+ public void checkTheoreticalMinCost(double rms) {
+ double threshold = costAccuracy * (1.0 + theoreticalMinCost);
+ Assert.assertEquals(theoreticalMinCost, FastMath.sqrt(m) * rms, threshold);
+ }
+
+ /** Checks each parameter of the optimum against its theoretical value. */
+ public void checkTheoreticalMinParams(PointVectorValuePair optimum) {
+ double[] params = optimum.getPointRef();
+ if (theoreticalMinParams != null) {
+ for (int i = 0; i < theoreticalMinParams.length; ++i) {
+ double mi = theoreticalMinParams[i];
+ double vi = params[i];
+ Assert.assertEquals(mi, vi, paramsAccuracy * (1.0 + FastMath.abs(mi)));
+ }
+ }
+ }
+
+ /** @return the residual vector wrapped for the optimizer. */
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] point) {
+ return computeValue(point);
+ }
+ });
+ }
+
+ /** @return the Jacobian wrapped for the optimizer. */
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] point) {
+ return computeJacobian(point);
+ }
+ });
+ }
+
+ /** @return the m x n Jacobian of the residuals at the given point. */
+ public abstract double[][] computeJacobian(double[] variables);
+ /** @return the m residuals at the given point. */
+ public abstract double[] computeValue(double[] variables);
+ }
+
+ // MINPACK "linear function - full rank" problem; minimum at all variables = -1.
+ private static class LinearFullRankFunction extends MinpackFunction {
+ // NOTE(review): serialVersionUID without a visible Serializable in the
+ // hierarchy — confirm whether MinpackFunction's superclasses are Serializable.
+ private static final long serialVersionUID = -9030323226268039536L;
+
+ // theoreticalStartCost is accepted for symmetry with the other problems
+ // but is not used.
+ public LinearFullRankFunction(int m, int n, double x0,
+ double theoreticalStartCost,
+ double theoreticalMinCost) {
+ super(m, buildArray(n, x0), theoreticalMinCost,
+ buildArray(n, -1.0));
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ // d f_i / d x_j = delta_ij - 2/m (constant Jacobian).
+ double t = 2.0 / m;
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ jacobian[i] = new double[n];
+ for (int j = 0; j < n; ++j) {
+ jacobian[i][j] = (i == j) ? (1 - t) : -t;
+ }
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double sum = 0;
+ for (int i = 0; i < n; ++i) {
+ sum += variables[i];
+ }
+ // f_i = x_i - t for i < n, and -t for the remaining m - n residuals.
+ double t = 1 + 2 * sum / m;
+ double[] f = new double[m];
+ for (int i = 0; i < n; ++i) {
+ f[i] = variables[i] - t;
+ }
+ Arrays.fill(f, n, m, -t);
+ return f;
+ }
+ }
+
+ // MINPACK "linear function - rank 1" problem: the Jacobian has rank one,
+ // so the theoretical minimal parameters are not unique (null in super call).
+ private static class LinearRank1Function extends MinpackFunction {
+ private static final long serialVersionUID = 8494863245104608300L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public LinearRank1Function(int m, int n, double x0,
+ double theoreticalStartCost,
+ double theoreticalMinCost) {
+ super(m, buildArray(n, x0), theoreticalMinCost, null);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ // d f_i / d x_j = (i+1)(j+1): outer product of two index vectors.
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ jacobian[i] = new double[n];
+ for (int j = 0; j < n; ++j) {
+ jacobian[i][j] = (i + 1) * (j + 1);
+ }
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double[] f = new double[m];
+ double sum = 0;
+ for (int i = 0; i < n; ++i) {
+ sum += (i + 1) * variables[i];
+ }
+ // f_i = (i+1) * sum_j (j+1) x_j - 1.
+ for (int i = 0; i < m; ++i) {
+ f[i] = (i + 1) * sum - 1;
+ }
+ return f;
+ }
+ }
+
+ // MINPACK "linear function - rank 1 with zero columns and rows": first and
+ // last columns and first and last rows of the Jacobian are identically zero.
+ // The theoretical minimal cost has the closed form used in the super call.
+ private static class LinearRank1ZeroColsAndRowsFunction extends MinpackFunction {
+ private static final long serialVersionUID = -3316653043091995018L;
+
+ public LinearRank1ZeroColsAndRowsFunction(int m, int n, double x0) {
+ super(m, buildArray(n, x0),
+ FastMath.sqrt((m * (m + 3) - 6) / (2.0 * (2 * m - 3))),
+ null);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ jacobian[i] = new double[n];
+ jacobian[i][0] = 0;
+ for (int j = 1; j < (n - 1); ++j) {
+ if (i == 0) {
+ jacobian[i][j] = 0;
+ } else if (i != (m - 1)) {
+ // Matches computeValue: f_i = i * sum_j (j+1) x_j - 1.
+ jacobian[i][j] = i * (j + 1);
+ } else {
+ jacobian[i][j] = 0;
+ }
+ }
+ jacobian[i][n - 1] = 0;
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double[] f = new double[m];
+ double sum = 0;
+ // First and last variables do not contribute (zero columns).
+ for (int i = 1; i < (n - 1); ++i) {
+ sum += (i + 1) * variables[i];
+ }
+ for (int i = 0; i < (m - 1); ++i) {
+ f[i] = i * sum - 1;
+ }
+ // Last residual is constant (zero row).
+ f[m - 1] = -1;
+ return f;
+ }
+ }
+
+ private static class RosenbrockFunction extends MinpackFunction {
+ private static final long serialVersionUID = 2893438180956569134L;
+ public RosenbrockFunction(double[] startParams, double theoreticalStartCost) {
+ super(2, startParams, 0.0, buildArray(2, 1.0));
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ return new double[][] { { -20 * x1, 10 }, { -1, 0 } };
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ return new double[] { 10 * (x2 - x1 * x1), 1 - x1 };
+ }
+ }
+
+ // MINPACK "helical valley" problem; minimum 0 at (1, 0, 0).
+ private static class HelicalValleyFunction extends MinpackFunction {
+ private static final long serialVersionUID = 220613787843200102L;
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public HelicalValleyFunction(double[] startParams,
+ double theoreticalStartCost) {
+ super(3, startParams, 0.0, new double[] { 1.0, 0.0, 0.0 });
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double tmpSquare = x1 * x1 + x2 * x2;
+ // tmp1 = 2*pi*r^2 appears in d(theta/(2*pi))/dx1 = -x2 / (2*pi*r^2).
+ double tmp1 = twoPi * tmpSquare;
+ double tmp2 = FastMath.sqrt(tmpSquare);
+ return new double[][] {
+ { 100 * x2 / tmp1, -100 * x1 / tmp1, 10 },
+ { 10 * x1 / tmp2, 10 * x2 / tmp2, 0 },
+ { 0, 0, 1 }
+ };
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ // tmp1 = theta(x1, x2) / (2*pi), with a quadrant correction for x1 < 0
+ // and an explicit value on the x1 = 0 axis (atan undefined there).
+ double tmp1;
+ if (x1 == 0) {
+ tmp1 = (x2 >= 0) ? 0.25 : -0.25;
+ } else {
+ tmp1 = FastMath.atan(x2 / x1) / twoPi;
+ if (x1 < 0) {
+ tmp1 += 0.5;
+ }
+ }
+ double tmp2 = FastMath.sqrt(x1 * x1 + x2 * x2);
+ return new double[] {
+ 10.0 * (x3 - 10 * tmp1),
+ 10.0 * (tmp2 - 1),
+ x3
+ };
+ }
+
+ // Constant declared after the methods (kept in place to preserve the
+ // original layout of this test file).
+ private static final double twoPi = 2.0 * FastMath.PI;
+ }
+
+ // MINPACK "Powell singular" problem: 4 residuals in 4 variables whose
+ // Jacobian is singular at the solution (all zeros); minimum 0 at the origin.
+ private static class PowellSingularFunction extends MinpackFunction {
+ private static final long serialVersionUID = 7298364171208142405L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public PowellSingularFunction(double[] startParams,
+ double theoreticalStartCost) {
+ super(4, startParams, 0.0, buildArray(4, 0.0));
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ return new double[][] {
+ { 1, 10, 0, 0 },
+ { 0, 0, sqrt5, -sqrt5 },
+ { 0, 2 * (x2 - 2 * x3), -4 * (x2 - 2 * x3), 0 },
+ { 2 * sqrt10 * (x1 - x4), 0, 0, -2 * sqrt10 * (x1 - x4) }
+ };
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ return new double[] {
+ x1 + 10 * x2,
+ sqrt5 * (x3 - x4),
+ (x2 - 2 * x3) * (x2 - 2 * x3),
+ sqrt10 * (x1 - x4) * (x1 - x4)
+ };
+ }
+
+ // Constants declared after the methods (original layout preserved).
+ private static final double sqrt5 = FastMath.sqrt( 5.0);
+ private static final double sqrt10 = FastMath.sqrt(10.0);
+ }
+
+ // MINPACK "Freudenstein and Roth" problem: 2 residuals in 2 variables with
+ // both a global minimum and a well-known local minimum.
+ private static class FreudensteinRothFunction extends MinpackFunction {
+ private static final long serialVersionUID = 2892404999344244214L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public FreudensteinRothFunction(double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(2, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x2 = variables[1];
+ // d f1/dx2 = 10 x2 - 3 x2^2 - 2, d f2/dx2 = 2 x2 + 3 x2^2 - 14
+ // (derivatives of the cubics in computeValue).
+ return new double[][] {
+ { 1, x2 * (10 - 3 * x2) - 2 },
+ { 1, x2 * ( 2 + 3 * x2) - 14, }
+ };
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ return new double[] {
+ -13.0 + x1 + ((5.0 - x2) * x2 - 2.0) * x2,
+ -29.0 + x1 + ((1.0 + x2) * x2 - 14.0) * x2
+ };
+ }
+ }
+
+ // MINPACK "Bard" data-fitting problem: 3 parameters, 15 data points.
+ private static class BardFunction extends MinpackFunction {
+ private static final long serialVersionUID = 5990442612572087668L;
+
+ // All 3 parameters start at x0; theoreticalStartCost is unused.
+ public BardFunction(double x0,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(15, buildArray(3, x0), theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ // tmp1 = u_i, tmp2 = v_i, tmp3 = w_i = min(u_i, v_i) in Bard's notation.
+ double tmp1 = i + 1;
+ double tmp2 = 15 - i;
+ double tmp3 = (i <= 7) ? tmp1 : tmp2;
+ double tmp4 = x2 * tmp2 + x3 * tmp3;
+ tmp4 *= tmp4;
+ jacobian[i] = new double[] { -1, tmp1 * tmp2 / tmp4, tmp1 * tmp3 / tmp4 };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double tmp1 = i + 1;
+ double tmp2 = 15 - i;
+ double tmp3 = (i <= 7) ? tmp1 : tmp2;
+ f[i] = y[i] - (x1 + tmp1 / (x2 * tmp2 + x3 * tmp3));
+ }
+ return f;
+ }
+
+ // Reference data from the MINPACK problem definition.
+ private static final double[] y = {
+ 0.14, 0.18, 0.22, 0.25, 0.29,
+ 0.32, 0.35, 0.39, 0.37, 0.58,
+ 0.73, 0.96, 1.34, 2.10, 4.39
+ };
+ }
+
+ // MINPACK "Kowalik and Osborne" rational data-fitting problem:
+ // 4 parameters, 11 data points. Far starting points need looser tolerances.
+ private static class KowalikOsborneFunction extends MinpackFunction {
+ private static final long serialVersionUID = -4867445739880495801L;
+
+ public KowalikOsborneFunction(double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(11, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ // Hard (distant) starting points converge less precisely.
+ if (theoreticalStartCost > 20.0) {
+ setCostAccuracy(2.0e-4);
+ setParamsAccuracy(5.0e-3);
+ }
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ // tmp = denominator v(v + x3) + x4; j3 and j4 reuse j1*j2 since
+ // the algebra factors that way for this rational model.
+ double tmp = v[i] * (v[i] + x3) + x4;
+ double j1 = -v[i] * (v[i] + x2) / tmp;
+ double j2 = -v[i] * x1 / tmp;
+ double j3 = j1 * j2;
+ double j4 = j3 / v[i];
+ jacobian[i] = new double[] { j1, j2, j3, j4 };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ f[i] = y[i] - x1 * (v[i] * (v[i] + x2)) / (v[i] * (v[i] + x3) + x4);
+ }
+ return f;
+ }
+
+ // Reference abscissas and data from the MINPACK problem definition.
+ private static final double[] v = {
+ 4.0, 2.0, 1.0, 0.5, 0.25, 0.167, 0.125, 0.1, 0.0833, 0.0714, 0.0625
+ };
+
+ private static final double[] y = {
+ 0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
+ 0.0456, 0.0342, 0.0323, 0.0235, 0.0246
+ };
+ }
+
+ // MINPACK "Meyer" exponential fitting problem (thermistor data):
+ // 3 parameters, 16 data points; notoriously ill-conditioned.
+ private static class MeyerFunction extends MinpackFunction {
+ private static final long serialVersionUID = -838060619150131027L;
+
+ public MeyerFunction(double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(16, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ // Hard (distant) starting points converge less precisely.
+ if (theoreticalStartCost > 1.0e6) {
+ setCostAccuracy(7.0e-3);
+ setParamsAccuracy(2.0e-2);
+ }
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ // temp = t_i + x3 with t_i = 45 + 5 (i+1), matching computeValue.
+ double temp = 5.0 * (i + 1) + 45.0 + x3;
+ double tmp1 = x2 / temp;
+ double tmp2 = FastMath.exp(tmp1);
+ double tmp3 = x1 * tmp2 / temp;
+ jacobian[i] = new double[] { tmp2, tmp3, -tmp1 * tmp3 };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ f[i] = x1 * FastMath.exp(x2 / (5.0 * (i + 1) + 45.0 + x3)) - y[i];
+ }
+ return f;
+ }
+
+ // Reference data from the MINPACK problem definition.
+ private static final double[] y = {
+ 34780.0, 28610.0, 23650.0, 19630.0,
+ 16370.0, 13720.0, 11540.0, 9744.0,
+ 8261.0, 7030.0, 6005.0, 5147.0,
+ 4427.0, 3820.0, 3307.0, 2872.0
+ };
+ }
+
+ // MINPACK "Watson" problem: polynomial-coefficient fitting with 31 residuals;
+ // the last two residuals pin down x1 and the relation x2 - x1^2 - 1.
+ private static class WatsonFunction extends MinpackFunction {
+ private static final long serialVersionUID = -9034759294980218927L;
+
+ public WatsonFunction(int n, double x0,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(31, buildArray(n, x0), theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double[][] jacobian = new double[m][];
+
+ for (int i = 0; i < (m - 2); ++i) {
+ // div = t_i = (i+1)/29; s2 = sum_j x_j t_i^j (the polynomial value).
+ double div = (i + 1) / 29.0;
+ double s2 = 0.0;
+ double dx = 1.0;
+ for (int j = 0; j < n; ++j) {
+ s2 += dx * variables[j];
+ dx *= div;
+ }
+ double temp= 2 * div * s2;
+ // Column j gets div^(j-1) * (j - 2 div s2); dx starts at div^-1.
+ dx = 1.0 / div;
+ jacobian[i] = new double[n];
+ for (int j = 0; j < n; ++j) {
+ jacobian[i][j] = dx * (j - temp);
+ dx *= div;
+ }
+ }
+
+ // Rows for the two extra residuals f[m-2] = x1 and f[m-1] = x2 - x1^2 - 1.
+ jacobian[m - 2] = new double[n];
+ jacobian[m - 2][0] = 1;
+
+ jacobian[m - 1] = new double[n];
+ jacobian[m - 1][0]= -2 * variables[0];
+ jacobian[m - 1][1]= 1;
+
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double[] f = new double[m];
+ for (int i = 0; i < (m - 2); ++i) {
+ double div = (i + 1) / 29.0;
+ // s1 = derivative of the polynomial, s2 = the polynomial itself.
+ double s1 = 0;
+ double dx = 1;
+ for (int j = 1; j < n; ++j) {
+ s1 += j * dx * variables[j];
+ dx *= div;
+ }
+ double s2 = 0;
+ dx = 1;
+ for (int j = 0; j < n; ++j) {
+ s2 += dx * variables[j];
+ dx *= div;
+ }
+ f[i] = s1 - s2 * s2 - 1;
+ }
+
+ double x1 = variables[0];
+ double x2 = variables[1];
+ f[m - 2] = x1;
+ f[m - 1] = x2 - x1 * x1 - 1;
+
+ return f;
+ }
+ }
+
+ // MINPACK "Box three-dimensional" problem; minimum 0 at (1, 10, 1).
+ private static class Box3DimensionalFunction extends MinpackFunction {
+ private static final long serialVersionUID = 5511403858142574493L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public Box3DimensionalFunction(int m, double[] startParams,
+ double theoreticalStartCost) {
+ super(m, startParams, 0.0,
+ new double[] { 1.0, 10.0, 1.0 });
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ // tmp = t_i = (i+1)/10; note exp(-i - 1) == exp(-10 t_i).
+ double tmp = (i + 1) / 10.0;
+ jacobian[i] = new double[] {
+ -tmp * FastMath.exp(-tmp * x1),
+ tmp * FastMath.exp(-tmp * x2),
+ FastMath.exp(-i - 1) - FastMath.exp(-tmp)
+ };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double tmp = (i + 1) / 10.0;
+ f[i] = FastMath.exp(-tmp * x1) - FastMath.exp(-tmp * x2)
+ + (FastMath.exp(-i - 1) - FastMath.exp(-tmp)) * x3;
+ }
+ return f;
+ }
+ }
+
+ // MINPACK "Jennrich and Sampson" problem: f_i = 2 + 2i - exp(i x1) - exp(i x2).
+ private static class JennrichSampsonFunction extends MinpackFunction {
+ private static final long serialVersionUID = -2489165190443352947L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public JennrichSampsonFunction(int m, double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(m, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ double t = i + 1;
+ jacobian[i] = new double[] { -t * FastMath.exp(t * x1), -t * FastMath.exp(t * x2) };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double temp = i + 1;
+ f[i] = 2 + 2 * temp - FastMath.exp(temp * x1) - FastMath.exp(temp * x2);
+ }
+ return f;
+ }
+ }
+
+ // MINPACK "Brown and Dennis" problem: residuals are themselves sums of two
+ // squares, hence the chain-rule factors of 2 in the Jacobian.
+ private static class BrownDennisFunction extends MinpackFunction {
+ private static final long serialVersionUID = 8340018645694243910L;
+
+ // theoreticalStartCost is accepted for symmetry but not used.
+ public BrownDennisFunction(int m, double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(m, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ // This problem is checked with a slightly looser cost tolerance.
+ setCostAccuracy(2.5e-8);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ // temp = t_i = (i+1)/5; tmp1 and tmp2 are the two inner residual terms.
+ double temp = (i + 1) / 5.0;
+ double ti = FastMath.sin(temp);
+ double tmp1 = x1 + temp * x2 - FastMath.exp(temp);
+ double tmp2 = x3 + ti * x4 - FastMath.cos(temp);
+ jacobian[i] = new double[] {
+ 2 * tmp1, 2 * temp * tmp1, 2 * tmp2, 2 * ti * tmp2
+ };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double temp = (i + 1) / 5.0;
+ double tmp1 = x1 + temp * x2 - FastMath.exp(temp);
+ double tmp2 = x3 + FastMath.sin(temp) * x4 - FastMath.cos(temp);
+ f[i] = tmp1 * tmp1 + tmp2 * tmp2;
+ }
+ return f;
+ }
+ }
+
+ // MINPACK "Chebyquad" problem: residuals are averaged shifted Chebyshev
+ // polynomials minus their exact integrals over [0, 1]. The recurrences
+ // below evaluate T_k(2x - 1) and its derivative iteratively.
+ private static class ChebyquadFunction extends MinpackFunction {
+ private static final long serialVersionUID = -2394877275028008594L;
+
+ // Standard start point: x_i = factor * (i+1)/(n+1), equally spaced in (0, factor).
+ private static double[] buildChebyquadArray(int n, double factor) {
+ double[] array = new double[n];
+ double inv = factor / (n + 1);
+ for (int i = 0; i < n; ++i) {
+ array[i] = (i + 1) * inv;
+ }
+ return array;
+ }
+
+ public ChebyquadFunction(int n, int m, double factor,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(m, buildChebyquadArray(n, factor), theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ jacobian[i] = new double[n];
+ }
+
+ double dx = 1.0 / n;
+ for (int j = 0; j < n; ++j) {
+ // tmp1/tmp2 track T_{k-1}/T_k at 2x-1; tmp3/tmp4 their derivatives.
+ double tmp1 = 1;
+ double tmp2 = 2 * variables[j] - 1;
+ double temp = 2 * tmp2;
+ double tmp3 = 0;
+ double tmp4 = 2;
+ for (int i = 0; i < m; ++i) {
+ jacobian[i][j] = dx * tmp4;
+ double ti = 4 * tmp2 + temp * tmp4 - tmp3;
+ tmp3 = tmp4;
+ tmp4 = ti;
+ ti = temp * tmp2 - tmp1;
+ tmp1 = tmp2;
+ tmp2 = ti;
+ }
+ }
+
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double[] f = new double[m];
+
+ // Accumulate T_{i+1}(2 x_j - 1) over all variables via the Chebyshev
+ // recurrence T_{k+1} = 2u T_k - T_{k-1}.
+ for (int j = 0; j < n; ++j) {
+ double tmp1 = 1;
+ double tmp2 = 2 * variables[j] - 1;
+ double temp = 2 * tmp2;
+ for (int i = 0; i < m; ++i) {
+ f[i] += tmp2;
+ double ti = temp * tmp2 - tmp1;
+ tmp1 = tmp2;
+ tmp2 = ti;
+ }
+ }
+
+ // Subtract the exact integrals: zero for odd-order polynomials,
+ // -1/(k^2 - 1) for even order k = i+1 (hence the alternating flag).
+ double dx = 1.0 / n;
+ boolean iev = false;
+ for (int i = 0; i < m; ++i) {
+ f[i] *= dx;
+ if (iev) {
+ f[i] += 1.0 / (i * (i + 2));
+ }
+ iev = ! iev;
+ }
+
+ return f;
+ }
+ }
+
+ /**
+ * MINPACK "Brown almost-linear" problem: n - 1 linear residuals
+ * f_i = x_i + sum(x_j) - (n + 1) plus one nonlinear residual
+ * f_{n-1} = prod(x_j) - 1.
+ */
+ private static class BrownAlmostLinearFunction extends MinpackFunction {
+ private static final long serialVersionUID = 8239594490466964725L;
+
+ /** All m variables start at {@code factor}; theoreticalStartCost is unused. */
+ public BrownAlmostLinearFunction(int m, double factor,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(m, buildArray(m, factor), theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ jacobian[i] = new double[n];
+ }
+
+ // Rows 0 .. n-2: d f_i / d x_j = 1 + delta_ij.
+ double prod = 1;
+ for (int j = 0; j < n; ++j) {
+ prod *= variables[j];
+ for (int i = 0; i < n; ++i) {
+ jacobian[i][j] = 1;
+ }
+ jacobian[j][j] = 2;
+ }
+
+ // Last row: d f_{n-1} / d x_j = prod_{k != j} x_k.
+ // BUGFIX: the original overwrote the shared "prod" accumulator when a
+ // variable was exactly zero, corrupting the derivatives of every
+ // subsequent column; the partial product is now kept in a local.
+ for (int j = 0; j < n; ++j) {
+ double temp = variables[j];
+ if (temp == 0) {
+ double partial = 1;
+ for (int k = 0; k < n; ++k) {
+ if (k != j) {
+ partial *= variables[k];
+ }
+ }
+ jacobian[n - 1][j] = partial;
+ } else {
+ // If any other variable is zero, prod is zero and so is prod / temp,
+ // which is the correct partial product.
+ jacobian[n - 1][j] = prod / temp;
+ }
+ }
+
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double[] f = new double[m];
+ double sum = -(n + 1);
+ double prod = 1;
+ for (int j = 0; j < n; ++j) {
+ sum += variables[j];
+ prod *= variables[j];
+ }
+ for (int i = 0; i < n; ++i) {
+ f[i] = variables[i] + sum;
+ }
+ // The last linear residual is replaced by the product constraint.
+ f[n - 1] = prod - 1;
+ return f;
+ }
+ }
+
+ // MINPACK "Osborne 1" problem: fit y = x1 + x2 exp(-t x4) + x3 exp(-t x5)
+ // to 33 data points sampled at t = 10 i.
+ private static class Osborne1Function extends MinpackFunction {
+ private static final long serialVersionUID = 4006743521149849494L;
+
+ public Osborne1Function(double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(33, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double x5 = variables[4];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ double temp = 10.0 * i;
+ double tmp1 = FastMath.exp(-temp * x4);
+ double tmp2 = FastMath.exp(-temp * x5);
+ jacobian[i] = new double[] {
+ -1, -tmp1, -tmp2, temp * x2 * tmp1, temp * x3 * tmp2
+ };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double x5 = variables[4];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double temp = 10.0 * i;
+ double tmp1 = FastMath.exp(-temp * x4);
+ double tmp2 = FastMath.exp(-temp * x5);
+ f[i] = y[i] - (x1 + x2 * tmp1 + x3 * tmp2);
+ }
+ return f;
+ }
+
+ // Reference data from the MINPACK problem definition.
+ private static final double[] y = {
+ 0.844, 0.908, 0.932, 0.936, 0.925, 0.908, 0.881, 0.850, 0.818, 0.784, 0.751,
+ 0.718, 0.685, 0.658, 0.628, 0.603, 0.580, 0.558, 0.538, 0.522, 0.506, 0.490,
+ 0.478, 0.467, 0.457, 0.448, 0.438, 0.431, 0.424, 0.420, 0.414, 0.411, 0.406
+ };
+ }
+
+ // MINPACK "Osborne 2" problem: fit a sum of one decaying exponential and
+ // three Gaussians (centers x09..x11, widths x06..x08, amplitudes x01..x04)
+ // to 65 data points sampled at t = i/10.
+ private static class Osborne2Function extends MinpackFunction {
+ private static final long serialVersionUID = -8418268780389858746L;
+
+ public Osborne2Function(double[] startParams,
+ double theoreticalStartCost,
+ double theoreticalMinCost,
+ double[] theoreticalMinParams) {
+ super(65, startParams, theoreticalMinCost,
+ theoreticalMinParams);
+ }
+
+ @Override
+ public double[][] computeJacobian(double[] variables) {
+ double x01 = variables[0];
+ double x02 = variables[1];
+ double x03 = variables[2];
+ double x04 = variables[3];
+ double x05 = variables[4];
+ double x06 = variables[5];
+ double x07 = variables[6];
+ double x08 = variables[7];
+ double x09 = variables[8];
+ double x10 = variables[9];
+ double x11 = variables[10];
+ double[][] jacobian = new double[m][];
+ for (int i = 0; i < m; ++i) {
+ double temp = i / 10.0;
+ // tmp1..tmp4: the four basis terms evaluated at t = temp.
+ double tmp1 = FastMath.exp(-x05 * temp);
+ double tmp2 = FastMath.exp(-x06 * (temp - x09) * (temp - x09));
+ double tmp3 = FastMath.exp(-x07 * (temp - x10) * (temp - x10));
+ double tmp4 = FastMath.exp(-x08 * (temp - x11) * (temp - x11));
+ jacobian[i] = new double[] {
+ -tmp1,
+ -tmp2,
+ -tmp3,
+ -tmp4,
+ temp * x01 * tmp1,
+ x02 * (temp - x09) * (temp - x09) * tmp2,
+ x03 * (temp - x10) * (temp - x10) * tmp3,
+ x04 * (temp - x11) * (temp - x11) * tmp4,
+ -2 * x02 * x06 * (temp - x09) * tmp2,
+ -2 * x03 * x07 * (temp - x10) * tmp3,
+ -2 * x04 * x08 * (temp - x11) * tmp4
+ };
+ }
+ return jacobian;
+ }
+
+ @Override
+ public double[] computeValue(double[] variables) {
+ double x01 = variables[0];
+ double x02 = variables[1];
+ double x03 = variables[2];
+ double x04 = variables[3];
+ double x05 = variables[4];
+ double x06 = variables[5];
+ double x07 = variables[6];
+ double x08 = variables[7];
+ double x09 = variables[8];
+ double x10 = variables[9];
+ double x11 = variables[10];
+ double[] f = new double[m];
+ for (int i = 0; i < m; ++i) {
+ double temp = i / 10.0;
+ double tmp1 = FastMath.exp(-x05 * temp);
+ double tmp2 = FastMath.exp(-x06 * (temp - x09) * (temp - x09));
+ double tmp3 = FastMath.exp(-x07 * (temp - x10) * (temp - x10));
+ double tmp4 = FastMath.exp(-x08 * (temp - x11) * (temp - x11));
+ f[i] = y[i] - (x01 * tmp1 + x02 * tmp2 + x03 * tmp3 + x04 * tmp4);
+ }
+ return f;
+ }
+
+ // Reference data from the MINPACK problem definition.
+ private static final double[] y = {
+ 1.366, 1.191, 1.112, 1.013, 0.991,
+ 0.885, 0.831, 0.847, 0.786, 0.725,
+ 0.746, 0.679, 0.608, 0.655, 0.616,
+ 0.606, 0.602, 0.626, 0.651, 0.724,
+ 0.649, 0.649, 0.694, 0.644, 0.624,
+ 0.661, 0.612, 0.558, 0.533, 0.495,
+ 0.500, 0.423, 0.395, 0.375, 0.372,
+ 0.391, 0.396, 0.405, 0.428, 0.429,
+ 0.523, 0.562, 0.607, 0.653, 0.672,
+ 0.708, 0.633, 0.668, 0.645, 0.632,
+ 0.591, 0.559, 0.597, 0.625, 0.739,
+ 0.710, 0.729, 0.720, 0.636, 0.581,
+ 0.428, 0.292, 0.162, 0.098, 0.054
+ };
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomCirclePointGenerator.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomCirclePointGenerator.java
new file mode 100644
index 000000000..2c747157b
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomCirclePointGenerator.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import org.apache.commons.math3.random.RandomGenerator;
+import org.apache.commons.math3.random.Well44497b;
+import org.apache.commons.math3.util.MathUtils;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.distribution.RealDistribution;
+import org.apache.commons.math3.distribution.UniformRealDistribution;
+import org.apache.commons.math3.distribution.NormalDistribution;
+import org.apache.commons.math3.geometry.euclidean.twod.Vector2D;
+
+/**
+ * Factory for generating a cloud of points that approximate a circle:
+ * each point sits at a uniformly random angle on the circle, with
+ * Gaussian noise added independently to its x and y coordinates.
+ */
+public class RandomCirclePointGenerator {
+    /** Sampler for the noisy x-coordinate of a point's circle center. */
+    private final RealDistribution xDist;
+    /** Sampler for the noisy y-coordinate of a point's circle center. */
+    private final RealDistribution yDist;
+    /** Sampler for the angular position of a point on the circle. */
+    private final RealDistribution angleDist;
+    /** Radius of the circle. */
+    private final double radius;
+
+    /**
+     * @param x Abscissa of the circle center.
+     * @param y Ordinate of the circle center.
+     * @param radius Radius of the circle.
+     * @param xSigma Error on the x-coordinate of the circumference points.
+     * @param ySigma Error on the y-coordinate of the circumference points.
+     * @param seed RNG seed.
+     */
+    public RandomCirclePointGenerator(double x,
+                                      double y,
+                                      double radius,
+                                      double xSigma,
+                                      double ySigma,
+                                      long seed) {
+        // All three distributions share one underlying RNG so that a given
+        // seed reproduces the same cloud.
+        final RandomGenerator rng = new Well44497b(seed);
+        this.radius = radius;
+        xDist = new NormalDistribution(rng, x, xSigma,
+                                       NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+        yDist = new NormalDistribution(rng, y, ySigma,
+                                       NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+        angleDist = new UniformRealDistribution(rng, 0, MathUtils.TWO_PI,
+                                                UniformRealDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+    }
+
+    /**
+     * Point generator.
+     *
+     * @param n Number of points to create.
+     * @return the cloud of {@code n} points.
+     */
+    public Vector2D[] generate(int n) {
+        final Vector2D[] points = new Vector2D[n];
+        for (int i = 0; i < n; i++) {
+            points[i] = nextPoint();
+        }
+        return points;
+    }
+
+    /**
+     * Creates one noisy point on the circle. Sampling order (angle, then x,
+     * then y) is fixed because the distributions share a single RNG.
+     *
+     * @return a point.
+     */
+    private Vector2D nextPoint() {
+        final double angle = angleDist.sample();
+        final double px = xDist.sample() + radius * FastMath.cos(angle);
+        final double py = yDist.sample() + radius * FastMath.sin(angle);
+
+        return new Vector2D(px, py);
+    }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomStraightLinePointGenerator.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomStraightLinePointGenerator.java
new file mode 100644
index 000000000..91e4baecb
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/RandomStraightLinePointGenerator.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.awt.geom.Point2D;
+import org.apache.commons.math3.random.RandomGenerator;
+import org.apache.commons.math3.random.Well44497b;
+import org.apache.commons.math3.distribution.RealDistribution;
+import org.apache.commons.math3.distribution.UniformRealDistribution;
+import org.apache.commons.math3.distribution.NormalDistribution;
+
+/**
+ * Factory for generating a cloud of points that approximate a straight line.
+ */
+public class RandomStraightLinePointGenerator {
+ /** Slope. */
+ private final double slope;
+ /** Intercept. */
+ private final double intercept;
+ /** RNG for the x-coordinate. */
+ private final RealDistribution x;
+ /** RNG for the error on the y-coordinate. */
+ private final RealDistribution error;
+
+ /**
+ * The generator will create a cloud of points whose x-coordinates
+ * will be randomly sampled between {@code xLo} and {@code xHi}, and
+ * the corresponding y-coordinates will be computed as
+ *
+ * y = a x + b + N(0, error)
+ *
+ * where {@code N(mean, sigma)} is a Gaussian distribution with the
+ * given mean and standard deviation.
+ *
+ * @param a Slope.
+ * @param b Intercept.
+ * @param sigma Standard deviation on the y-coordinate of the point.
+ * @param lo Lowest value of the x-coordinate.
+ * @param hi Highest value of the x-coordinate.
+ * @param seed RNG seed.
+ */
+ public RandomStraightLinePointGenerator(double a,
+ double b,
+ double sigma,
+ double lo,
+ double hi,
+ long seed) {
+ final RandomGenerator rng = new Well44497b(seed);
+ slope = a;
+ intercept = b;
+ error = new NormalDistribution(rng, 0, sigma,
+ NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+ x = new UniformRealDistribution(rng, lo, hi,
+ UniformRealDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+ }
+
+ /**
+ * Point generator.
+ *
+ * @param n Number of points to create.
+ * @return the cloud of {@code n} points.
+ */
+ public Point2D.Double[] generate(int n) {
+ final Point2D.Double[] cloud = new Point2D.Double[n];
+ for (int i = 0; i < n; i++) {
+ cloud[i] = create();
+ }
+ return cloud;
+ }
+
+ /**
+ * Create one point.
+ *
+ * @return a point.
+ */
+ private Point2D.Double create() {
+ final double abscissa = x.sample();
+ final double yModel = slope * abscissa + intercept;
+ final double ordinate = yModel + error.sample();
+
+ return new Point2D.Double(abscissa, ordinate);
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDataset.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDataset.java
new file mode 100644
index 000000000..93dcbbf88
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDataset.java
@@ -0,0 +1,383 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.util.MathArrays;
+
+/**
+ * This class gives access to the statistical reference datasets provided by the
+ * NIST (available
+ * <a href="http://www.itl.nist.gov/div898/strd/general/dataarchive.html">here</a>).
+ * Instances of this class can be created by invocation of the
+ * {@link StatisticalReferenceDatasetFactory}.
+ */
+public abstract class StatisticalReferenceDataset {
+
+ /** The name of this dataset. */
+ private final String name;
+
+ /** The total number of observations (data points). */
+ private final int numObservations;
+
+ /** The total number of parameters. */
+ private final int numParameters;
+
+ /** The total number of starting points for the optimizations. */
+ private final int numStartingPoints;
+
+ /** The values of the predictor. */
+ private final double[] x;
+
+ /** The values of the response. */
+ private final double[] y;
+
+ /**
+ * The starting values. {@code startingValues[j][i]} is the value of the
+ * {@code i}-th parameter in the {@code j}-th set of starting values.
+ */
+ private final double[][] startingValues;
+
+ /** The certified values of the parameters. */
+ private final double[] a;
+
+ /** The certified values of the standard deviation of the parameters. */
+ private final double[] sigA;
+
+ /** The certified value of the residual sum of squares. */
+ private double residualSumOfSquares;
+
+ /** The least-squares problem. */
+ private final LeastSquaresProblem problem;
+
+ /**
+ * Creates a new instance of this class from the specified data file. The
+ * file must follow the StRD format.
+ *
+ * @param in the data file
+ * @throws IOException if an I/O error occurs
+ * @throws AssertionError if the "Data" or "Starting Values" section
+ * markers cannot be located in the file
+ */
+ public StatisticalReferenceDataset(final BufferedReader in)
+ throws IOException {
+
+ final ArrayList<String> lines = new ArrayList<String>();
+ for (String line = in.readLine(); line != null; line = in.readLine()) {
+ lines.add(line);
+ }
+ int[] index = findLineNumbers("Data", lines);
+ if (index == null) {
+ throw new AssertionError("could not find line indices for data");
+ }
+ this.numObservations = index[1] - index[0] + 1;
+ this.x = new double[this.numObservations];
+ this.y = new double[this.numObservations];
+ for (int i = 0; i < this.numObservations; i++) {
+ // index[] holds 1-based file line numbers; convert to 0-based.
+ final String line = lines.get(index[0] + i - 1);
+ final String[] tokens = line.trim().split(" ++");
+ // Data columns are in reverse order!!!
+ this.y[i] = Double.parseDouble(tokens[0]);
+ this.x[i] = Double.parseDouble(tokens[1]);
+ }
+
+ index = findLineNumbers("Starting Values", lines);
+ if (index == null) {
+ throw new AssertionError(
+ "could not find line indices for starting values");
+ }
+ this.numParameters = index[1] - index[0] + 1;
+
+ double[][] start = null;
+ this.a = new double[numParameters];
+ this.sigA = new double[numParameters];
+ for (int i = 0; i < numParameters; i++) {
+ final String line = lines.get(index[0] + i - 1);
+ final String[] tokens = line.trim().split(" ++");
+ if (start == null) {
+ // Columns: name, "=", start values..., certified value, std dev.
+ start = new double[tokens.length - 4][numParameters];
+ }
+ for (int j = 2; j < tokens.length - 2; j++) {
+ start[j - 2][i] = Double.parseDouble(tokens[j]);
+ }
+ this.a[i] = Double.parseDouble(tokens[tokens.length - 2]);
+ this.sigA[i] = Double.parseDouble(tokens[tokens.length - 1]);
+ }
+ if (start == null) {
+ throw new IOException("could not find starting values");
+ }
+ this.numStartingPoints = start.length;
+ this.startingValues = start;
+
+ double dummyDouble = Double.NaN;
+ String dummyString = null;
+ for (String line : lines) {
+ if (line.contains("Dataset Name:")) {
+ dummyString = line
+ .substring(line.indexOf("Dataset Name:") + 13,
+ line.indexOf("(")).trim();
+ }
+ if (line.contains("Residual Sum of Squares")) {
+ final String[] tokens = line.split(" ++");
+ dummyDouble = Double.parseDouble(tokens[4].trim());
+ }
+ }
+ if (Double.isNaN(dummyDouble)) {
+ throw new IOException(
+ "could not find certified value of residual sum of squares");
+ }
+ this.residualSumOfSquares = dummyDouble;
+
+ if (dummyString == null) {
+ throw new IOException("could not find dataset name");
+ }
+ this.name = dummyString;
+
+ this.problem = new LeastSquaresProblem();
+ }
+
+ /** Adapter exposing this dataset's model to the least-squares optimizers. */
+ class LeastSquaresProblem {
+ /**
+ * @return the model function, evaluating the model at every data point.
+ */
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(final double[] a) {
+ final int n = getNumObservations();
+ final double[] yhat = new double[n];
+ for (int i = 0; i < n; i++) {
+ yhat[i] = getModelValue(getX(i), a);
+ }
+ return yhat;
+ }
+ });
+ }
+
+ /**
+ * @return the Jacobian of the model function, one row per data point.
+ */
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(final double[] a)
+ throws IllegalArgumentException {
+ final int n = getNumObservations();
+ final double[][] j = new double[n][];
+ for (int i = 0; i < n; i++) {
+ j[i] = getModelDerivatives(getX(i), a);
+ }
+ return j;
+ }
+ });
+ }
+ }
+
+ /**
+ * Returns the name of this dataset.
+ *
+ * @return the name of the dataset
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Returns the total number of observations (data points).
+ *
+ * @return the number of observations
+ */
+ public int getNumObservations() {
+ return numObservations;
+ }
+
+ /**
+ * Returns a copy of the data arrays. The data is laid out as follows
+ * {@code data[0][i] = x[i]}, {@code data[1][i] = y[i]},
+ *
+ * @return the array of data points.
+ */
+ public double[][] getData() {
+ return new double[][] {
+ MathArrays.copyOf(x), MathArrays.copyOf(y)
+ };
+ }
+
+ /**
+ * Returns the x-value of the {@code i}-th data point.
+ *
+ * @param i the index of the data point
+ * @return the x-value
+ */
+ public double getX(final int i) {
+ return x[i];
+ }
+
+ /**
+ * Returns the y-value of the {@code i}-th data point.
+ *
+ * @param i the index of the data point
+ * @return the y-value
+ */
+ public double getY(final int i) {
+ return y[i];
+ }
+
+ /**
+ * Returns the total number of parameters.
+ *
+ * @return the number of parameters
+ */
+ public int getNumParameters() {
+ return numParameters;
+ }
+
+ /**
+ * Returns the certified values of the parameters.
+ *
+ * @return the values of the parameters
+ */
+ public double[] getParameters() {
+ return MathArrays.copyOf(a);
+ }
+
+ /**
+ * Returns the certified value of the {@code i}-th parameter.
+ *
+ * @param i the index of the parameter
+ * @return the value of the parameter
+ */
+ public double getParameter(final int i) {
+ return a[i];
+ }
+
+ /**
+ * Returns the certified values of the standard deviations of the parameters.
+ *
+ * @return the standard deviations of the parameters
+ */
+ public double[] getParametersStandardDeviations() {
+ return MathArrays.copyOf(sigA);
+ }
+
+ /**
+ * Returns the certified value of the standard deviation of the {@code i}-th
+ * parameter.
+ *
+ * @param i the index of the parameter
+ * @return the standard deviation of the parameter
+ */
+ public double getParameterStandardDeviation(final int i) {
+ return sigA[i];
+ }
+
+ /**
+ * Returns the certified value of the residual sum of squares.
+ *
+ * @return the residual sum of squares
+ */
+ public double getResidualSumOfSquares() {
+ return residualSumOfSquares;
+ }
+
+ /**
+ * Returns the total number of starting points (initial guesses for the
+ * optimization process).
+ *
+ * @return the number of starting points
+ */
+ public int getNumStartingPoints() {
+ return numStartingPoints;
+ }
+
+ /**
+ * Returns the {@code i}-th set of initial values of the parameters.
+ *
+ * @param i the index of the starting point
+ * @return the starting point
+ */
+ public double[] getStartingPoint(final int i) {
+ return MathArrays.copyOf(startingValues[i]);
+ }
+
+ /**
+ * Returns the least-squares problem corresponding to fitting the model to
+ * the specified data.
+ *
+ * @return the least-squares problem
+ */
+ public LeastSquaresProblem getLeastSquaresProblem() {
+ return problem;
+ }
+
+ /**
+ * Returns the value of the model for the specified values of the predictor
+ * variable and the parameters.
+ *
+ * @param x the predictor variable
+ * @param a the parameters
+ * @return the value of the model
+ */
+ public abstract double getModelValue(final double x, final double[] a);
+
+ /**
+ * Returns the values of the partial derivatives of the model with respect
+ * to the parameters.
+ *
+ * @param x the predictor variable
+ * @param a the parameters
+ * @return the partial derivatives
+ */
+ public abstract double[] getModelDerivatives(final double x,
+ final double[] a);
+
+ /**
+ * Parses the specified text lines, and extracts the indices of the first
+ * and last lines of the data defined by the specified {@code key}. This key
+ * must be one of
+ * <ul>
+ *  <li>{@code "Starting Values"},</li>
+ *  <li>{@code "Certified Values"},</li>
+ *  <li>{@code "Data"}.</li>
+ * </ul>
+ * In the NIST data files, the line indices are separated by the keywords
+ * {@code "lines"} and {@code "to"}.
+ *
+ * @param key the key of the data block to be located
+ * @param lines the lines of text to be parsed
+ * @return an array of two {@code int}s. First value is the index of the
+ * first line, second value is the index of the last line.
+ * {@code null} if the line could not be parsed.
+ */
+ private static int[] findLineNumbers(final String key,
+ final Iterable<String> lines) {
+ for (String text : lines) {
+ boolean flag = text.contains(key) && text.contains("lines") &&
+ text.contains("to") && text.contains(")");
+ if (flag) {
+ final int[] numbers = new int[2];
+ final String from = text.substring(text.indexOf("lines") + 5,
+ text.indexOf("to"));
+ numbers[0] = Integer.parseInt(from.trim());
+ final String to = text.substring(text.indexOf("to") + 2,
+ text.indexOf(")"));
+ numbers[1] = Integer.parseInt(to.trim());
+ return numbers;
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDatasetFactory.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDatasetFactory.java
new file mode 100644
index 000000000..c35910b5b
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StatisticalReferenceDatasetFactory.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * A factory to create instances of {@link StatisticalReferenceDataset} from
+ * available resources.
+ */
+public class StatisticalReferenceDatasetFactory {
+
+ private StatisticalReferenceDatasetFactory() {
+ // Do nothing
+ }
+
+ /**
+ * Creates a new buffered reader from the specified resource name.
+ *
+ * @param name the name of the resource
+ * @return a buffered reader
+ * @throws IOException if an I/O error occurred, or the resource could
+ * not be found on the classpath
+ */
+ public static BufferedReader createBufferedReaderFromResource(final String name)
+ throws IOException {
+ final InputStream resourceAsStream;
+ resourceAsStream = StatisticalReferenceDatasetFactory.class
+ .getResourceAsStream(name);
+ if (resourceAsStream == null) {
+ throw new IOException("could not find resource " + name);
+ }
+ return new BufferedReader(new InputStreamReader(resourceAsStream));
+ }
+
+ /**
+ * Creates the "Kirby2" dataset (quadratic-over-quadratic rational model,
+ * five parameters).
+ *
+ * @return the dataset
+ * @throws IOException if an I/O error occurs
+ */
+ public static StatisticalReferenceDataset createKirby2()
+ throws IOException {
+ final BufferedReader in = createBufferedReaderFromResource("Kirby2.dat");
+ StatisticalReferenceDataset dataset = null;
+ try {
+ dataset = new StatisticalReferenceDataset(in) {
+
+ @Override
+ public double getModelValue(final double x, final double[] a) {
+ // y = (a0 + a1 x + a2 x^2) / (1 + a3 x + a4 x^2)
+ final double p = a[0] + x * (a[1] + x * a[2]);
+ final double q = 1.0 + x * (a[3] + x * a[4]);
+ return p / q;
+ }
+
+ @Override
+ public double[] getModelDerivatives(final double x,
+ final double[] a) {
+ final double[] dy = new double[5];
+ final double p = a[0] + x * (a[1] + x * a[2]);
+ final double q = 1.0 + x * (a[3] + x * a[4]);
+ dy[0] = 1.0 / q;
+ dy[1] = x / q;
+ dy[2] = x * dy[1];
+ dy[3] = -x * p / (q * q);
+ dy[4] = x * dy[3];
+ return dy;
+ }
+ };
+ } finally {
+ in.close();
+ }
+ return dataset;
+ }
+
+ /**
+ * Creates the "Hahn1" dataset (cubic-over-cubic rational model,
+ * seven parameters).
+ *
+ * @return the dataset
+ * @throws IOException if an I/O error occurs
+ */
+ public static StatisticalReferenceDataset createHahn1()
+ throws IOException {
+ final BufferedReader in = createBufferedReaderFromResource("Hahn1.dat");
+ StatisticalReferenceDataset dataset = null;
+ try {
+ dataset = new StatisticalReferenceDataset(in) {
+
+ @Override
+ public double getModelValue(final double x, final double[] a) {
+ // y = (a0 + a1 x + a2 x^2 + a3 x^3) / (1 + a4 x + a5 x^2 + a6 x^3)
+ final double p = a[0] + x * (a[1] + x * (a[2] + x * a[3]));
+ final double q = 1.0 + x * (a[4] + x * (a[5] + x * a[6]));
+ return p / q;
+ }
+
+ @Override
+ public double[] getModelDerivatives(final double x,
+ final double[] a) {
+ final double[] dy = new double[7];
+ final double p = a[0] + x * (a[1] + x * (a[2] + x * a[3]));
+ final double q = 1.0 + x * (a[4] + x * (a[5] + x * a[6]));
+ dy[0] = 1.0 / q;
+ dy[1] = x * dy[0];
+ dy[2] = x * dy[1];
+ dy[3] = x * dy[2];
+ dy[4] = -x * p / (q * q);
+ dy[5] = x * dy[4];
+ dy[6] = x * dy[5];
+ return dy;
+ }
+ };
+ } finally {
+ in.close();
+ }
+ return dataset;
+ }
+
+ /**
+ * Creates the "MGH17" dataset (constant plus two decaying exponentials,
+ * five parameters).
+ *
+ * @return the dataset
+ * @throws IOException if an I/O error occurs
+ */
+ public static StatisticalReferenceDataset createMGH17()
+ throws IOException {
+ final BufferedReader in = createBufferedReaderFromResource("MGH17.dat");
+ StatisticalReferenceDataset dataset = null;
+ try {
+ dataset = new StatisticalReferenceDataset(in) {
+
+ @Override
+ public double getModelValue(final double x, final double[] a) {
+ return a[0] + a[1] * FastMath.exp(-a[3] * x) + a[2] *
+ FastMath.exp(-a[4] * x);
+ }
+
+ @Override
+ public double[] getModelDerivatives(final double x,
+ final double[] a) {
+ final double[] dy = new double[5];
+ dy[0] = 1.0;
+ dy[1] = FastMath.exp(-x * a[3]);
+ dy[2] = FastMath.exp(-x * a[4]);
+ dy[3] = -x * a[1] * dy[1];
+ dy[4] = -x * a[2] * dy[2];
+ return dy;
+ }
+ };
+ } finally {
+ in.close();
+ }
+ return dataset;
+ }
+
+ /**
+ * Creates the "Lanczos1" dataset (sum of three decaying exponentials,
+ * six parameters).
+ *
+ * @return the dataset
+ * @throws IOException if an I/O error occurs
+ */
+ public static StatisticalReferenceDataset createLanczos1()
+ throws IOException {
+ final BufferedReader in =
+ createBufferedReaderFromResource("Lanczos1.dat");
+ StatisticalReferenceDataset dataset = null;
+ try {
+ dataset = new StatisticalReferenceDataset(in) {
+
+ @Override
+ public double getModelValue(final double x, final double[] a) {
+ // NOTE: a leftover System.out.println of the parameters was
+ // removed here; it flooded test output on every evaluation.
+ return a[0] * FastMath.exp(-a[3] * x) +
+ a[1] * FastMath.exp(-a[4] * x) +
+ a[2] * FastMath.exp(-a[5] * x);
+ }
+
+ @Override
+ public double[] getModelDerivatives(final double x,
+ final double[] a) {
+ final double[] dy = new double[6];
+ dy[0] = FastMath.exp(-x * a[3]);
+ dy[1] = FastMath.exp(-x * a[4]);
+ dy[2] = FastMath.exp(-x * a[5]);
+ dy[3] = -x * a[0] * dy[0];
+ dy[4] = -x * a[1] * dy[1];
+ dy[5] = -x * a[2] * dy[2];
+ return dy;
+ }
+ };
+ } finally {
+ in.close();
+ }
+ return dataset;
+ }
+
+ /**
+ * Returns an array with all available reference datasets.
+ *
+ * @return the array of datasets
+ * @throws IOException if an I/O error occurs
+ */
+ public static StatisticalReferenceDataset[] createAll()
+ throws IOException {
+ // Made static: the constructor is private, so an instance method here
+ // was uncallable. Now includes every dataset this factory provides.
+ return new StatisticalReferenceDataset[] {
+ createKirby2(), createHahn1(), createMGH17(), createLanczos1()
+ };
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StraightLineProblem.java b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StraightLineProblem.java
new file mode 100644
index 000000000..09c16df97
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/StraightLineProblem.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.ArrayList;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.stat.regression.SimpleRegression;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+
+/**
+ * Class that models a straight line defined as {@code y = a x + b}.
+ * The parameters of problem are:
+ * <ul>
+ *  <li>{@code a}</li>
+ *  <li>{@code b}</li>
+ * </ul>
+ * The model functions are:
+ * <ul>
+ *  <li>for each pair (a, b), the y-coordinate of the line.</li>
+ * </ul>
+ */
+class StraightLineProblem {
+ /** Cloud of points assumed to be fitted by a straight line. */
+ private final ArrayList<double[]> points;
+ /** Error (on the y-coordinate of the points). */
+ private final double sigma;
+
+ /**
+ * @param error Assumed error for the y-coordinate.
+ */
+ public StraightLineProblem(double error) {
+ points = new ArrayList<double[]>();
+ sigma = error;
+ }
+
+ /**
+ * Adds a data point to the cloud.
+ *
+ * @param px x-coordinate.
+ * @param py y-coordinate.
+ */
+ public void addPoint(double px, double py) {
+ points.add(new double[] { px, py });
+ }
+
+ /**
+ * @return the list of x-coordinates.
+ */
+ public double[] x() {
+ final double[] v = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ v[i] = p[0]; // x-coordinate.
+ }
+
+ return v;
+ }
+
+ /**
+ * @return the list of y-coordinates.
+ */
+ public double[] y() {
+ final double[] v = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ v[i] = p[1]; // y-coordinate.
+ }
+
+ return v;
+ }
+
+ /**
+ * @return the optimizer target values (the observed y-coordinates).
+ */
+ public double[] target() {
+ return y();
+ }
+
+ /**
+ * @return the weight of each observation (identical for all points,
+ * since a single {@code sigma} is assumed).
+ */
+ public double[] weight() {
+ final double weight = 1 / (sigma * sigma);
+ final double[] w = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ w[i] = weight;
+ }
+
+ return w;
+ }
+
+ /**
+ * @return the model function (y-coordinate of the line at each point).
+ */
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(new MultivariateVectorFunction() {
+ public double[] value(double[] params) {
+ final Model line = new Model(params[0], params[1]);
+
+ final double[] model = new double[points.size()];
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ model[i] = line.value(p[0]);
+ }
+
+ return model;
+ }
+ });
+ }
+
+ /**
+ * @return the Jacobian of the model function.
+ */
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ public double[][] value(double[] point) {
+ return jacobian(point);
+ }
+ });
+ }
+
+ /**
+ * Directly solve the linear problem, using the {@link SimpleRegression}
+ * class.
+ */
+ public double[] solve() {
+ final SimpleRegression regress = new SimpleRegression(true);
+ for (double[] d : points) {
+ regress.addData(d[0], d[1]);
+ }
+
+ final double[] result = { regress.getSlope(), regress.getIntercept() };
+ return result;
+ }
+
+ private double[][] jacobian(double[] params) {
+ final double[][] jacobian = new double[points.size()][2];
+
+ for (int i = 0; i < points.size(); i++) {
+ final double[] p = points.get(i);
+ // Partial derivative wrt "a".
+ jacobian[i][0] = p[0];
+ // Partial derivative wrt "b".
+ jacobian[i][1] = 1;
+ }
+
+ return jacobian;
+ }
+
+ /**
+ * Linear function.
+ */
+ public static class Model implements UnivariateFunction {
+ final double a;
+ final double b;
+
+ public Model(double a,
+ double b) {
+ this.a = a;
+ this.b = b;
+ }
+
+ public double value(double x) {
+ return a * x + b;
+ }
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/univariate/BracketFinderTest.java b/src/test/java/org/apache/commons/math3/optim/univariate/BracketFinderTest.java
new file mode 100644
index 000000000..af725adaa
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/univariate/BracketFinderTest.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.optim.GoalType;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test for {@link BracketFinder}.
+ */
+public class BracketFinderTest {
+
+ @Test
+ public void testCubicMin() {
+ final BracketFinder bFind = new BracketFinder();
+ final UnivariateFunction func = new UnivariateFunction() {
+ public double value(double x) {
+ // Flatten the function below -2 so the search cannot run off
+ // towards the cubic's unbounded descent on the left.
+ if (x < -2) {
+ return value(-2);
+ }
+ else {
+ return (x - 1) * (x + 2) * (x + 3);
+ }
+ }
+ };
+
+ bFind.search(func, GoalType.MINIMIZE, -2 , -1);
+ final double tol = 1e-15;
+ // Comparing with results computed in Python.
+ Assert.assertEquals(-2, bFind.getLo(), tol);
+ Assert.assertEquals(-1, bFind.getMid(), tol);
+ Assert.assertEquals(0.61803399999999997, bFind.getHi(), tol);
+ }
+
+ @Test
+ public void testCubicMax() {
+ final BracketFinder bFind = new BracketFinder();
+ final UnivariateFunction func = new UnivariateFunction() {
+ public double value(double x) {
+ if (x < -2) {
+ return value(-2);
+ }
+ else {
+ // Negated cubic: MAXIMIZE should find the same bracket
+ // as testCubicMin does for the original function.
+ return -(x - 1) * (x + 2) * (x + 3);
+ }
+ }
+ };
+
+ bFind.search(func, GoalType.MAXIMIZE, -2 , -1);
+ final double tol = 1e-15;
+ Assert.assertEquals(-2, bFind.getLo(), tol);
+ Assert.assertEquals(-1, bFind.getMid(), tol);
+ Assert.assertEquals(0.61803399999999997, bFind.getHi(), tol);
+ }
+
+ @Test
+ public void testMinimumIsOnIntervalBoundary() {
+ final UnivariateFunction func = new UnivariateFunction() {
+ public double value(double x) {
+ return x * x;
+ }
+ };
+
+ final BracketFinder bFind = new BracketFinder();
+
+ // The minimum (x = 0) lies on a boundary of the initial interval;
+ // the returned bracket must still contain it.
+ bFind.search(func, GoalType.MINIMIZE, 0, 1);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+
+ bFind.search(func, GoalType.MINIMIZE, -1, 0);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+ }
+
+ @Test
+ public void testIntervalBoundsOrdering() {
+ final UnivariateFunction func = new UnivariateFunction() {
+ public double value(double x) {
+ return x * x;
+ }
+ };
+
+ final BracketFinder bFind = new BracketFinder();
+
+ // The bracket must contain the minimum (x = 0) regardless of the
+ // ordering of the initial bounds, even when 0 is outside them.
+ bFind.search(func, GoalType.MINIMIZE, -1, 1);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+
+ bFind.search(func, GoalType.MINIMIZE, 1, -1);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+
+ bFind.search(func, GoalType.MINIMIZE, 1, 2);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+
+ bFind.search(func, GoalType.MINIMIZE, 2, 1);
+ Assert.assertTrue(bFind.getLo() <= 0);
+ Assert.assertTrue(0 <= bFind.getHi());
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/univariate/BrentOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/univariate/BrentOptimizerTest.java
new file mode 100644
index 000000000..43b956ceb
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/univariate/BrentOptimizerTest.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+
+import org.apache.commons.math3.analysis.QuinticFunction;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.analysis.function.Sin;
+import org.apache.commons.math3.analysis.function.StepFunction;
+import org.apache.commons.math3.analysis.FunctionUtils;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.commons.math3.util.FastMath;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * @version $Id$
+ */
+public final class BrentOptimizerTest {
+
+ @Test
+ public void testSinMin() {
+ UnivariateFunction f = new Sin();
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
+ Assert.assertEquals(3 * Math.PI / 2, optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(4, 5)).getPoint(), 1e-8);
+ Assert.assertTrue(optimizer.getEvaluations() <= 50);
+ Assert.assertEquals(200, optimizer.getMaxEvaluations());
+ Assert.assertEquals(3 * Math.PI / 2, optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(1, 5)).getPoint(), 1e-8);
+ Assert.assertTrue(optimizer.getEvaluations() <= 100);
+ Assert.assertTrue(optimizer.getEvaluations() >= 15);
+ try {
+ optimizer.optimize(new MaxEval(10),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(4, 5));
+ Assert.fail("an exception should have been thrown");
+ } catch (TooManyEvaluationsException fee) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testSinMinWithValueChecker() {
+ final UnivariateFunction f = new Sin();
+ final ConvergenceChecker checker = new SimpleUnivariateValueChecker(1e-5, 1e-14);
+ // The default stopping criterion of Brent's algorithm should not
+ // pass, but the search will stop at the given relative tolerance
+ // for the function value.
+ final UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14, checker);
+ final UnivariatePointValuePair result = optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(4, 5));
+ Assert.assertEquals(3 * Math.PI / 2, result.getPoint(), 1e-3);
+ }
+
+ @Test
+ public void testBoundaries() {
+ final double lower = -1.0;
+ final double upper = +1.0;
+ UnivariateFunction f = new UnivariateFunction() {
+ public double value(double x) {
+ if (x < lower) {
+ throw new NumberIsTooSmallException(x, lower, true);
+ } else if (x > upper) {
+ throw new NumberIsTooLargeException(x, upper, true);
+ } else {
+ return x;
+ }
+ }
+ };
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
+ Assert.assertEquals(lower,
+ optimizer.optimize(new MaxEval(100),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(lower, upper)).getPoint(),
+ 1.0e-8);
+ Assert.assertEquals(upper,
+ optimizer.optimize(new MaxEval(100),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MAXIMIZE,
+ new SearchInterval(lower, upper)).getPoint(),
+ 1.0e-8);
+ }
+
+ @Test
+ public void testQuinticMin() {
+ // The function has local minima at -0.27195613 and 0.82221643.
+ UnivariateFunction f = new QuinticFunction();
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
+ Assert.assertEquals(-0.27195613, optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(-0.3, -0.2)).getPoint(), 1.0e-8);
+ Assert.assertEquals( 0.82221643, optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(0.3, 0.9)).getPoint(), 1.0e-8);
+ Assert.assertTrue(optimizer.getEvaluations() <= 50);
+
+ // search in a large interval
+ Assert.assertEquals(-0.27195613, optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(-1.0, 0.2)).getPoint(), 1.0e-8);
+ Assert.assertTrue(optimizer.getEvaluations() <= 50);
+ }
+
+ @Test
+ public void testQuinticMinStatistics() {
+ // The function has local minima at -0.27195613 and 0.82221643.
+ UnivariateFunction f = new QuinticFunction();
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-11, 1e-14);
+
+ final DescriptiveStatistics[] stat = new DescriptiveStatistics[2];
+ for (int i = 0; i < stat.length; i++) {
+ stat[i] = new DescriptiveStatistics();
+ }
+
+ final double min = -0.75;
+ final double max = 0.25;
+ final int nSamples = 200;
+ final double delta = (max - min) / nSamples;
+ for (int i = 0; i < nSamples; i++) {
+ final double start = min + i * delta;
+ stat[0].addValue(optimizer.optimize(new MaxEval(40),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(min, max, start)).getPoint());
+ stat[1].addValue(optimizer.getEvaluations());
+ }
+
+ final double meanOptValue = stat[0].getMean();
+ final double medianEval = stat[1].getPercentile(50);
+ Assert.assertTrue(meanOptValue > -0.2719561281);
+ Assert.assertTrue(meanOptValue < -0.2719561280);
+ Assert.assertEquals(23, (int) medianEval);
+ }
+
+ @Test
+ public void testQuinticMax() {
+ // The quintic function has zeros at 0, +-0.5 and +-1.
+ // The function has a local maximum at 0.27195613.
+ UnivariateFunction f = new QuinticFunction();
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-12, 1e-14);
+ Assert.assertEquals(0.27195613, optimizer.optimize(new MaxEval(100),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MAXIMIZE,
+ new SearchInterval(0.2, 0.3)).getPoint(), 1e-8);
+ try {
+ optimizer.optimize(new MaxEval(5),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MAXIMIZE,
+ new SearchInterval(0.2, 0.3));
+ Assert.fail("an exception should have been thrown");
+ } catch (TooManyEvaluationsException miee) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testMinEndpoints() {
+ UnivariateFunction f = new Sin();
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-8, 1e-14);
+
+ // endpoint is minimum
+ double result = optimizer.optimize(new MaxEval(50),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(3 * Math.PI / 2, 5)).getPoint();
+ Assert.assertEquals(3 * Math.PI / 2, result, 1e-6);
+
+ result = optimizer.optimize(new MaxEval(50),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(4, 3 * Math.PI / 2)).getPoint();
+ Assert.assertEquals(3 * Math.PI / 2, result, 1e-6);
+ }
+
+ @Test
+ public void testMath832() {
+ final UnivariateFunction f = new UnivariateFunction() {
+ public double value(double x) {
+ final double sqrtX = FastMath.sqrt(x);
+ final double a = 1e2 * sqrtX;
+ final double b = 1e6 / x;
+ final double c = 1e4 / sqrtX;
+
+ return a + b + c;
+ }
+ };
+
+ UnivariateOptimizer optimizer = new BrentOptimizer(1e-10, 1e-8);
+ final double result = optimizer.optimize(new MaxEval(1483),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(Double.MIN_VALUE,
+ Double.MAX_VALUE)).getPoint();
+
+ Assert.assertEquals(804.9355825, result, 1e-6);
+ }
+
+ /**
+ * Contrived example showing that prior to the resolution of MATH-855
+ * (second revision), the algorithm would not return the best point if
+ * it happened to be the initial guess.
+ */
+ @Test
+ public void testKeepInitIfBest() {
+ final double minSin = 3 * Math.PI / 2;
+ final double offset = 1e-8;
+ final double delta = 1e-7;
+ final UnivariateFunction f1 = new Sin();
+ final UnivariateFunction f2 = new StepFunction(new double[] { minSin, minSin + offset, minSin + 2 * offset},
+ new double[] { 0, -1, 0 });
+ final UnivariateFunction f = FunctionUtils.add(f1, f2);
+ // A slightly less stringent tolerance would make the test pass
+ // even with the previous implementation.
+ final double relTol = 1e-8;
+ final UnivariateOptimizer optimizer = new BrentOptimizer(relTol, 1e-100);
+ final double init = minSin + 1.5 * offset;
+ final UnivariatePointValuePair result
+ = optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(minSin - 6.789 * delta,
+ minSin + 9.876 * delta,
+ init));
+ final int numEval = optimizer.getEvaluations();
+
+ final double sol = result.getPoint();
+ final double expected = init;
+
+// System.out.println("numEval=" + numEval);
+// System.out.println("min=" + init + " f=" + f.value(init));
+// System.out.println("sol=" + sol + " f=" + f.value(sol));
+// System.out.println("exp=" + expected + " f=" + f.value(expected));
+
+ Assert.assertTrue("Best point not reported", f.value(sol) <= f.value(expected));
+ }
+
+ /**
+ * Contrived example showing that prior to the resolution of MATH-855,
+ * the algorithm, by always returning the last evaluated point, would
+ * sometimes not report the best point it had found.
+ */
+ @Test
+ public void testMath855() {
+ final double minSin = 3 * Math.PI / 2;
+ final double offset = 1e-8;
+ final double delta = 1e-7;
+ final UnivariateFunction f1 = new Sin();
+ final UnivariateFunction f2 = new StepFunction(new double[] { minSin, minSin + offset, minSin + 5 * offset },
+ new double[] { 0, -1, 0 });
+ final UnivariateFunction f = FunctionUtils.add(f1, f2);
+ final UnivariateOptimizer optimizer = new BrentOptimizer(1e-8, 1e-100);
+ final UnivariatePointValuePair result
+ = optimizer.optimize(new MaxEval(200),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(minSin - 6.789 * delta,
+ minSin + 9.876 * delta));
+ final int numEval = optimizer.getEvaluations();
+
+ final double sol = result.getPoint();
+ final double expected = 4.712389027602411;
+
+ // System.out.println("min=" + (minSin + offset) + " f=" + f.value(minSin + offset));
+ // System.out.println("sol=" + sol + " f=" + f.value(sol));
+ // System.out.println("exp=" + expected + " f=" + f.value(expected));
+
+ Assert.assertTrue("Best point not reported", f.value(sol) <= f.value(expected));
+ }
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizerTest.java b/src/test/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizerTest.java
new file mode 100644
index 000000000..9c918554a
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizerTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.analysis.QuinticFunction;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.analysis.function.Sin;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MultiStartUnivariateOptimizerTest {
+ @Test(expected=MathIllegalStateException.class)
+ public void testMissingMaxEval() {
+ UnivariateOptimizer underlying = new BrentOptimizer(1e-10, 1e-14);
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(44428400075l);
+ MultiStartUnivariateOptimizer optimizer = new MultiStartUnivariateOptimizer(underlying, 10, g);
+ optimizer.optimize(new UnivariateObjectiveFunction(new Sin()),
+ GoalType.MINIMIZE,
+ new SearchInterval(-1, 1));
+ }
+ @Test(expected=MathIllegalStateException.class)
+ public void testMissingSearchInterval() {
+ UnivariateOptimizer underlying = new BrentOptimizer(1e-10, 1e-14);
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(44428400075l);
+ MultiStartUnivariateOptimizer optimizer = new MultiStartUnivariateOptimizer(underlying, 10, g);
+ optimizer.optimize(new MaxEval(300),
+ new UnivariateObjectiveFunction(new Sin()),
+ GoalType.MINIMIZE);
+ }
+
+ @Test
+ public void testSinMin() {
+ UnivariateFunction f = new Sin();
+ UnivariateOptimizer underlying = new BrentOptimizer(1e-10, 1e-14);
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(44428400075l);
+ MultiStartUnivariateOptimizer optimizer = new MultiStartUnivariateOptimizer(underlying, 10, g);
+ optimizer.optimize(new MaxEval(300),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(-100.0, 100.0));
+ UnivariatePointValuePair[] optima = optimizer.getOptima();
+ for (int i = 1; i < optima.length; ++i) {
+ double d = (optima[i].getPoint() - optima[i-1].getPoint()) / (2 * FastMath.PI);
+ Assert.assertTrue(FastMath.abs(d - FastMath.rint(d)) < 1.0e-8);
+ Assert.assertEquals(-1.0, f.value(optima[i].getPoint()), 1.0e-10);
+ Assert.assertEquals(f.value(optima[i].getPoint()), optima[i].getValue(), 1.0e-10);
+ }
+ Assert.assertTrue(optimizer.getEvaluations() > 200);
+ Assert.assertTrue(optimizer.getEvaluations() < 300);
+ }
+
+ @Test
+ public void testQuinticMin() {
+ // The quintic function has zeros at 0, +-0.5 and +-1.
+ // The function has extrema (first derivative is zero) at 0.27195613 and 0.82221643,
+ UnivariateFunction f = new QuinticFunction();
+ UnivariateOptimizer underlying = new BrentOptimizer(1e-9, 1e-14);
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(4312000053L);
+ MultiStartUnivariateOptimizer optimizer = new MultiStartUnivariateOptimizer(underlying, 5, g);
+
+ UnivariatePointValuePair optimum
+ = optimizer.optimize(new MaxEval(300),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(-0.3, -0.2));
+ Assert.assertEquals(-0.27195613, optimum.getPoint(), 1e-9);
+ Assert.assertEquals(-0.0443342695, optimum.getValue(), 1e-9);
+
+ UnivariatePointValuePair[] optima = optimizer.getOptima();
+ for (int i = 0; i < optima.length; ++i) {
+ Assert.assertEquals(f.value(optima[i].getPoint()), optima[i].getValue(), 1e-9);
+ }
+ Assert.assertTrue(optimizer.getEvaluations() >= 50);
+ Assert.assertTrue(optimizer.getEvaluations() <= 100);
+ }
+
+ @Test
+ public void testBadFunction() {
+ UnivariateFunction f = new UnivariateFunction() {
+ public double value(double x) {
+ if (x < 0) {
+ throw new LocalException();
+ }
+ return 0;
+ }
+ };
+ UnivariateOptimizer underlying = new BrentOptimizer(1e-9, 1e-14);
+ JDKRandomGenerator g = new JDKRandomGenerator();
+ g.setSeed(4312000053L);
+ MultiStartUnivariateOptimizer optimizer = new MultiStartUnivariateOptimizer(underlying, 5, g);
+
+ try {
+ optimizer.optimize(new MaxEval(300),
+ new UnivariateObjectiveFunction(f),
+ GoalType.MINIMIZE,
+ new SearchInterval(-0.3, -0.2));
+ Assert.fail();
+ } catch (LocalException e) {
+ // Expected.
+ }
+
+ // Ensure that the exception was thrown because no optimum was found.
+ Assert.assertTrue(optimizer.getOptima()[0] == null);
+ }
+
+ private static class LocalException extends RuntimeException {
+ private static final long serialVersionUID = 1194682757034350629L;
+ }
+
+}
diff --git a/src/test/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueCheckerTest.java b/src/test/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueCheckerTest.java
new file mode 100644
index 000000000..e7b790472
--- /dev/null
+++ b/src/test/java/org/apache/commons/math3/optim/univariate/SimpleUnivariateValueCheckerTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class SimpleUnivariateValueCheckerTest {
+ @Test(expected=NotStrictlyPositiveException.class)
+ public void testIterationCheckPrecondition() {
+ new SimpleUnivariateValueChecker(1e-1, 1e-2, 0);
+ }
+
+ @Test
+ public void testIterationCheck() {
+ final int max = 10;
+ final SimpleUnivariateValueChecker checker = new SimpleUnivariateValueChecker(1e-1, 1e-2, max);
+ Assert.assertTrue(checker.converged(max, null, null));
+ Assert.assertTrue(checker.converged(max + 1, null, null));
+ }
+
+ @Test
+ public void testIterationCheckDisabled() {
+ final SimpleUnivariateValueChecker checker = new SimpleUnivariateValueChecker(1e-8, 1e-8);
+
+ final UnivariatePointValuePair a = new UnivariatePointValuePair(1d, 1d);
+ final UnivariatePointValuePair b = new UnivariatePointValuePair(10d, 10d);
+
+ Assert.assertFalse(checker.converged(-1, a, b));
+ Assert.assertFalse(checker.converged(0, a, b));
+ Assert.assertFalse(checker.converged(1000000, a, b));
+
+ Assert.assertTrue(checker.converged(-1, a, a));
+ Assert.assertTrue(checker.converged(-1, b, b));
+ }
+
+}