Merged PoissonDistribution and PoissonDistributionImpl (MATH-711).

Merged TDistribution and TDistributionImpl (MATH-711).
Merged WeibullDistribution and WeibullDistributionImpl (MATH-711).

git-svn-id: https://svn.apache.org/repos/asf/commons/proper/math/trunk@1206434 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Sebastien Brisard 2011-11-26 13:23:27 +00:00
parent fda34e0376
commit d161d473bb
17 changed files with 771 additions and 988 deletions

View File

@ -103,7 +103,7 @@ public abstract class AbstractDistribution
* distribution.
*
* @return the variance (possibly Double.POSITIVE_INFINITY as
* for certain cases in {@link TDistributionImpl}) or
* for certain cases in {@link TDistribution}) or
* Double.NaN if it's not defined
*/
public double getNumericalVariance() {

View File

@ -73,7 +73,7 @@ public interface Distribution {
* distribution.
*
* @return the variance (possibly Double.POSITIVE_INFINITY as
* for certain cases in {@link TDistributionImpl}) or
* for certain cases in {@link TDistribution}) or
* Double.NaN if it's not defined
*/
double getNumericalVariance();

View File

@ -16,33 +16,253 @@
*/
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.MathUtils;
import org.apache.commons.math.util.FastMath;
/**
* Interface representing the Poisson Distribution.
*
* <p>
* References:
* <ul>
* <li><a href="http://mathworld.wolfram.com/PoissonDistribution.html">
* Poisson distribution</a></li>
* </ul>
* </p>
* Implementation of the Poisson distribution.
*
* @see <a href="http://en.wikipedia.org/wiki/Poisson_distribution">Poisson distribution (Wikipedia)</a>
* @see <a href="http://mathworld.wolfram.com/PoissonDistribution.html">Poisson distribution (MathWorld)</a>
* @version $Id$
*/
public interface PoissonDistribution extends IntegerDistribution {
public class PoissonDistribution extends AbstractIntegerDistribution
implements Serializable {
/**
* Default maximum number of iterations for cumulative probability calculations.
* @since 2.1
*/
public static final int DEFAULT_MAX_ITERATIONS = 10000000;
/**
* Default convergence criterion.
* @since 2.1
*/
public static final double DEFAULT_EPSILON = 1e-12;
/** Serializable version identifier. */
private static final long serialVersionUID = -3349935121172596109L;
/** Distribution used to compute normal approximation. */
private final NormalDistribution normal;
/** Mean of the distribution. */
private final double mean;
/**
* Maximum number of iterations for cumulative probability. Cumulative
* probabilities are estimated using either Lanczos series approximation of
* {@link Gamma#regularizedGammaP(double, double, double, int)}
* or continued fraction approximation of
* {@link Gamma#regularizedGammaQ(double, double, double, int)}.
*/
private final int maxIterations;
/** Convergence criterion for cumulative probability. */
private final double epsilon;
/**
* Creates a new Poisson distribution with specified mean.
*
* @param p the Poisson mean
* @throws NotStrictlyPositiveException if {@code p <= 0}.
*/
public PoissonDistribution(double p) {
this(p, DEFAULT_EPSILON, DEFAULT_MAX_ITERATIONS);
}
/**
* Creates a new Poisson distribution with specified mean, convergence
* criterion and maximum number of iterations.
*
* @param p Poisson mean.
* @param epsilon Convergence criterion for cumulative probabilities.
* @param maxIterations the maximum number of iterations for cumulative
* probabilities.
* @since 2.1
*/
public PoissonDistribution(double p, double epsilon, int maxIterations) {
if (p <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.MEAN, p);
}
mean = p;
normal = new NormalDistribution(p, FastMath.sqrt(p));
this.epsilon = epsilon;
this.maxIterations = maxIterations;
}
/**
* Creates a new Poisson distribution with the specified mean and
* convergence criterion.
*
* @param p Poisson mean.
* @param epsilon Convergence criterion for cumulative probabilities.
* @since 2.1
*/
public PoissonDistribution(double p, double epsilon) {
this(p, epsilon, DEFAULT_MAX_ITERATIONS);
}
/**
* Creates a new Poisson distribution with the specified mean and maximum
* number of iterations.
*
* @param p Poisson mean.
* @param maxIterations Maximum number of iterations for cumulative
* probabilities.
* @since 2.1
*/
public PoissonDistribution(double p, int maxIterations) {
this(p, DEFAULT_EPSILON, maxIterations);
}
/**
* Get the mean for the distribution.
*
* @return the mean for the distribution.
*/
double getMean();
public double getMean() {
return mean;
}
/** {@inheritDoc} */
public double probability(int x) {
double ret;
if (x < 0 || x == Integer.MAX_VALUE) {
ret = 0.0;
} else if (x == 0) {
ret = FastMath.exp(-mean);
} else {
ret = FastMath.exp(-SaddlePointExpansion.getStirlingError(x)
- SaddlePointExpansion.getDeviancePart(x, mean))
/ FastMath.sqrt(MathUtils.TWO_PI * x);
}
return ret;
}
/** {@inheritDoc} */
@Override
public double cumulativeProbability(int x) {
if (x < 0) {
return 0;
}
if (x == Integer.MAX_VALUE) {
return 1;
}
return Gamma.regularizedGammaQ((double) x + 1, mean, epsilon,
maxIterations);
}
/**
* Calculates the Poisson distribution function using a normal approximation.
* Calculates the Poisson distribution function using a normal
* approximation. The {@code N(mean, sqrt(mean))} distribution is used
* to approximate the Poisson distribution. The computation uses
* "half-correction" (evaluating the normal distribution function at
* {@code x + 0.5}).
*
* @param x the upper bound, inclusive
* @return the distribution function value calculated using a normal approximation
* @param x Upper bound, inclusive.
* @return the distribution function value calculated using a normal
* approximation.
*/
double normalApproximateProbability(int x) ;
public double normalApproximateProbability(int x) {
// calculate the probability using half-correction
return normal.cumulativeProbability(x + 0.5);
}
/**
* {@inheritDoc}
* <p>
* <strong>Algorithm Description</strong>:
* <ul>
* <li>For small means, uses simulation of a Poisson process
* using Uniform deviates, as described
* <a href="http://irmi.epfl.ch/cmos/Pmmi/interactive/rng7.htm"> here</a>.
* The Poisson process (and hence value returned) is bounded by 1000 * mean.
* </li>
* <li>For large means, uses the rejection algorithm described in
* <quote>
* Devroye, Luc. (1981).<i>The Computer Generation of Poisson Random Variables</i>
* <strong>Computing</strong> vol. 26 pp. 197-207.
* </quote>
* </li>
* </ul>
* </p>
*
* @return a random value.
* @since 2.2
*/
@Override
public int sample() {
return (int) FastMath.min(randomData.nextPoisson(mean), Integer.MAX_VALUE);
}
/** {@inheritDoc} */
@Override
protected int getDomainLowerBound(double p) {
return 0;
}
/** {@inheritDoc} */
@Override
protected int getDomainUpperBound(double p) {
return Integer.MAX_VALUE;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always 0 no matter the mean parameter.
*
* @return lower bound of the support (always 0)
*/
@Override
public int getSupportLowerBound() {
return 0;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is positive infinity,
* regardless of the parameter values. There is no integer infinity,
* so this method returns {@code Integer.MAX_VALUE} and
* {@link #isSupportUpperBoundInclusive()} returns {@code true}.
*
* @return upper bound of the support (always {@code Integer.MAX_VALUE} for
* positive infinity)
*/
@Override
public int getSupportUpperBound() {
return Integer.MAX_VALUE;
}
/**
* {@inheritDoc}
*
* For mean parameter {@code p}, the mean is {@code p}.
*/
@Override
protected double calculateNumericalMean() {
return getMean();
}
/**
* {@inheritDoc}
*
* For mean parameter {@code p}, the variance is {@code p}.
*/
@Override
protected double calculateNumericalVariance() {
return getMean();
}
/** {@inheritDoc} */
@Override
public boolean isSupportUpperBoundInclusive() {
return true;
}
}
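
Editorial note: a minimal usage sketch of the merged PoissonDistribution API shown in this diff (construction, PMF via the saddle point expansion, CDF via the regularized gamma function, the half-corrected normal approximation, and sampling). The parameter values are arbitrary illustrations, not taken from the commit.

import org.apache.commons.math.distribution.PoissonDistribution;

public class PoissonUsageSketch {
    public static void main(String[] args) throws Exception {
        PoissonDistribution dist = new PoissonDistribution(100.0);

        // P(X = 95): probability mass, computed with the saddle point expansion.
        double pmf = dist.probability(95);

        // P(X <= 110): cumulative probability via Gamma.regularizedGammaQ(x + 1, mean).
        double cdf = dist.cumulativeProbability(110);

        // Normal approximation N(mean, sqrt(mean)) with half-correction at x + 0.5.
        double approx = dist.normalApproximateProbability(110);

        // A random deviate; small and large means use the algorithms described in the Javadoc.
        int draw = dist.sample();

        // For a Poisson(lambda) distribution, mean and variance are both lambda.
        System.out.printf("pmf=%.6f cdf=%.6f approx=%.6f draw=%d mean=%.1f var=%.1f%n",
                pmf, cdf, approx, draw, dist.getNumericalMean(), dist.getNumericalVariance());
    }
}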

View File

@ -1,288 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.MathUtils;
import org.apache.commons.math.util.FastMath;
/**
* Implementation for the {@link PoissonDistribution}.
*
* @version $Id$
*/
public class PoissonDistributionImpl extends AbstractIntegerDistribution
implements PoissonDistribution, Serializable {
/**
* Default maximum number of iterations for cumulative probability calculations.
* @since 2.1
*/
public static final int DEFAULT_MAX_ITERATIONS = 10000000;
/**
* Default convergence criterion.
* @since 2.1
*/
public static final double DEFAULT_EPSILON = 1e-12;
/** Serializable version identifier. */
private static final long serialVersionUID = -3349935121172596109L;
/** Distribution used to compute normal approximation. */
private final NormalDistribution normal;
/** Mean of the distribution. */
private final double mean;
/**
* Maximum number of iterations for cumulative probability.
*
* Cumulative probabilities are estimated using either Lanczos series approximation of
* Gamma#regularizedGammaP or continued fraction approximation of Gamma#regularizedGammaQ.
*/
private final int maxIterations;
/**
* Convergence criterion for cumulative probability.
*/
private final double epsilon;
/**
* Create a new Poisson distribution with the given mean. The mean value
* must be positive; otherwise a <code>NotStrictlyPositiveException</code> is thrown.
*
* @param p the Poisson mean
* @throws NotStrictlyPositiveException if {@code p <= 0}.
*/
public PoissonDistributionImpl(double p) {
this(p, DEFAULT_EPSILON, DEFAULT_MAX_ITERATIONS);
}
/**
* Create a new Poisson distribution with the given mean, convergence criterion
* and maximum number of iterations.
*
* @param p Poisson mean.
* @param epsilon Convergence criterion for cumulative probabilities.
* @param maxIterations the maximum number of iterations for cumulative
* probabilities.
* @since 2.1
*/
public PoissonDistributionImpl(double p, double epsilon, int maxIterations) {
if (p <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.MEAN, p);
}
mean = p;
normal = new NormalDistribution(p, FastMath.sqrt(p));
this.epsilon = epsilon;
this.maxIterations = maxIterations;
}
/**
* Create a new Poisson distribution with the given mean and convergence criterion.
*
* @param p Poisson mean.
* @param epsilon Convergence criterion for cumulative probabilities.
* @since 2.1
*/
public PoissonDistributionImpl(double p, double epsilon) {
this(p, epsilon, DEFAULT_MAX_ITERATIONS);
}
/**
* Create a new Poisson distribution with the given mean and maximum number of iterations.
*
* @param p Poisson mean.
* @param maxIterations Maximum number of iterations for cumulative probabilities.
* @since 2.1
*/
public PoissonDistributionImpl(double p, int maxIterations) {
this(p, DEFAULT_EPSILON, maxIterations);
}
/**
* {@inheritDoc}
*/
public double getMean() {
return mean;
}
/**
* The probability mass function {@code P(X = x)} for a Poisson distribution.
*
* @param x Value at which the probability density function is evaluated.
* @return the value of the probability mass function at {@code x}.
*/
public double probability(int x) {
double ret;
if (x < 0 || x == Integer.MAX_VALUE) {
ret = 0.0;
} else if (x == 0) {
ret = FastMath.exp(-mean);
} else {
ret = FastMath.exp(-SaddlePointExpansion.getStirlingError(x) -
SaddlePointExpansion.getDeviancePart(x, mean)) /
FastMath.sqrt(MathUtils.TWO_PI * x);
}
return ret;
}
/**
* The probability distribution function {@code P(X <= x)} for a Poisson
* distribution.
*
* @param x Value at which the CDF is evaluated.
* @return the Poisson distribution function evaluated at {@code x}.
*/
@Override
public double cumulativeProbability(int x) {
if (x < 0) {
return 0;
}
if (x == Integer.MAX_VALUE) {
return 1;
}
return Gamma.regularizedGammaQ((double) x + 1, mean, epsilon, maxIterations);
}
/**
* Calculates the Poisson distribution function using a normal
* approximation. The {@code N(mean, sqrt(mean))} distribution is used
* to approximate the Poisson distribution.
* The computation uses "half-correction" (evaluating the normal
* distribution function at {@code x + 0.5}).
*
* @param x Upper bound, inclusive.
* @return the distribution function value calculated using a normal
* approximation.
*/
public double normalApproximateProbability(int x) {
// calculate the probability using half-correction
return normal.cumulativeProbability(x + 0.5);
}
/**
* Generates a random value sampled from this distribution.
* <br/>
* <strong>Algorithm Description</strong>:
* <ul>
* <li>For small means, uses simulation of a Poisson process
* using Uniform deviates, as described
* <a href="http://irmi.epfl.ch/cmos/Pmmi/interactive/rng7.htm"> here</a>.
* The Poisson process (and hence value returned) is bounded by 1000 * mean.
* </li>
* <li>For large means, uses the rejection algorithm described in
* <quote>
* Devroye, Luc. (1981).<i>The Computer Generation of Poisson Random Variables</i>
* <strong>Computing</strong> vol. 26 pp. 197-207.
* </quote>
* </li>
* </ul>
*
* @return a random value.
* @since 2.2
*/
@Override
public int sample() {
return (int) FastMath.min(randomData.nextPoisson(mean), Integer.MAX_VALUE);
}
/**
* Access the domain value lower bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the domain lower bound.
*/
@Override
protected int getDomainLowerBound(double p) {
return 0;
}
/**
* Access the domain value upper bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the domain upper bound.
*/
@Override
protected int getDomainUpperBound(double p) {
return Integer.MAX_VALUE;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always 0 no matter the mean parameter.
*
* @return lower bound of the support (always 0)
*/
@Override
public int getSupportLowerBound() {
return 0;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is positive infinity,
* regardless of the parameter values. There is no integer infinity,
* so this method returns <code>Integer.MAX_VALUE</code> and
* {@link #isSupportUpperBoundInclusive()} returns <code>true</code>.
*
* @return upper bound of the support (always <code>Integer.MAX_VALUE</code> for positive infinity)
*/
@Override
public int getSupportUpperBound() {
return Integer.MAX_VALUE;
}
/**
* {@inheritDoc}
*
* For mean parameter <code>p</code>, the mean is <code>p</code>
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalMean() {
return getMean();
}
/**
* {@inheritDoc}
*
* For mean parameter <code>p</code>, the variance is <code>p</code>
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalVariance() {
return getMean();
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSupportUpperBoundInclusive() {
return true;
}
}

View File

@ -16,24 +16,232 @@
*/
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Beta;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.FastMath;
/**
* Student's t-Distribution.
*
* <p>
* References:
* <ul>
* <li><a href="http://mathworld.wolfram.com/Studentst-Distribution.html">
* Student's t-Distribution</a></li>
* </ul>
* </p>
* Implementation of Student's t-distribution.
*
* @see <a href="http://en.wikipedia.org/wiki/Student&apos;s_t-distribution">Student's t-distribution (Wikipedia)</a>
* @see <a href="http://mathworld.wolfram.com/Studentst-Distribution.html">Student's t-distribution (MathWorld)</a>
* @version $Id$
*/
public interface TDistribution extends ContinuousDistribution {
public class TDistribution extends AbstractContinuousDistribution
implements Serializable {
/**
* Access the number of degrees of freedom.
* Default inverse cumulative probability accuracy.
* @since 2.1
*/
public static final double DEFAULT_INVERSE_ABSOLUTE_ACCURACY = 1e-9;
/** Serializable version identifier */
private static final long serialVersionUID = -5852615386664158222L;
/** The degrees of freedom. */
private final double degreesOfFreedom;
/** Inverse cumulative probability accuracy. */
private final double solverAbsoluteAccuracy;
/**
* Create a t distribution using the given degrees of freedom and the
* specified inverse cumulative probability absolute accuracy.
*
* @param degreesOfFreedom Degrees of freedom.
* @param inverseCumAccuracy the maximum absolute error in inverse
* cumulative probability estimates
* (defaults to {@link #DEFAULT_INVERSE_ABSOLUTE_ACCURACY}).
* @throws NotStrictlyPositiveException if {@code degreesOfFreedom <= 0}
* @since 2.1
*/
public TDistribution(double degreesOfFreedom, double inverseCumAccuracy)
throws NotStrictlyPositiveException {
if (degreesOfFreedom <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.DEGREES_OF_FREEDOM,
degreesOfFreedom);
}
this.degreesOfFreedom = degreesOfFreedom;
solverAbsoluteAccuracy = inverseCumAccuracy;
}
/**
* Create a t distribution using the given degrees of freedom.
*
* @param degreesOfFreedom Degrees of freedom.
* @throws NotStrictlyPositiveException if {@code degreesOfFreedom <= 0}
*/
public TDistribution(double degreesOfFreedom)
throws NotStrictlyPositiveException {
this(degreesOfFreedom, DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
}
/**
* Access the degrees of freedom.
*
* @return the degrees of freedom.
*/
double getDegreesOfFreedom();
public double getDegreesOfFreedom() {
return degreesOfFreedom;
}
/** {@inheritDoc} */
public double density(double x) {
final double n = degreesOfFreedom;
final double nPlus1Over2 = (n + 1) / 2;
return FastMath.exp(Gamma.logGamma(nPlus1Over2)
- 0.5 * (FastMath.log(FastMath.PI)
+ FastMath.log(n))
- Gamma.logGamma(n/2)
- nPlus1Over2 * FastMath.log(1 + x * x /n));
}
/** {@inheritDoc} */
public double cumulativeProbability(double x) {
double ret;
if (x == 0) {
ret = 0.5;
} else {
double t =
Beta.regularizedBeta(
degreesOfFreedom / (degreesOfFreedom + (x * x)),
0.5 * degreesOfFreedom,
0.5);
if (x < 0.0) {
ret = 0.5 * t;
} else {
ret = 1.0 - 0.5 * t;
}
}
return ret;
}
/**
* {@inheritDoc}
*
* Returns {@code Double.NEGATIVE_INFINITY} when {@code p = 0}
* and {@code Double.POSITIVE_INFINITY} when {@code p = 1}.
*/
@Override
public double inverseCumulativeProbability(final double p) {
if (p == 0) {
return Double.NEGATIVE_INFINITY;
}
if (p == 1) {
return Double.POSITIVE_INFINITY;
}
return super.inverseCumulativeProbability(p);
}
/** {@inheritDoc} */
@Override
protected double getDomainLowerBound(double p) {
return -Double.MAX_VALUE;
}
/** {@inheritDoc} */
@Override
protected double getDomainUpperBound(double p) {
return Double.MAX_VALUE;
}
/** {@inheritDoc} */
@Override
protected double getInitialDomain(double p) {
return 0;
}
/** {@inheritDoc} */
@Override
protected double getSolverAbsoluteAccuracy() {
return solverAbsoluteAccuracy;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always negative infinity no matter the
* parameters.
*
* @return lower bound of the support (always
* {@code Double.NEGATIVE_INFINITY})
*/
@Override
public double getSupportLowerBound() {
return Double.NEGATIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is always positive infinity no matter the
* parameters.
*
* @return upper bound of the support (always
* {@code Double.POSITIVE_INFINITY})
*/
@Override
public double getSupportUpperBound() {
return Double.POSITIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* For degrees of freedom parameter {@code df}, the mean is
* <ul>
* <li>if {@code df > 1} then {@code 0},</li>
* <li>else undefined ({@code Double.NaN}).</li>
* </ul>
*/
@Override
protected double calculateNumericalMean() {
final double df = getDegreesOfFreedom();
if (df > 1) {
return 0;
}
return Double.NaN;
}
/**
* {@inheritDoc}
*
* For degrees of freedom parameter {@code df}, the variance is
* <ul>
* <li>if {@code df > 2} then {@code df / (df - 2)},</li>
* <li>if {@code 1 < df <= 2} then positive infinity
* ({@code Double.POSITIVE_INFINITY}),</li>
* <li>else undefined ({@code Double.NaN}).</li>
* </ul>
*/
@Override
protected double calculateNumericalVariance() {
final double df = getDegreesOfFreedom();
if (df > 2) {
return df / (df - 2);
}
if (df > 1 && df <= 2) {
return Double.POSITIVE_INFINITY;
}
return Double.NaN;
}
/** {@inheritDoc} */
@Override
public boolean isSupportLowerBoundInclusive() {
return false;
}
/** {@inheritDoc} */
@Override
public boolean isSupportUpperBoundInclusive() {
return false;
}
}
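
Editorial note: a similar sketch for the merged TDistribution, exercising the density/CDF handling, the degenerate inverse CDF at the symmetry point, and the documented moment rules. The degrees-of-freedom values are arbitrary.

import org.apache.commons.math.distribution.TDistribution;

public class TDistributionUsageSketch {
    public static void main(String[] args) throws Exception {
        TDistribution t5 = new TDistribution(5.0);

        // By symmetry, the CDF at 0 is exactly 0.5 (the special case in cumulativeProbability).
        double half = t5.cumulativeProbability(0.0);

        // Two-sided 95% critical value via the inverse CDF.
        double critical = t5.inverseCumulativeProbability(0.975);

        // Documented moment rules: for df > 2 the variance is df / (df - 2), ...
        double var5 = t5.getNumericalVariance();                        // 5 / 3
        // ... for 1 < df <= 2 it is positive infinity, and for df <= 1 it is NaN.
        double varInf = new TDistribution(1.5).getNumericalVariance();  // +Infinity
        double varNaN = new TDistribution(1.0).getNumericalVariance();  // NaN

        System.out.printf("F(0)=%.2f t(0.975, df=5)=%.4f var=%.4f %s %s%n",
                half, critical, var5, varInf, varNaN);
    }
}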

View File

@ -1,278 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Beta;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.FastMath;
/**
* Default implementation of
* {@link org.apache.commons.math.distribution.TDistribution}.
*
* @version $Id$
*/
public class TDistributionImpl
extends AbstractContinuousDistribution
implements TDistribution, Serializable {
/**
* Default inverse cumulative probability accuracy.
* @since 2.1
*/
public static final double DEFAULT_INVERSE_ABSOLUTE_ACCURACY = 1e-9;
/** Serializable version identifier */
private static final long serialVersionUID = -5852615386664158222L;
/** The degrees of freedom. */
private final double degreesOfFreedom;
/** Inverse cumulative probability accuracy. */
private final double solverAbsoluteAccuracy;
/**
* Create a t distribution using the given degrees of freedom and the
* specified inverse cumulative probability absolute accuracy.
*
* @param degreesOfFreedom Degrees of freedom.
* @param inverseCumAccuracy the maximum absolute error in inverse
* cumulative probability estimates
* (defaults to {@link #DEFAULT_INVERSE_ABSOLUTE_ACCURACY}).
* @throws NotStrictlyPositiveException if {@code degreesOfFreedom <= 0}
* @since 2.1
*/
public TDistributionImpl(double degreesOfFreedom, double inverseCumAccuracy) {
if (degreesOfFreedom <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.DEGREES_OF_FREEDOM,
degreesOfFreedom);
}
this.degreesOfFreedom = degreesOfFreedom;
solverAbsoluteAccuracy = inverseCumAccuracy;
}
/**
* Create a t distribution using the given degrees of freedom.
*
* @param degreesOfFreedom Degrees of freedom.
*/
public TDistributionImpl(double degreesOfFreedom) {
this(degreesOfFreedom, DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
}
/**
* Access the degrees of freedom.
*
* @return the degrees of freedom.
*/
public double getDegreesOfFreedom() {
return degreesOfFreedom;
}
/**
* {@inheritDoc}
*/
public double density(double x) {
final double n = degreesOfFreedom;
final double nPlus1Over2 = (n + 1) / 2;
return FastMath.exp(Gamma.logGamma(nPlus1Over2) -
0.5 * (FastMath.log(FastMath.PI) + FastMath.log(n)) -
Gamma.logGamma(n/2) - nPlus1Over2 * FastMath.log(1 + x * x /n));
}
/**
* {@inheritDoc}
*/
public double cumulativeProbability(double x) {
double ret;
if (x == 0) {
ret = 0.5;
} else {
double t =
Beta.regularizedBeta(
degreesOfFreedom / (degreesOfFreedom + (x * x)),
0.5 * degreesOfFreedom,
0.5);
if (x < 0.0) {
ret = 0.5 * t;
} else {
ret = 1.0 - 0.5 * t;
}
}
return ret;
}
/**
* {@inheritDoc}
*
* It will return {@code Double.NEGATIVE_INFINITY} when {@code p = 0}
* and {@code Double.POSITIVE_INFINITY} when {@code p = 1}.
*/
@Override
public double inverseCumulativeProbability(final double p) {
if (p == 0) {
return Double.NEGATIVE_INFINITY;
}
if (p == 1) {
return Double.POSITIVE_INFINITY;
}
return super.inverseCumulativeProbability(p);
}
/**
* Access the domain value lower bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value
* @return the domain value lower bound, i.e. {@code P(X < 'lower bound') > p}.
*/
@Override
protected double getDomainLowerBound(double p) {
return -Double.MAX_VALUE;
}
/**
* Access the domain value upper bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the domain value upper bound, i.e. {@code P(X < 'upper bound') > p}.
*/
@Override
protected double getDomainUpperBound(double p) {
return Double.MAX_VALUE;
}
/**
* Access the initial domain value, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the initial domain value.
*/
@Override
protected double getInitialDomain(double p) {
return 0;
}
/**
* Return the absolute accuracy setting of the solver used to estimate
* inverse cumulative probabilities.
*
* @return the solver absolute accuracy.
* @since 2.1
*/
@Override
protected double getSolverAbsoluteAccuracy() {
return solverAbsoluteAccuracy;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always negative infinity
* no matter the parameters.
*
* @return lower bound of the support (always Double.NEGATIVE_INFINITY)
*/
@Override
public double getSupportLowerBound() {
return Double.NEGATIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is always positive infinity
* no matter the parameters.
*
* @return upper bound of the support (always Double.POSITIVE_INFINITY)
*/
@Override
public double getSupportUpperBound() {
return Double.POSITIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* For degrees of freedom parameter df, the mean is
* <ul>
* <li>if <code>df &gt; 1</code> then <code>0</code></li>
* <li>else <code>undefined</code></li>
* </ul>
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalMean() {
final double df = getDegreesOfFreedom();
if (df > 1) {
return 0;
}
return Double.NaN;
}
/**
* {@inheritDoc}
*
* For degrees of freedom parameter df, the variance is
* <ul>
* <li>if <code>df &gt; 2</code> then <code>df / (df - 2)</code> </li>
* <li>if <code>1 &lt; df &lt;= 2</code> then <code>positive infinity</code></li>
* <li>else <code>undefined</code></li>
* </ul>
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalVariance() {
final double df = getDegreesOfFreedom();
if (df > 2) {
return df / (df - 2);
}
if (df > 1 && df <= 2) {
return Double.POSITIVE_INFINITY;
}
return Double.NaN;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSupportLowerBoundInclusive() {
return false;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSupportUpperBoundInclusive() {
return false;
}
}

View File

@ -17,35 +17,248 @@
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.OutOfRangeException;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.FastMath;
/**
* Weibull Distribution. This interface defines the two parameter form of the
* distribution as defined by
* Implementation of the Weibull distribution. This implementation uses the
* two parameter form of the distribution defined by
* <a href="http://mathworld.wolfram.com/WeibullDistribution.html">
* Weibull Distribution</a>, equations (1) and (2).
*
* <p>
* References:
* <ul>
* <li><a href="http://mathworld.wolfram.com/WeibullDistribution.html">
* Weibull Distribution</a></li>
* </ul>
* </p>
*
* @since 1.1
* @see <a href="http://en.wikipedia.org/wiki/Weibull_distribution">Weibull distribution (Wikipedia)</a>
* @see <a href="http://mathworld.wolfram.com/WeibullDistribution.html">Weibull distribution (MathWorld)</a>
* @since 1.1 (changed to concrete class in 3.0)
* @version $Id$
*/
public interface WeibullDistribution extends ContinuousDistribution {
public class WeibullDistribution extends AbstractContinuousDistribution
implements Serializable {
/**
* Access the shape parameter.
*
* @return the shape parameter.
* Default inverse cumulative probability accuracy.
* @since 2.1
*/
double getShape();
public static final double DEFAULT_INVERSE_ABSOLUTE_ACCURACY = 1e-9;
/** Serializable version identifier. */
private static final long serialVersionUID = 8589540077390120676L;
/** The shape parameter. */
private final double shape;
/** The scale parameter. */
private final double scale;
/** Inverse cumulative probability accuracy. */
private final double solverAbsoluteAccuracy;
/**
* Access the scale parameter.
* Create a Weibull distribution with the given shape and scale and a
* location equal to zero.
*
* @return the scale parameter.
* @param alpha Shape parameter.
* @param beta Scale parameter.
* @throws NotStrictlyPositiveException if {@code alpha <= 0} or
* {@code beta <= 0}.
*/
double getScale();
public WeibullDistribution(double alpha, double beta)
throws NotStrictlyPositiveException {
this(alpha, beta, DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
}
/**
* Create a Weibull distribution with the given shape, scale and inverse
* cumulative probability accuracy and a location equal to zero.
*
* @param alpha Shape parameter.
* @param beta Scale parameter.
* @param inverseCumAccuracy Maximum absolute error in inverse
* cumulative probability estimates
* (defaults to {@link #DEFAULT_INVERSE_ABSOLUTE_ACCURACY}).
* @throws NotStrictlyPositiveException if {@code alpha <= 0} or
* {@code beta <= 0}.
* @since 2.1
*/
public WeibullDistribution(double alpha, double beta,
double inverseCumAccuracy)
throws NotStrictlyPositiveException {
if (alpha <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.SHAPE,
alpha);
}
if (beta <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.SCALE,
beta);
}
scale = beta;
shape = alpha;
solverAbsoluteAccuracy = inverseCumAccuracy;
}
/** {@inheritDoc} */
public double cumulativeProbability(double x) {
double ret;
if (x <= 0.0) {
ret = 0.0;
} else {
ret = 1.0 - FastMath.exp(-FastMath.pow(x / scale, shape));
}
return ret;
}
/**
* Access the shape parameter, {@code alpha}.
*
* @return the shape parameter, {@code alpha}.
*/
public double getShape() {
return shape;
}
/**
* Access the scale parameter, {@code beta}.
*
* @return the scale parameter, {@code beta}.
*/
public double getScale() {
return scale;
}
/** {@inheritDoc} */
public double density(double x) {
if (x < 0) {
return 0;
}
final double xscale = x / scale;
final double xscalepow = FastMath.pow(xscale, shape - 1);
/*
* FastMath.pow(x / scale, shape) =
* FastMath.pow(xscale, shape) =
* FastMath.pow(xscale, shape - 1) * xscale
*/
final double xscalepowshape = xscalepow * xscale;
return (shape / scale) * xscalepow * FastMath.exp(-xscalepowshape);
}
/**
* {@inheritDoc}
*
* Returns {@code 0} when {@code p == 0} and
* {@code Double.POSITIVE_INFINITY} when {@code p == 1}.
*/
@Override
public double inverseCumulativeProbability(double p) {
double ret;
if (p < 0.0 || p > 1.0) {
throw new OutOfRangeException(p, 0.0, 1.0);
} else if (p == 0) {
ret = 0.0;
} else if (p == 1) {
ret = Double.POSITIVE_INFINITY;
} else {
ret = scale * FastMath.pow(-FastMath.log(1.0 - p), 1.0 / shape);
}
return ret;
}
/** {@inheritDoc} */
@Override
protected double getDomainLowerBound(double p) {
return 0;
}
/** {@inheritDoc} */
@Override
protected double getDomainUpperBound(double p) {
return Double.MAX_VALUE;
}
/** {@inheritDoc} */
@Override
protected double getInitialDomain(double p) {
// use median
return FastMath.pow(scale * FastMath.log(2.0), 1.0 / shape);
}
/**
* Return the absolute accuracy setting of the solver used to estimate
* inverse cumulative probabilities.
*
* @return the solver absolute accuracy.
* @since 2.1
*/
@Override
protected double getSolverAbsoluteAccuracy() {
return solverAbsoluteAccuracy;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always 0 no matter the parameters.
*
* @return lower bound of the support (always 0)
*/
@Override
public double getSupportLowerBound() {
return 0;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is always positive infinity
* no matter the parameters.
*
* @return upper bound of the support (always
* {@code Double.POSITIVE_INFINITY})
*/
@Override
public double getSupportUpperBound() {
return Double.POSITIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* The mean is {@code scale * Gamma(1 + (1 / shape))}, where {@code Gamma()}
* is the Gamma-function.
*/
@Override
protected double calculateNumericalMean() {
final double sh = getShape();
final double sc = getScale();
return sc * FastMath.exp(Gamma.logGamma(1 + (1 / sh)));
}
/**
* {@inheritDoc}
*
* The variance is {@code scale^2 * Gamma(1 + (2 / shape)) - mean^2}
* where {@code Gamma()} is the Gamma-function.
*/
@Override
protected double calculateNumericalVariance() {
final double sh = getShape();
final double sc = getScale();
final double mn = getNumericalMean();
return (sc * sc) *
FastMath.exp(Gamma.logGamma(1 + (2 / sh))) -
(mn * mn);
}
/** {@inheritDoc} */
@Override
public boolean isSupportLowerBoundInclusive() {
return true;
}
/** {@inheritDoc} */
@Override
public boolean isSupportUpperBoundInclusive() {
return false;
}
}
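
Editorial note: a sketch for the merged WeibullDistribution, using the closed-form CDF and quantile function plus the Gamma-function moment formulas documented above. Shape 1.2 and scale 2.1 simply mirror the default test fixture further down.

import org.apache.commons.math.distribution.WeibullDistribution;

public class WeibullUsageSketch {
    public static void main(String[] args) throws Exception {
        WeibullDistribution dist = new WeibullDistribution(1.2, 2.1);   // shape alpha, scale beta

        // CDF in closed form: 1 - exp(-(x / beta)^alpha).
        double cdf = dist.cumulativeProbability(2.0);

        // Quantile function in closed form: beta * (-ln(1 - p))^(1 / alpha).
        double median = dist.inverseCumulativeProbability(0.5);

        // Moments: mean = beta * Gamma(1 + 1/alpha),
        // variance = beta^2 * Gamma(1 + 2/alpha) - mean^2.
        double mean = dist.getNumericalMean();
        double variance = dist.getNumericalVariance();

        System.out.printf("cdf(2)=%.4f median=%.4f mean=%.4f var=%.4f%n",
                cdf, median, mean, variance);
    }
}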

View File

@ -1,288 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.math.distribution;
import java.io.Serializable;
import org.apache.commons.math.exception.OutOfRangeException;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.special.Gamma;
import org.apache.commons.math.util.FastMath;
/**
* Default implementation of
* {@link org.apache.commons.math.distribution.WeibullDistribution}.
*
* @since 1.1
* @version $Id$
*/
public class WeibullDistributionImpl extends AbstractContinuousDistribution
implements WeibullDistribution, Serializable {
/**
* Default inverse cumulative probability accuracy.
* @since 2.1
*/
public static final double DEFAULT_INVERSE_ABSOLUTE_ACCURACY = 1e-9;
/** Serializable version identifier. */
private static final long serialVersionUID = 8589540077390120676L;
/** The shape parameter. */
private final double shape;
/** The scale parameter. */
private final double scale;
/** Inverse cumulative probability accuracy. */
private final double solverAbsoluteAccuracy;
/**
* Create a Weibull distribution with the given shape and scale and a
* location equal to zero.
*
* @param alpha Shape parameter.
* @param beta Scale parameter.
*/
public WeibullDistributionImpl(double alpha, double beta) {
this(alpha, beta, DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
}
/**
* Create a Weibull distribution with the given shape, scale and inverse
* cumulative probability accuracy and a location equal to zero.
*
* @param alpha Shape parameter.
* @param beta Scale parameter.
* @param inverseCumAccuracy Maximum absolute error in inverse
* cumulative probability estimates
* (defaults to {@link #DEFAULT_INVERSE_ABSOLUTE_ACCURACY}).
* @throws NotStrictlyPositiveException if {@code alpha <= 0} or
* {@code beta <= 0}.
* @since 2.1
*/
public WeibullDistributionImpl(double alpha, double beta,
double inverseCumAccuracy) {
if (alpha <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.SHAPE,
alpha);
}
if (beta <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.SCALE,
beta);
}
scale = beta;
shape = alpha;
solverAbsoluteAccuracy = inverseCumAccuracy;
}
/**
* {@inheritDoc}
*/
public double cumulativeProbability(double x) {
double ret;
if (x <= 0.0) {
ret = 0.0;
} else {
ret = 1.0 - FastMath.exp(-FastMath.pow(x / scale, shape));
}
return ret;
}
/**
* {@inheritDoc}
*/
public double getShape() {
return shape;
}
/**
* {@inheritDoc}
*/
public double getScale() {
return scale;
}
/**
* {@inheritDoc}
*/
public double density(double x) {
if (x < 0) {
return 0;
}
final double xscale = x / scale;
final double xscalepow = FastMath.pow(xscale, shape - 1);
/*
* FastMath.pow(x / scale, shape) =
* FastMath.pow(xscale, shape) =
* FastMath.pow(xscale, shape - 1) * xscale
*/
final double xscalepowshape = xscalepow * xscale;
return (shape / scale) * xscalepow * FastMath.exp(-xscalepowshape);
}
/**
* {@inheritDoc}
*
* It will return {@code 0} when {@code p = 0} and
* {@code Double.POSITIVE_INFINITY} when {@code p = 1}.
*/
@Override
public double inverseCumulativeProbability(double p) {
double ret;
if (p < 0.0 || p > 1.0) {
throw new OutOfRangeException(p, 0.0, 1.0);
} else if (p == 0) {
ret = 0.0;
} else if (p == 1) {
ret = Double.POSITIVE_INFINITY;
} else {
ret = scale * FastMath.pow(-FastMath.log(1.0 - p), 1.0 / shape);
}
return ret;
}
/**
* Access the domain value lower bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the domain value lower bound, i.e. {@code P(X < 'lower bound') < p}.
*/
@Override
protected double getDomainLowerBound(double p) {
return 0;
}
/**
* Access the domain value upper bound, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the domain value upper bound, i.e. {@code P(X < 'upper bound') > p}.
*/
@Override
protected double getDomainUpperBound(double p) {
return Double.MAX_VALUE;
}
/**
* Access the initial domain value, based on {@code p}, used to
* bracket a CDF root. This method is used by
* {@link #inverseCumulativeProbability(double)} to find critical values.
*
* @param p Desired probability for the critical value.
* @return the initial domain value.
*/
@Override
protected double getInitialDomain(double p) {
// use median
return FastMath.pow(scale * FastMath.log(2.0), 1.0 / shape);
}
/**
* Return the absolute accuracy setting of the solver used to estimate
* inverse cumulative probabilities.
*
* @return the solver absolute accuracy.
* @since 2.1
*/
@Override
protected double getSolverAbsoluteAccuracy() {
return solverAbsoluteAccuracy;
}
/**
* {@inheritDoc}
*
* The lower bound of the support is always 0 no matter the parameters.
*
* @return lower bound of the support (always 0)
*/
@Override
public double getSupportLowerBound() {
return 0;
}
/**
* {@inheritDoc}
*
* The upper bound of the support is always positive infinity
* no matter the parameters.
*
* @return upper bound of the support (always Double.POSITIVE_INFINITY)
*/
@Override
public double getSupportUpperBound() {
return Double.POSITIVE_INFINITY;
}
/**
* {@inheritDoc}
*
* The mean is <code>scale * Gamma(1 + (1 / shape))</code>
* where <code>Gamma(...)</code> is the Gamma-function
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalMean() {
final double sh = getShape();
final double sc = getScale();
return sc * FastMath.exp(Gamma.logGamma(1 + (1 / sh)));
}
/**
* {@inheritDoc}
*
* The variance is
* <code>scale^2 * Gamma(1 + (2 / shape)) - mean^2</code>
* where <code>Gamma(...)</code> is the Gamma-function
*
* @return {@inheritDoc}
*/
@Override
protected double calculateNumericalVariance() {
final double sh = getShape();
final double sc = getScale();
final double mn = getNumericalMean();
return (sc * sc) *
FastMath.exp(Gamma.logGamma(1 + (2 / sh))) -
(mn * mn);
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSupportLowerBoundInclusive() {
return true;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isSupportUpperBoundInclusive() {
return false;
}
}

View File

@ -33,8 +33,8 @@ import org.apache.commons.math.distribution.FDistribution;
import org.apache.commons.math.distribution.HypergeometricDistribution;
import org.apache.commons.math.distribution.IntegerDistribution;
import org.apache.commons.math.distribution.PascalDistribution;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.distribution.WeibullDistributionImpl;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.WeibullDistribution;
import org.apache.commons.math.distribution.ZipfDistributionImpl;
import org.apache.commons.math.exception.MathInternalError;
import org.apache.commons.math.exception.NotStrictlyPositiveException;
@ -784,7 +784,7 @@ public class RandomDataImpl implements RandomData, Serializable {
}
/**
* Generates a random value from the {@link TDistributionImpl T Distribution}.
* Generates a random value from the {@link TDistribution T Distribution}.
* This implementation uses {@link #nextInversionDeviate(ContinuousDistribution) inversion}
* to generate random values.
*
@ -793,11 +793,11 @@ public class RandomDataImpl implements RandomData, Serializable {
* @since 2.2
*/
public double nextT(double df) {
return nextInversionDeviate(new TDistributionImpl(df));
return nextInversionDeviate(new TDistribution(df));
}
/**
* Generates a random value from the {@link WeibullDistributionImpl Weibull Distribution}.
* Generates a random value from the {@link WeibullDistribution Weibull Distribution}.
* This implementation uses {@link #nextInversionDeviate(ContinuousDistribution) inversion}
* to generate random values.
*
@ -807,7 +807,7 @@ public class RandomDataImpl implements RandomData, Serializable {
* @since 2.2
*/
public double nextWeibull(double shape, double scale) {
return nextInversionDeviate(new WeibullDistributionImpl(shape, scale));
return nextInversionDeviate(new WeibullDistribution(shape, scale));
}
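
Editorial note: the RandomDataImpl changes only swap the Impl class names, but the Javadoc above notes that nextT and nextWeibull draw values by inversion. Below is a minimal sketch of that idea, using a plain java.util.Random as the uniform source purely for illustration; the library itself routes this through nextInversionDeviate.

import java.util.Random;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.WeibullDistribution;

public class InversionSamplingSketch {
    public static void main(String[] args) throws Exception {
        Random rng = new Random(42L);

        // Inversion: if U ~ Uniform(0, 1), then F^{-1}(U) is distributed according to F.
        TDistribution t = new TDistribution(4.0);
        double tDeviate = t.inverseCumulativeProbability(rng.nextDouble());

        WeibullDistribution w = new WeibullDistribution(1.2, 2.1);
        double wDeviate = w.inverseCumulativeProbability(rng.nextDouble());

        System.out.printf("t deviate = %.4f, Weibull deviate = %.4f%n", tDeviate, wDeviate);
    }
}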
/**

View File

@ -19,7 +19,6 @@ package org.apache.commons.math.stat.correlation;
import org.apache.commons.math.MathException;
import org.apache.commons.math.MathRuntimeException;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.exception.NullArgumentException;
import org.apache.commons.math.exception.DimensionMismatchException;
@ -162,7 +161,7 @@ public class PearsonsCorrelation {
* @throws MathException if an error occurs estimating probabilities
*/
public RealMatrix getCorrelationPValues() throws MathException {
TDistribution tDistribution = new TDistributionImpl(nObs - 2);
TDistribution tDistribution = new TDistribution(nObs - 2);
int nVars = correlationMatrix.getColumnDimension();
double[][] out = new double[nVars][nVars];
for (int i = 0; i < nVars; i++) {

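Editorial note: PearsonsCorrelation now builds the renamed TDistribution with nObs - 2 degrees of freedom. As a rough illustration of what getCorrelationPValues computes per matrix entry, here is the standard two-sided test of a sample correlation; the t-statistic formula is the textbook one (not copied from the diff), and r and n are made-up values.

import org.apache.commons.math.distribution.TDistribution;

public class CorrelationPValueSketch {
    public static void main(String[] args) throws Exception {
        double r = 0.45;  // hypothetical sample correlation
        int n = 30;       // hypothetical number of observations

        // Textbook t statistic for H0: rho = 0.
        double t = r * Math.sqrt((n - 2) / (1 - r * r));

        // Two-sided p-value from a t distribution with n - 2 degrees of freedom,
        // mirroring the new TDistribution(nObs - 2) construction above.
        TDistribution dist = new TDistribution(n - 2);
        double p = 2 * dist.cumulativeProbability(-Math.abs(t));

        System.out.printf("t = %.4f, p = %.4f%n", t, p);
    }
}
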
View File

@ -21,7 +21,6 @@ import org.apache.commons.math.exception.OutOfRangeException;
import org.apache.commons.math.exception.NullArgumentException;
import org.apache.commons.math.exception.NumberIsTooSmallException;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.exception.util.LocalizedFormats;
import org.apache.commons.math.stat.StatUtils;
import org.apache.commons.math.stat.descriptive.StatisticalSummary;
@ -30,7 +29,7 @@ import org.apache.commons.math.util.FastMath;
/**
* Implements t-test statistics defined in the {@link TTest} interface.
* <p>
* Uses commons-math {@link org.apache.commons.math.distribution.TDistributionImpl}
* Uses commons-math {@link org.apache.commons.math.distribution.TDistribution}
* implementation to estimate exact p-values.</p>
*
* @version $Id$
@ -939,7 +938,7 @@ public class TTestImpl implements TTest {
protected double tTest(double m, double mu, double v, double n)
throws MathException {
double t = FastMath.abs(t(m, mu, v, n));
TDistribution distribution = new TDistributionImpl(n - 1);
TDistribution distribution = new TDistribution(n - 1);
return 2.0 * distribution.cumulativeProbability(-t);
}
@ -965,7 +964,7 @@ public class TTestImpl implements TTest {
double t = FastMath.abs(t(m1, m2, v1, v2, n1, n2));
double degreesOfFreedom = 0;
degreesOfFreedom = df(v1, v2, n1, n2);
TDistribution distribution = new TDistributionImpl(degreesOfFreedom);
TDistribution distribution = new TDistribution(degreesOfFreedom);
return 2.0 * distribution.cumulativeProbability(-t);
}
@ -990,7 +989,7 @@ public class TTestImpl implements TTest {
throws MathException {
double t = FastMath.abs(homoscedasticT(m1, m2, v1, v2, n1, n2));
double degreesOfFreedom = n1 + n2 - 2;
TDistribution distribution = new TDistributionImpl(degreesOfFreedom);
TDistribution distribution = new TDistribution(degreesOfFreedom);
return 2.0 * distribution.cumulativeProbability(-t);
}

View File

@ -21,7 +21,6 @@ import java.io.Serializable;
import org.apache.commons.math.MathException;
import org.apache.commons.math.exception.OutOfRangeException;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.exception.MathIllegalArgumentException;
import org.apache.commons.math.exception.NoDataException;
import org.apache.commons.math.exception.util.LocalizedFormats;
@ -125,8 +124,8 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
ybar = y;
} else {
if( hasIntercept ){
final double fact1 = 1.0 + (double) n;
final double fact2 = ((double) n) / (1.0 + (double) n);
final double fact1 = 1.0 + n;
final double fact2 = (n) / (1.0 + n);
final double dx = x - xbar;
final double dy = y - ybar;
sumXX += dx * dx * fact2;
@ -164,8 +163,8 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
public void removeData(final double x,final double y) {
if (n > 0) {
if (hasIntercept) {
final double fact1 = (double) n - 1.0;
final double fact2 = ((double) n) / ((double) n - 1.0);
final double fact1 = n - 1.0;
final double fact2 = (n) / (n - 1.0);
final double dx = x - xbar;
final double dy = y - ybar;
sumXX -= dx * dx * fact2;
@ -174,7 +173,7 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
xbar -= dx / fact1;
ybar -= dy / fact1;
} else {
final double fact1 = (double) n - 1.0;
final double fact1 = n - 1.0;
sumXX -= x * x;
sumYY -= y * y;
sumXY -= x * y;
@ -556,7 +555,7 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
return Double.NaN;
}
return FastMath.sqrt(
getMeanSquareError() * ((1d / (double) n) + (xbar * xbar) / sumXX));
getMeanSquareError() * ((1d / n) + (xbar * xbar) / sumXX));
}
/**
@ -637,7 +636,7 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
throw new OutOfRangeException(LocalizedFormats.SIGNIFICANCE_LEVEL,
alpha, 0, 1);
}
TDistribution distribution = new TDistributionImpl(n - 2);
TDistribution distribution = new TDistribution(n - 2);
return getSlopeStdErr() *
distribution.inverseCumulativeProbability(1d - alpha / 2d);
}
@ -664,7 +663,7 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
* @throws MathException if the significance level can not be computed.
*/
public double getSignificance() throws MathException {
TDistribution distribution = new TDistributionImpl(n - 2);
TDistribution distribution = new TDistribution(n - 2);
return 2d * (1.0 - distribution.cumulativeProbability(
FastMath.abs(getSlope()) / getSlopeStdErr()));
}
@ -709,19 +708,19 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
if( FastMath.abs( sumXX ) > Precision.SAFE_MIN ){
final double[] params = new double[]{ getIntercept(), getSlope() };
final double mse = getMeanSquareError();
final double _syy = sumYY + sumY * sumY / ((double) n);
final double _syy = sumYY + sumY * sumY / (n);
final double[] vcv = new double[]{
mse * (xbar *xbar /sumXX + 1.0 / ((double) n)),
mse * (xbar *xbar /sumXX + 1.0 / (n)),
-xbar*mse/sumXX,
mse/sumXX };
return new RegressionResults(
params, new double[][]{vcv}, true, n, 2,
sumY, _syy, getSumSquaredErrors(),true,false);
}else{
final double[] params = new double[]{ sumY/((double) n), Double.NaN };
final double[] params = new double[]{ sumY/(n), Double.NaN };
//final double mse = getMeanSquareError();
final double[] vcv = new double[]{
ybar / ((double) n - 1.0),
ybar / (n - 1.0),
Double.NaN,
Double.NaN };
return new RegressionResults(
@ -782,11 +781,11 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
if( variablesToInclude[0] != 1 && variablesToInclude[0] != 0 ){
throw new OutOfRangeException( variablesToInclude[0],0,1 );
}
final double _mean = sumY * sumY / ((double) n);
final double _mean = sumY * sumY / (n);
final double _syy = sumYY + _mean;
if( variablesToInclude[0] == 0 ){
//just the mean
final double[] vcv = new double[]{ sumYY/((double)((n-1)*n)) };
final double[] vcv = new double[]{ sumYY/(((n-1)*n)) };
final double[] params = new double[]{ ybar };
return new RegressionResults(
params, new double[][]{vcv}, true, n, 1,
@ -794,10 +793,10 @@ public class SimpleRegression implements Serializable, UpdatingMultipleLinearReg
}else if( variablesToInclude[0] == 1){
//final double _syy = sumYY + sumY * sumY / ((double) n);
final double _sxx = sumXX + sumX * sumX / ((double) n);
final double _sxy = sumXY + sumX * sumY / ((double) n);
final double _sxx = sumXX + sumX * sumX / (n);
final double _sxy = sumXY + sumX * sumY / (n);
final double _sse = FastMath.max(0d, _syy - _sxy * _sxy / _sxx);
final double _mse = _sse/((double)(n-1));
final double _mse = _sse/((n-1));
if( !Double.isNaN(_sxx) ){
final double[] vcv = new double[]{ _mse / _sxx };
final double[] params = new double[]{ _sxy/_sxx };

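Editorial note: besides the cast cleanups, the SimpleRegression changes swap TDistributionImpl for TDistribution in the slope confidence interval and significance computations. A short sketch of the confidence-interval call follows; the data points are made up, and addData, getSlope, getSlopeConfidenceInterval and the package name are what SimpleRegression is believed to expose rather than anything shown verbatim in this diff.

import org.apache.commons.math.stat.regression.SimpleRegression;

public class SlopeConfidenceSketch {
    public static void main(String[] args) throws Exception {
        SimpleRegression regression = new SimpleRegression();

        // A handful of made-up (x, y) observations to fit.
        double[][] data = { {1, 2.1}, {2, 3.9}, {3, 6.2}, {4, 7.8}, {5, 10.1} };
        for (double[] point : data) {
            regression.addData(point[0], point[1]);
        }

        // Half-width of the 95% interval: slope standard error times the
        // t(1 - alpha/2, n - 2) quantile, i.e. the product computed in the hunk above.
        double halfWidth = regression.getSlopeConfidenceInterval(0.05);
        System.out.printf("slope = %.4f +/- %.4f%n", regression.getSlope(), halfWidth);
    }
}
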
View File

@ -45,7 +45,7 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
*/
@Override
public IntegerDistribution makeDistribution() {
return new PoissonDistributionImpl(DEFAULT_TEST_POISSON_PARAMETER);
return new PoissonDistribution(DEFAULT_TEST_POISSON_PARAMETER);
}
/**
@ -113,12 +113,12 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
*/
@Test
public void testNormalApproximateProbability() throws Exception {
PoissonDistribution dist = new PoissonDistributionImpl(100);
PoissonDistribution dist = new PoissonDistribution(100);
double result = dist.normalApproximateProbability(110)
- dist.normalApproximateProbability(89);
Assert.assertEquals(0.706281887248, result, 1E-10);
dist = new PoissonDistributionImpl(10000);
dist = new PoissonDistribution(10000);
result = dist.normalApproximateProbability(10200)
- dist.normalApproximateProbability(9899);
Assert.assertEquals(0.820070051552, result, 1E-10);
@ -130,19 +130,19 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
*/
@Test
public void testDegenerateInverseCumulativeProbability() throws Exception {
PoissonDistribution dist = new PoissonDistributionImpl(DEFAULT_TEST_POISSON_PARAMETER);
PoissonDistribution dist = new PoissonDistribution(DEFAULT_TEST_POISSON_PARAMETER);
Assert.assertEquals(Integer.MAX_VALUE, dist.inverseCumulativeProbability(1.0d));
Assert.assertEquals(-1, dist.inverseCumulativeProbability(0d));
}
@Test(expected=NotStrictlyPositiveException.class)
public void testNegativeMean() {
new PoissonDistributionImpl(-1);
new PoissonDistribution(-1);
}
@Test
public void testMean() {
PoissonDistribution dist = new PoissonDistributionImpl(10.0);
PoissonDistribution dist = new PoissonDistribution(10.0);
Assert.assertEquals(10.0, dist.getMean(), 0.0);
}
@ -150,7 +150,7 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
public void testLargeMeanCumulativeProbability() {
double mean = 1.0;
while (mean <= 10000000.0) {
PoissonDistribution dist = new PoissonDistributionImpl(mean);
PoissonDistribution dist = new PoissonDistribution(mean);
double x = mean * 2.0;
double dx = x / 10.0;
@ -181,12 +181,12 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
@Test
public void testCumulativeProbabilitySpecial() throws Exception {
PoissonDistribution dist;
dist = new PoissonDistributionImpl(9120);
dist = new PoissonDistribution(9120);
checkProbability(dist, 9075);
checkProbability(dist, 9102);
dist = new PoissonDistributionImpl(5058);
dist = new PoissonDistribution(5058);
checkProbability(dist, 5044);
dist = new PoissonDistributionImpl(6986);
dist = new PoissonDistribution(6986);
checkProbability(dist, 6950);
}
@ -202,7 +202,7 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
public void testLargeMeanInverseCumulativeProbability() throws Exception {
double mean = 1.0;
while (mean <= 100000.0) { // Extended test value: 1E7. Reduced to limit run time.
PoissonDistribution dist = new PoissonDistributionImpl(mean);
PoissonDistribution dist = new PoissonDistribution(mean);
double p = 0.1;
double dp = p;
while (p < .99) {
@ -225,12 +225,12 @@ public class PoissonDistributionTest extends IntegerDistributionAbstractTest {
public void testMoments() {
final double tol = 1e-9;
PoissonDistribution dist;
dist = new PoissonDistributionImpl(1);
dist = new PoissonDistribution(1);
Assert.assertEquals(dist.getNumericalMean(), 1, tol);
Assert.assertEquals(dist.getNumericalVariance(), 1, tol);
dist = new PoissonDistributionImpl(11.23);
dist = new PoissonDistribution(11.23);
Assert.assertEquals(dist.getNumericalMean(), 11.23, tol);
Assert.assertEquals(dist.getNumericalVariance(), 11.23, tol);
}

View File

@ -34,7 +34,7 @@ public class TDistributionTest extends ContinuousDistributionAbstractTest {
/** Creates the default continuous distribution instance to use in tests. */
@Override
public TDistribution makeDistribution() {
return new TDistributionImpl(5.0);
return new TDistribution(5.0);
}
/** Creates the default cumulative probability distribution test input values */
@ -73,14 +73,14 @@ public class TDistributionTest extends ContinuousDistributionAbstractTest {
*/
@Test
public void testCumulativeProbabilityAgainstStackOverflow() throws Exception {
TDistributionImpl td = new TDistributionImpl(5.);
TDistribution td = new TDistribution(5.);
td.cumulativeProbability(.1);
td.cumulativeProbability(.01);
}
@Test
public void testSmallDf() throws Exception {
setDistribution(new TDistributionImpl(1d));
setDistribution(new TDistribution(1d));
// quantiles computed using R version 2.9.2
setCumulativeTestPoints(new double[] {-318.308838986, -31.8205159538, -12.7062047362,
-6.31375151468, -3.07768353718, 318.308838986, 31.8205159538, 12.7062047362,
@ -110,25 +110,25 @@ public class TDistributionTest extends ContinuousDistributionAbstractTest {
@Test(expected=NotStrictlyPositiveException.class)
public void testPreconditions() {
new TDistributionImpl(0);
new TDistribution(0);
}
@Test
public void testMoments() {
final double tol = 1e-9;
TDistribution dist;
dist = new TDistributionImpl(1);
dist = new TDistribution(1);
Assert.assertTrue(Double.isNaN(dist.getNumericalMean()));
Assert.assertTrue(Double.isNaN(dist.getNumericalVariance()));
dist = new TDistributionImpl(1.5);
dist = new TDistribution(1.5);
Assert.assertEquals(dist.getNumericalMean(), 0, tol);
Assert.assertTrue(Double.isInfinite(dist.getNumericalVariance()));
dist = new TDistributionImpl(5);
dist = new TDistribution(5);
Assert.assertEquals(dist.getNumericalMean(), 0, tol);
Assert.assertEquals(dist.getNumericalVariance(), 5d / (5d - 2d), tol);
Assert.assertEquals(dist.getNumericalVariance(), 5d / (5d - 2d), tol);
}
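The three cases assert the standard moments of Student's t with \nu degrees of freedom:

    \mathrm{E}[X] = 0 \quad (\nu > 1), \qquad
    \mathrm{Var}[X] =
    \begin{cases}
        \nu / (\nu - 2), & \nu > 2 \\
        \infty,          & 1 < \nu \le 2 \\
        \text{undefined}, & \nu \le 1
    \end{cases}

For \nu = 5 this gives 5/3, matching the final assertion.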
/*
@ -151,7 +151,7 @@ public class TDistributionTest extends ContinuousDistributionAbstractTest {
return;
}
private double[] makeNistResults(double[] args, int df){
TDistribution td = new TDistributionImpl(df);
TDistribution td = new TDistribution(df);
double[] res = new double[ args.length ];
for( int i = 0 ; i < res.length ; i++){
res[i] = 1.0 - td.cumulativeProbability(args[i]);

View File

@ -37,7 +37,7 @@ public class WeibullDistributionTest extends ContinuousDistributionAbstractTest
/** Creates the default continuous distribution instance to use in tests. */
@Override
public WeibullDistribution makeDistribution() {
return new WeibullDistributionImpl(1.2, 2.1);
return new WeibullDistribution(1.2, 2.1);
}
/** Creates the default cumulative probability distribution test input values */
@ -73,10 +73,10 @@ public class WeibullDistributionTest extends ContinuousDistributionAbstractTest
@Test
public void testAlpha() {
WeibullDistribution dist = new WeibullDistributionImpl(1, 2);
WeibullDistribution dist = new WeibullDistribution(1, 2);
Assert.assertEquals(1, dist.getShape(), 0);
try {
dist = new WeibullDistributionImpl(0, 2);
dist = new WeibullDistribution(0, 2);
Assert.fail("NotStrictlyPositiveException expected");
} catch (NotStrictlyPositiveException e) {
// Expected.
@ -85,10 +85,10 @@ public class WeibullDistributionTest extends ContinuousDistributionAbstractTest
@Test
public void testBeta() {
WeibullDistribution dist = new WeibullDistributionImpl(1, 2);
WeibullDistribution dist = new WeibullDistribution(1, 2);
Assert.assertEquals(2, dist.getScale(), 0);
try {
dist = new WeibullDistributionImpl(1, 0);
dist = new WeibullDistribution(1, 0);
Assert.fail("NotStrictlyPositiveException expected");
} catch (NotStrictlyPositiveException e) {
// Expected.
@ -99,17 +99,17 @@ public class WeibullDistributionTest extends ContinuousDistributionAbstractTest
public void testMoments() {
final double tol = 1e-9;
WeibullDistribution dist;
dist = new WeibullDistributionImpl(2.5, 3.5);
dist = new WeibullDistribution(2.5, 3.5);
// In R: 3.5*gamma(1+(1/2.5)) (or empirically: mean(rweibull(10000, 2.5, 3.5)))
Assert.assertEquals(dist.getNumericalMean(), 3.5 * FastMath.exp(Gamma.logGamma(1 + (1 / 2.5))), tol);
Assert.assertEquals(dist.getNumericalVariance(), (3.5 * 3.5) *
Assert.assertEquals(dist.getNumericalVariance(), (3.5 * 3.5) *
FastMath.exp(Gamma.logGamma(1 + (2 / 2.5))) -
(dist.getNumericalMean() * dist.getNumericalMean()), tol);
dist = new WeibullDistributionImpl(10.4, 2.222);
(dist.getNumericalMean() * dist.getNumericalMean()), tol);
dist = new WeibullDistribution(10.4, 2.222);
Assert.assertEquals(dist.getNumericalMean(), 2.222 * FastMath.exp(Gamma.logGamma(1 + (1 / 10.4))), tol);
Assert.assertEquals(dist.getNumericalVariance(), (2.222 * 2.222) *
Assert.assertEquals(dist.getNumericalVariance(), (2.222 * 2.222) *
FastMath.exp(Gamma.logGamma(1 + (2 / 10.4))) -
(dist.getNumericalMean() * dist.getNumericalMean()), tol);
}
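The expected values re-derived through FastMath.exp(Gamma.logGamma(...)) are the closed-form Weibull moments for shape k and scale \beta:

    \mathrm{E}[X] = \beta \, \Gamma(1 + 1/k), \qquad
    \mathrm{Var}[X] = \beta^2 \, \Gamma(1 + 2/k) - \mathrm{E}[X]^2

With k = 2.5, \beta = 3.5 (and then k = 10.4, \beta = 2.222) these are exactly the quantities the assertions compute.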

View File

@ -38,9 +38,9 @@ import org.apache.commons.math.distribution.HypergeometricDistributionTest;
import org.apache.commons.math.distribution.PascalDistribution;
import org.apache.commons.math.distribution.PascalDistributionTest;
import org.apache.commons.math.distribution.PoissonDistribution;
import org.apache.commons.math.distribution.PoissonDistributionImpl;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.distribution.WeibullDistributionImpl;
import org.apache.commons.math.distribution.PoissonDistribution;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.WeibullDistribution;
import org.apache.commons.math.distribution.ZipfDistributionImpl;
import org.apache.commons.math.distribution.ZipfDistributionTest;
import org.apache.commons.math.stat.Frequency;
@ -293,7 +293,7 @@ public class RandomDataTest {
* Start with upper and lower tail bins.
* Lower bin = [0, lower); Upper bin = [upper, +inf).
*/
PoissonDistribution poissonDistribution = new PoissonDistributionImpl(mean);
PoissonDistribution poissonDistribution = new PoissonDistribution(mean);
int lower = 1;
while (poissonDistribution.cumulativeProbability(lower - 1) * sampleSize < minExpectedCount) {
lower++;
@ -910,7 +910,7 @@ public class RandomDataTest {
public void testNextGamma() throws Exception {
double[] quartiles;
long[] counts;
// Tests shape > 1, one case in the rejection sampling
quartiles = TestUtils.getDistributionQuartiles(new GammaDistribution(4, 2));
counts = new long[4];
@ -920,8 +920,8 @@ public class RandomDataTest {
TestUtils.updateCounts(value, counts, quartiles);
}
TestUtils.assertChiSquareAccept(expected, counts, 0.001);
// Tests shape <= 1, another case in the rejection sampling
// Tests shape <= 1, another case in the rejection sampling
quartiles = TestUtils.getDistributionQuartiles(new GammaDistribution(0.3, 3));
counts = new long[4];
randomData.reSeed(1000);
@ -934,7 +934,7 @@ public class RandomDataTest {
@Test
public void testNextT() throws Exception {
double[] quartiles = TestUtils.getDistributionQuartiles(new TDistributionImpl(10));
double[] quartiles = TestUtils.getDistributionQuartiles(new TDistribution(10));
long[] counts = new long[4];
randomData.reSeed(1000);
for (int i = 0; i < 1000; i++) {
@ -946,7 +946,7 @@ public class RandomDataTest {
@Test
public void testNextWeibull() throws Exception {
double[] quartiles = TestUtils.getDistributionQuartiles(new WeibullDistributionImpl(1.2, 2.1));
double[] quartiles = TestUtils.getDistributionQuartiles(new WeibullDistribution(1.2, 2.1));
long[] counts = new long[4];
randomData.reSeed(1000);
for (int i = 0; i < 1000; i++) {

View File

@ -18,7 +18,6 @@ package org.apache.commons.math.stat.correlation;
import org.apache.commons.math.TestUtils;
import org.apache.commons.math.distribution.TDistribution;
import org.apache.commons.math.distribution.TDistributionImpl;
import org.apache.commons.math.linear.RealMatrix;
import org.apache.commons.math.linear.BlockRealMatrix;
import org.apache.commons.math.util.FastMath;
@ -164,7 +163,7 @@ public class PearsonsCorrelationTest {
fillUpper(rPMatrix, 0d);
TestUtils.assertEquals("correlation p values", rPMatrix, corrInstance.getCorrelationPValues(), 10E-15);
}
/**
* Test p-value near 0. JIRA: MATH-371
*/
@ -176,7 +175,7 @@ public class PearsonsCorrelationTest {
* Post fix, p-values diminish smoothly, vanishing at dimension = 127.
* Tested value is ~1E-303.
*/
int dimension = 120;
int dimension = 120;
double[][] data = new double[dimension][2];
for (int i = 0; i < dimension; i++) {
data[i][0] = i;
@ -185,7 +184,7 @@ public class PearsonsCorrelationTest {
PearsonsCorrelation corrInstance = new PearsonsCorrelation(data);
Assert.assertTrue(corrInstance.getCorrelationPValues().getEntry(0, 1) > 0);
}
/**
* Constant column
@ -227,7 +226,7 @@ public class PearsonsCorrelationTest {
*/
@Test
public void testStdErrorConsistency() throws Exception {
TDistribution tDistribution = new TDistributionImpl(45);
TDistribution tDistribution = new TDistribution(45);
RealMatrix matrix = createRealMatrix(swissData, 47, 5);
PearsonsCorrelation corrInstance = new PearsonsCorrelation(matrix);
RealMatrix rValues = corrInstance.getCorrelationMatrix();