MATH-1058
Precision improvements by using "expm1" and "log1p". Thanks to Sean Owen.

git-svn-id: https://svn.apache.org/repos/asf/commons/proper/math/trunk@1538998 13f79535-47bb-0310-9956-ffa450edef68

This commit is contained in:
parent 67fc63b994
commit 4ebd967c96

@@ -51,6 +51,10 @@ If the output is not quite correct, check for invisible trailing spaces!
   </properties>
   <body>
     <release version="3.3" date="TBD" description="TBD">
+      <action dev="erans" type="fix" issue="MATH-1058" due-to="Sean Owen">
+        Precision improvements (for small values of the argument) in "Beta" function
+        and in "LogNormalDistribution" and "WeibullDistribution".
+      </action>
       <action dev="tn" type="fix" issue="MATH-1055" due-to="Sean Owen">
        Fixed some invalid links inside javadoc and added missing deprecated annotations.
       </action>
@@ -289,7 +289,7 @@ public class LogNormalDistribution extends AbstractRealDistribution {
     public double getNumericalVariance() {
         final double s = shape;
         final double ss = s * s;
-        return (FastMath.exp(ss) - 1) * FastMath.exp(2 * scale + ss);
+        return (FastMath.expm1(ss)) * FastMath.exp(2 * scale + ss);
     }
 
     /**
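For a very small shape, ss falls far below machine epsilon, so FastMath.exp(ss) rounds to exactly 1.0 and the old expression cancels to zero; expm1 keeps the leading term of exp(ss) - 1. A minimal sketch of the effect, using java.lang.Math for illustration (FastMath behaves the same way for these arguments; the class name is just a throwaway for the demo):

public class Expm1Demo {
    public static void main(String[] args) {
        double ss = 1e-18;                     // e.g. shape = 1e-9, so ss = shape * shape
        System.out.println(Math.exp(ss) - 1);  // 0.0 -- exp(ss) rounds to exactly 1.0
        System.out.println(Math.expm1(ss));    // ~1.0E-18 -- the leading term survives
    }
}

This is the regime exercised by the new testTinyVariance case further down (shape 1e-9, expected variance about 1e-18).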
@@ -221,7 +221,7 @@ public class WeibullDistribution extends AbstractRealDistribution {
         } else if (p == 1) {
             ret = Double.POSITIVE_INFINITY;
         } else {
-            ret = scale * FastMath.pow(-FastMath.log(1.0 - p), 1.0 / shape);
+            ret = scale * FastMath.pow(-FastMath.log1p(-p), 1.0 / shape);
         }
         return ret;
     }
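The quantile fix is the same idea: for p close to zero, 1.0 - p rounds to exactly 1.0, FastMath.log(1.0 - p) returns 0, and the computed quantile collapses to 0, whereas log1p(-p) evaluates log(1 - p) without ever forming 1 - p. A quick illustration with java.lang.Math (a throwaway snippet, not part of the patch):

public class Log1pDemo {
    public static void main(String[] args) {
        double p = 1e-17;                       // probability far below machine epsilon
        System.out.println(Math.log(1.0 - p));  // 0.0 -- 1.0 - p rounds to exactly 1.0
        System.out.println(Math.log1p(-p));     // ~-1.0E-17 -- the tiny probability survives
    }
}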
@@ -218,7 +218,7 @@ public class Beta {
                     return 1.0;
                 }
             };
-        ret = FastMath.exp((a * FastMath.log(x)) + (b * FastMath.log(1.0 - x)) -
+        ret = FastMath.exp((a * FastMath.log(x)) + (b * FastMath.log1p(-x)) -
             FastMath.log(a) - logBeta(a, b)) *
             1.0 / fraction.evaluate(x, epsilon, maxIterations);
     }
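In regularizedBeta the log(1 - x) term is multiplied by b, so for a large second parameter even a tiny x carries real probability mass; with plain log the term is exactly 0 and that mass is lost. A rough check tied to the new BetaTest case further down (x = 1e-17, a = 1, b = 1e12, where regularizedBeta(x, 1, b) mathematically reduces to 1 - (1 - x)^b), again using java.lang.Math as a stand-in and a made-up class name:

public class BetaTailDemo {
    public static void main(String[] args) {
        double x = 1e-17, b = 1e12;
        // 1 - (1 - x)^b computed two ways: via log(1 - x) and via log1p(-x)
        System.out.println(1.0 - Math.exp(b * Math.log(1.0 - x)));  // 0.0 -- log(1 - x) is exactly 0
        System.out.println(-Math.expm1(b * Math.log1p(-x)));        // ~9.99995e-6, in line with R's pbeta(1e-17, 1, 1e12)
    }
}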
@@ -243,4 +243,12 @@ public class LogNormalDistributionTest extends RealDistributionAbstractTest {
         Assert.assertEquals(dist.getNumericalMean(), 0.0, tol);
         Assert.assertEquals(dist.getNumericalVariance(), 0.0, tol);
     }
+
+    @Test
+    public void testTinyVariance() {
+        LogNormalDistribution dist = new LogNormalDistribution(0, 1e-9);
+        double t = dist.getNumericalVariance();
+        Assert.assertEquals(1e-18, t, 1e-20);
+    }
+
 }
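The expected value follows from the log-normal variance formula (e^(s^2) - 1) * e^(2*m + s^2): with scale m = 0 and shape s = 1e-9 it is about 1e-18, which the pre-fix code returned as exactly 0. A quick standalone cross-check, illustrative only (class name is made up):

public class LogNormalVarianceCheck {
    public static void main(String[] args) {
        double mu = 0.0, sigma = 1e-9;
        double ss = sigma * sigma;
        System.out.println(Math.expm1(ss) * Math.exp(2 * mu + ss));      // ~1.0E-18 -- fixed formula
        System.out.println((Math.exp(ss) - 1) * Math.exp(2 * mu + ss));  // 0.0 -- old formula cancels to zero
    }
}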
@@ -63,6 +63,16 @@ public class WeibullDistributionTest extends RealDistributionAbstractTest {
 
     //---------------------------- Additional test cases -------------------------
 
+    @Test
+    public void testInverseCumulativeProbabilitySmallPAccuracy() {
+        WeibullDistribution dist = new WeibullDistribution(2, 3);
+        double t = dist.inverseCumulativeProbability(1e-17);
+        // Analytically, answer is solution to 1e-17 = 1-exp(-(x/3)^2)
+        // x = sqrt(-9*log(1-1e-17))
+        // If we're not careful, answer will be 0. Answer below is computed with care in Octave:
+        Assert.assertEquals(9.48683298050514e-9, t, 1e-17);
+    }
+
     @Test
     public void testInverseCumulativeProbabilityExtremes() {
         setInverseCumulativeTestPoints(new double[] {0.0, 1.0});
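The hard-coded expectation can be reproduced from the Weibull quantile formula x = scale * (-log(1 - p))^(1/shape), evaluated with log1p so that p = 1e-17 is not lost. A quick standalone check (illustrative only, not part of the test suite):

public class WeibullQuantileCheck {
    public static void main(String[] args) {
        double scale = 3.0, shape = 2.0, p = 1e-17;
        double x = scale * Math.pow(-Math.log1p(-p), 1.0 / shape);
        System.out.println(x);  // ~9.4868329805e-9, consistent with the Octave value in the test above
    }
}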
@@ -134,6 +134,13 @@ public class BetaTest {
         testRegularizedBeta(0.75, 0.5, 1.0, 2.0);
     }
 
+    @Test
+    public void testRegularizedBetaTinyArgument() {
+        double actual = Beta.regularizedBeta(1e-17, 1.0, 1e12);
+        // This value is from R: pbeta(1e-17,1,1e12)
+        TestUtils.assertEquals(9.999950000166648e-6, actual, 1e-16);
+    }
+
     @Test
     public void testLogBetaNanPositive() {
         testLogBeta(Double.NaN, Double.NaN, 2.0);