Don't Allocate Redundant Pages in BigArrays (#60201) (#60441)

The oversize algorithm was allocating more pages than necessary to accommodate `minTargetSize`.
For example, with a 16k page size, a 15k `minTargetSize` would result in a new size of 32k (2 pages),
even though a single page suffices. The gap between the minimum number of necessary pages and the
number of pages actually allocated keeps growing as sizes increase.
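To make the arithmetic concrete, here is a rough sketch of the old sizing logic (illustrative only: it mimics the ArrayUtil-style grow-by-an-eighth step followed by rounding up to a page multiple, and is not the actual BigArrays code):

    static long oldOverSize(long minTargetSize, int pageSize) {
        long newSize = minTargetSize + (minTargetSize >>> 3); // grow by ~12.5% up front
        if (newSize > pageSize) {
            newSize = newSize - (newSize % pageSize) + pageSize; // round up to a page multiple
        }
        return newSize;
    }

    // oldOverSize(15 * 1024, 16 * 1024) == 32768 -> two 16k pages,
    // even though a single 16384-byte page already covers the request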

I don't think there is much value in preemptively allocating extra pages by over-sizing aggressively.
Once the minimum target size exceeds a single page, the system behaves quite differently from a single
array, where over-sizing is what avoids repeated copying.
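To illustrate the point, here is a minimal, hypothetical sketch (not the actual BigArrays implementation) of why growing a paged structure copies no element data: only the table of page references is copied, and the existing pages are carried over as-is:

    static byte[][] growPages(byte[][] pages, int pageSize, int newPageCount) {
        // Copies only the page references, not the pages' contents.
        byte[][] newPages = java.util.Arrays.copyOf(pages, newPageCount);
        for (int i = pages.length; i < newPageCount; i++) {
            newPages[i] = new byte[pageSize]; // allocate just the newly needed pages
        }
        return newPages;
    }

Since growing later costs no more than growing now, allocating the minimum number of pages up front is the better default.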

Relates #60173, which led me to this when `BytesStreamOutput` would allocate a large number of
never-used pages during the serialization of repository metadata.
Armin Braun 2020-07-30 11:09:58 +02:00 committed by GitHub
parent a2c49a4f02
commit 3bf4c01d8e
2 changed files with 16 additions and 9 deletions

BigArrays.java

@@ -58,15 +58,10 @@ public class BigArrays {
         long newSize;
         if (minTargetSize < pageSize) {
-            newSize = ArrayUtil.oversize((int)minTargetSize, bytesPerElement);
+            newSize = Math.min(ArrayUtil.oversize((int) minTargetSize, bytesPerElement), pageSize);
         } else {
-            newSize = minTargetSize + (minTargetSize >>> 3);
-        }
-
-        if (newSize > pageSize) {
-            // round to a multiple of pageSize
-            newSize = newSize - (newSize % pageSize) + pageSize;
-            assert newSize % pageSize == 0;
+            final long pages = (minTargetSize + pageSize - 1) / pageSize; // ceil(minTargetSize/pageSize)
+            newSize = pages * pageSize;
         }
 
         return newSize;
@@ -811,4 +806,3 @@ public class BigArrays {
         return resize(array, newSize);
     }
-
 }
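Assuming the `BigArrays.overSize(long, int, int)` signature exercised by the test below, the new sizing for a 16k page works out as follows:

    BigArrays.overSize(15 * 1024, 16 * 1024, 1); // 16384: one page now suffices (previously 32768)
    BigArrays.overSize(17 * 1024, 16 * 1024, 1); // 32768: ceil(17408 / 16384) = 2 pages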

BigArraysTests.java

@@ -38,7 +38,9 @@ import java.util.List;
 import java.util.function.Function;
 
 import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThan;
 
 public class BigArraysTests extends ESTestCase {
@@ -389,6 +391,17 @@ public class BigArraysTests extends ESTestCase {
         }
     }
 
+    public void testOverSizeUsesMinPageCount() {
+        final int pageSize = 1 << (randomIntBetween(2, 16));
+        final int minSize = randomIntBetween(1, pageSize) * randomIntBetween(1, 100);
+        final long size = BigArrays.overSize(minSize, pageSize, 1);
+        assertThat(size, greaterThanOrEqualTo((long) minSize));
+        if (size >= pageSize) {
+            assertThat(size + " is a multiple of " + pageSize, size % pageSize, equalTo(0L));
+        }
+        assertThat(size - minSize, lessThan((long) pageSize));
+    }
+
     private List<BigArraysHelper> bigArrayCreators(final long maxSize, final boolean withBreaking) {
         final BigArrays byteBigArrays = newBigArraysInstance(maxSize, withBreaking);
         BigArraysHelper byteHelper = new BigArraysHelper(byteBigArrays,