Move page size constants to PageCacheRecycler (#36524)

`PageCacheRecycler` is the class that creates and holds pages of arrays
for various uses. `BigArrays` is just one user of these pages. This
commit moves the constants that define the recycler's page sizes onto
the recycler class itself.
Tim Brooks authored on 2018-12-12 07:00:50 -07:00, committed by GitHub
parent bdb1e0e04e
commit e63d52af63
19 changed files with 80 additions and 74 deletions
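Before the per-file diff, a minimal sketch of what the move means for callers. The helper class below is hypothetical (it does not appear in this commit); the constant names and values are taken from the diff itself, where pages stay 16KB:

import org.elasticsearch.common.util.PageCacheRecycler;

// Hypothetical example, not part of this commit: allocating scratch pages
// from the relocated constants. Call sites that previously read
// BigArrays.BYTE_PAGE_SIZE now read PageCacheRecycler.BYTE_PAGE_SIZE;
// the values are unchanged (PAGE_SIZE_IN_BYTES == 1 << 14, i.e. 16KB).
public class PageSizedScratch {

    public static byte[] newBytePage() {
        return new byte[PageCacheRecycler.BYTE_PAGE_SIZE]; // 16384 bytes
    }

    public static long[] newLongPage() {
        // LONG_PAGE_SIZE == PAGE_SIZE_IN_BYTES / Long.BYTES == 2048
        return new long[PageCacheRecycler.LONG_PAGE_SIZE];
    }
}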

BytesChannelContextTests.java

@@ -20,7 +20,7 @@
package org.elasticsearch.nio;
import org.elasticsearch.common.CheckedFunction;
-import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
@@ -85,7 +85,7 @@ public class BytesChannelContextTests extends ESTestCase {
assertEquals(messageLength, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
verify(readConsumer, times(1)).apply(channelBuffer);
}
@@ -103,7 +103,7 @@ public class BytesChannelContextTests extends ESTestCase {
assertEquals(bytes.length, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
verify(readConsumer, times(2)).apply(channelBuffer);
}
@@ -129,7 +129,7 @@ public class BytesChannelContextTests extends ESTestCase {
assertEquals(messageLength, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
verify(readConsumer, times(2)).apply(channelBuffer);
}

InboundChannelBufferTests.java

@@ -19,7 +19,7 @@
package org.elasticsearch.nio;
-import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.test.ESTestCase;
import java.nio.ByteBuffer;
@@ -29,9 +29,9 @@ import java.util.function.Supplier;
public class InboundChannelBufferTests extends ESTestCase {
-private static final int PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES;
+private static final int PAGE_SIZE = PageCacheRecycler.PAGE_SIZE_IN_BYTES;
private final Supplier<InboundChannelBuffer.Page> defaultPageSupplier = () ->
-new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {
+new InboundChannelBuffer.Page(ByteBuffer.allocate(PageCacheRecycler.BYTE_PAGE_SIZE), () -> {
});
public void testNewBufferNoPages() {

Netty4UtilsTests.java

@@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
@@ -36,7 +37,7 @@ import java.io.IOException;
public class Netty4UtilsTests extends ESTestCase {
-private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
+private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE;
private final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), CircuitBreaker.REQUEST);
public void testToChannelBufferWithEmptyRef() throws IOException {

PagedBytesReference.java

@@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.common.util.PageCacheRecycler;
import java.io.IOException;
@@ -32,7 +33,7 @@ import java.io.IOException;
*/
public class PagedBytesReference extends BytesReference {
-private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
+private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE;
private final BigArrays bigarrays;
protected final ByteArray byteArray;

BytesStreamOutput.java

@@ -23,6 +23,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.PagedBytesReference;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.common.util.PageCacheRecycler;
import java.io.IOException;
@@ -98,8 +99,8 @@ public class BytesStreamOutput extends BytesStream {
@Override
public void reset() {
// shrink list of pages
-if (bytes.size() > BigArrays.PAGE_SIZE_IN_BYTES) {
-bytes = bigArrays.resize(bytes, BigArrays.PAGE_SIZE_IN_BYTES);
+if (bytes.size() > PageCacheRecycler.PAGE_SIZE_IN_BYTES) {
+bytes = bigArrays.resize(bytes, PageCacheRecycler.PAGE_SIZE_IN_BYTES);
}
// go back to start

ReleasableBytesStreamOutput.java

@@ -24,6 +24,7 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.common.util.PageCacheRecycler;
/**
* An bytes stream output that allows providing a {@link BigArrays} instance
@@ -40,7 +41,7 @@ public class ReleasableBytesStreamOutput extends BytesStreamOutput
private Releasable releasable;
public ReleasableBytesStreamOutput(BigArrays bigarrays) {
-this(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays);
+this(PageCacheRecycler.PAGE_SIZE_IN_BYTES, bigarrays);
}
public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) {

AbstractBigArray.java

@@ -115,36 +115,36 @@ abstract class AbstractBigArray extends AbstractArray {
protected final byte[] newBytePage(int page) {
if (recycler != null) {
final Recycler.V<byte[]> v = recycler.bytePage(clearOnResize);
-return registerNewPage(v, page, BigArrays.BYTE_PAGE_SIZE);
+return registerNewPage(v, page, PageCacheRecycler.BYTE_PAGE_SIZE);
} else {
-return new byte[BigArrays.BYTE_PAGE_SIZE];
+return new byte[PageCacheRecycler.BYTE_PAGE_SIZE];
}
}
protected final int[] newIntPage(int page) {
if (recycler != null) {
final Recycler.V<int[]> v = recycler.intPage(clearOnResize);
-return registerNewPage(v, page, BigArrays.INT_PAGE_SIZE);
+return registerNewPage(v, page, PageCacheRecycler.INT_PAGE_SIZE);
} else {
-return new int[BigArrays.INT_PAGE_SIZE];
+return new int[PageCacheRecycler.INT_PAGE_SIZE];
}
}
protected final long[] newLongPage(int page) {
if (recycler != null) {
final Recycler.V<long[]> v = recycler.longPage(clearOnResize);
-return registerNewPage(v, page, BigArrays.LONG_PAGE_SIZE);
+return registerNewPage(v, page, PageCacheRecycler.LONG_PAGE_SIZE);
} else {
-return new long[BigArrays.LONG_PAGE_SIZE];
+return new long[PageCacheRecycler.LONG_PAGE_SIZE];
}
}
protected final Object[] newObjectPage(int page) {
if (recycler != null) {
final Recycler.V<Object[]> v = recycler.objectPage();
-return registerNewPage(v, page, BigArrays.OBJECT_PAGE_SIZE);
+return registerNewPage(v, page, PageCacheRecycler.OBJECT_PAGE_SIZE);
} else {
-return new Object[BigArrays.OBJECT_PAGE_SIZE];
+return new Object[PageCacheRecycler.OBJECT_PAGE_SIZE];
}
}

BigArrays.java

@@ -37,17 +37,10 @@ public class BigArrays {
public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(null, null, CircuitBreaker.REQUEST);
-/** Page size in bytes: 16KB */
-public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
-public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES;
-public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES;
-public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES;
-public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
/** Returns the next size to grow when working with parallel arrays that
* may have different page sizes or number of bytes per element. */
public static long overSize(long minTargetSize) {
-return overSize(minTargetSize, PAGE_SIZE_IN_BYTES / 8, 1);
+return overSize(minTargetSize, PageCacheRecycler.PAGE_SIZE_IN_BYTES / 8, 1);
}
/** Return the next size to grow to that is &gt;= <code>minTargetSize</code>.
@@ -467,12 +460,12 @@ public class BigArrays {
* @param clearOnResize whether values should be set to 0 on initialization and resize
*/
public ByteArray newByteArray(long size, boolean clearOnResize) {
-if (size > BYTE_PAGE_SIZE) {
+if (size > PageCacheRecycler.BYTE_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigByteArray.estimateRamBytes(size), false);
return new BigByteArray(size, this, clearOnResize);
-} else if (size >= BYTE_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.BYTE_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<byte[]> page = recycler.bytePage(clearOnResize);
return validate(new ByteArrayWrapper(this, page.v(), size, page, clearOnResize));
} else {
@@ -508,7 +501,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, BYTE_PAGE_SIZE, 1);
+final long newSize = overSize(minSize, PageCacheRecycler.BYTE_PAGE_SIZE, 1);
return resize(array, newSize);
}
@@ -551,12 +544,12 @@
* @param clearOnResize whether values should be set to 0 on initialization and resize
*/
public IntArray newIntArray(long size, boolean clearOnResize) {
-if (size > INT_PAGE_SIZE) {
+if (size > PageCacheRecycler.INT_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigIntArray.estimateRamBytes(size), false);
return new BigIntArray(size, this, clearOnResize);
-} else if (size >= INT_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.INT_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<int[]> page = recycler.intPage(clearOnResize);
return validate(new IntArrayWrapper(this, page.v(), size, page, clearOnResize));
} else {
@@ -593,7 +586,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, INT_PAGE_SIZE, Integer.BYTES);
+final long newSize = overSize(minSize, PageCacheRecycler.INT_PAGE_SIZE, Integer.BYTES);
return resize(array, newSize);
}
@@ -603,12 +596,12 @@
* @param clearOnResize whether values should be set to 0 on initialization and resize
*/
public LongArray newLongArray(long size, boolean clearOnResize) {
-if (size > LONG_PAGE_SIZE) {
+if (size > PageCacheRecycler.LONG_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigLongArray.estimateRamBytes(size), false);
return new BigLongArray(size, this, clearOnResize);
-} else if (size >= LONG_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.LONG_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<long[]> page = recycler.longPage(clearOnResize);
return validate(new LongArrayWrapper(this, page.v(), size, page, clearOnResize));
} else {
@@ -645,7 +638,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
+final long newSize = overSize(minSize, PageCacheRecycler.LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize);
}
@@ -655,12 +648,12 @@
* @param clearOnResize whether values should be set to 0 on initialization and resize
*/
public DoubleArray newDoubleArray(long size, boolean clearOnResize) {
-if (size > LONG_PAGE_SIZE) {
+if (size > PageCacheRecycler.LONG_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigDoubleArray.estimateRamBytes(size), false);
return new BigDoubleArray(size, this, clearOnResize);
-} else if (size >= LONG_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.LONG_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<long[]> page = recycler.longPage(clearOnResize);
return validate(new DoubleArrayWrapper(this, page.v(), size, page, clearOnResize));
} else {
@@ -694,7 +687,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
+final long newSize = overSize(minSize, PageCacheRecycler.LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize);
}
@@ -704,12 +697,12 @@
* @param clearOnResize whether values should be set to 0 on initialization and resize
*/
public FloatArray newFloatArray(long size, boolean clearOnResize) {
-if (size > INT_PAGE_SIZE) {
+if (size > PageCacheRecycler.INT_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigFloatArray.estimateRamBytes(size), false);
return new BigFloatArray(size, this, clearOnResize);
-} else if (size >= INT_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.INT_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<int[]> page = recycler.intPage(clearOnResize);
return validate(new FloatArrayWrapper(this, page.v(), size, page, clearOnResize));
} else {
@@ -743,7 +736,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, INT_PAGE_SIZE, Float.BYTES);
+final long newSize = overSize(minSize, PageCacheRecycler.INT_PAGE_SIZE, Float.BYTES);
return resize(array, newSize);
}
@@ -752,12 +745,12 @@
* @param size the initial length of the array
*/
public <T> ObjectArray<T> newObjectArray(long size) {
-if (size > OBJECT_PAGE_SIZE) {
+if (size > PageCacheRecycler.OBJECT_PAGE_SIZE) {
// when allocating big arrays, we want to first ensure we have the capacity by
// checking with the circuit breaker before attempting to allocate
adjustBreaker(BigObjectArray.estimateRamBytes(size), false);
return new BigObjectArray<>(size, this);
-} else if (size >= OBJECT_PAGE_SIZE / 2 && recycler != null) {
+} else if (size >= PageCacheRecycler.OBJECT_PAGE_SIZE / 2 && recycler != null) {
final Recycler.V<Object[]> page = recycler.objectPage();
return validate(new ObjectArrayWrapper<>(this, page.v(), size, page));
} else {
@@ -785,7 +778,7 @@
if (minSize <= array.size()) {
return array;
}
-final long newSize = overSize(minSize, OBJECT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+final long newSize = overSize(minSize, PageCacheRecycler.OBJECT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
return resize(array, newSize);
}
}

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.BYTE_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.BYTE_PAGE_SIZE;
/**
* Byte array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

BigDoubleArray.java

@@ -24,7 +24,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.LONG_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.LONG_PAGE_SIZE;
/**
* Double array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

BigFloatArray.java

@@ -24,7 +24,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.INT_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.INT_PAGE_SIZE;
/**
* Float array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

BigIntArray.java

@@ -24,7 +24,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.INT_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.INT_PAGE_SIZE;
/**
* Int array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

BigLongArray.java

@@ -24,7 +24,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.LONG_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.LONG_PAGE_SIZE;
/**
* Long array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

BigObjectArray.java

@@ -24,7 +24,7 @@ import org.apache.lucene.util.RamUsageEstimator;
import java.util.Arrays;
-import static org.elasticsearch.common.util.BigArrays.OBJECT_PAGE_SIZE;
+import static org.elasticsearch.common.util.PageCacheRecycler.OBJECT_PAGE_SIZE;
/**
* Int array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of

PageCacheRecycler.java

@@ -19,6 +19,7 @@
package org.elasticsearch.common.util;
+import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
@@ -54,6 +55,13 @@ public class PageCacheRecycler implements Releasable {
public static final Setting<Double> WEIGHT_OBJECTS_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope);
+/** Page size in bytes: 16KB */
+public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
+public static final int OBJECT_PAGE_SIZE = PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+public static final int LONG_PAGE_SIZE = PAGE_SIZE_IN_BYTES / Long.BYTES;
+public static final int INT_PAGE_SIZE = PAGE_SIZE_IN_BYTES / Integer.BYTES;
+public static final int BYTE_PAGE_SIZE = PAGE_SIZE_IN_BYTES;
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;
private final Recycler<long[]> longPage;
@@ -94,13 +102,13 @@
final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);
final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
-final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
+final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / PAGE_SIZE_IN_BYTES);
final int maxBytePageCount = (int) (bytesWeight * maxPageCount / totalWeight);
bytePage = build(type, maxBytePageCount, availableProcessors, new AbstractRecyclerC<byte[]>() {
@Override
public byte[] newInstance(int sizing) {
-return new byte[BigArrays.BYTE_PAGE_SIZE];
+return new byte[BYTE_PAGE_SIZE];
}
@Override
public void recycle(byte[] value) {
@@ -112,7 +120,7 @@
intPage = build(type, maxIntPageCount, availableProcessors, new AbstractRecyclerC<int[]>() {
@Override
public int[] newInstance(int sizing) {
-return new int[BigArrays.INT_PAGE_SIZE];
+return new int[INT_PAGE_SIZE];
}
@Override
public void recycle(int[] value) {
@@ -124,7 +132,7 @@
longPage = build(type, maxLongPageCount, availableProcessors, new AbstractRecyclerC<long[]>() {
@Override
public long[] newInstance(int sizing) {
-return new long[BigArrays.LONG_PAGE_SIZE];
+return new long[LONG_PAGE_SIZE];
}
@Override
public void recycle(long[] value) {
@@ -136,7 +144,7 @@
objectPage = build(type, maxObjectPageCount, availableProcessors, new AbstractRecyclerC<Object[]>() {
@Override
public Object[] newInstance(int sizing) {
-return new Object[BigArrays.OBJECT_PAGE_SIZE];
+return new Object[OBJECT_PAGE_SIZE];
}
@Override
public void recycle(Object[] value) {
@@ -144,7 +152,7 @@
}
});
-assert BigArrays.PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxIntPageCount + maxLongPageCount + maxObjectPageCount) <= limit;
+assert PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxIntPageCount + maxLongPageCount + maxObjectPageCount) <= limit;
}
public Recycler.V<byte[]> bytePage(boolean clear) {

BytesStreamsTests.java

@@ -26,7 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.test.ESTestCase;
import org.joda.time.DateTimeZone;
@@ -128,7 +128,7 @@ public class BytesStreamsTests extends ESTestCase {
public void testSingleFullPageBulkWrite() throws Exception {
BytesStreamOutput out = new BytesStreamOutput();
-int expectedSize = BigArrays.BYTE_PAGE_SIZE;
+int expectedSize = PageCacheRecycler.BYTE_PAGE_SIZE;
byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
// write in bulk
@@ -144,7 +144,7 @@
BytesStreamOutput out = new BytesStreamOutput();
int initialOffset = 10;
-int additionalLength = BigArrays.BYTE_PAGE_SIZE;
+int additionalLength = PageCacheRecycler.BYTE_PAGE_SIZE;
byte[] expectedData = randomizedByteArrayWithSize(initialOffset + additionalLength);
// first create initial offset
@@ -163,7 +163,7 @@
BytesStreamOutput out = new BytesStreamOutput();
int initialOffset = 10;
-int additionalLength = BigArrays.BYTE_PAGE_SIZE * 2;
+int additionalLength = PageCacheRecycler.BYTE_PAGE_SIZE * 2;
byte[] expectedData = randomizedByteArrayWithSize(initialOffset + additionalLength);
out.writeBytes(expectedData, 0, initialOffset);
assertEquals(initialOffset, out.size());
@@ -181,7 +181,7 @@
public void testSingleFullPage() throws Exception {
BytesStreamOutput out = new BytesStreamOutput();
-int expectedSize = BigArrays.BYTE_PAGE_SIZE;
+int expectedSize = PageCacheRecycler.BYTE_PAGE_SIZE;
byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
// write byte-by-byte
@@ -198,7 +198,7 @@
public void testOneFullOneShortPage() throws Exception {
BytesStreamOutput out = new BytesStreamOutput();
-int expectedSize = BigArrays.BYTE_PAGE_SIZE + 10;
+int expectedSize = PageCacheRecycler.BYTE_PAGE_SIZE + 10;
byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
// write byte-by-byte
@@ -215,7 +215,7 @@
public void testTwoFullOneShortPage() throws Exception {
BytesStreamOutput out = new BytesStreamOutput();
-int expectedSize = (BigArrays.BYTE_PAGE_SIZE * 2) + 1;
+int expectedSize = (PageCacheRecycler.BYTE_PAGE_SIZE * 2) + 1;
byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
// write byte-by-byte
@@ -236,9 +236,9 @@
assertEquals(position, out.position());
out.seek(position += 10);
-out.seek(position += BigArrays.BYTE_PAGE_SIZE);
-out.seek(position += BigArrays.BYTE_PAGE_SIZE + 10);
-out.seek(position += BigArrays.BYTE_PAGE_SIZE * 2);
+out.seek(position += PageCacheRecycler.BYTE_PAGE_SIZE);
+out.seek(position += PageCacheRecycler.BYTE_PAGE_SIZE + 10);
+out.seek(position += PageCacheRecycler.BYTE_PAGE_SIZE * 2);
assertEquals(position, out.position());
assertEquals(position, BytesReference.toBytes(out.bytes()).length);

BigArraysTests.java

@@ -258,7 +258,7 @@ public class BigArraysTests extends ESTestCase {
random().nextBytes(array1);
final ByteArray array2 = bigArrays.newByteArray(array1.length, randomBoolean());
for (int i = 0; i < array1.length; ) {
-final int len = Math.min(array1.length - i, randomBoolean() ? randomInt(10) : randomInt(3 * BigArrays.BYTE_PAGE_SIZE));
+final int len = Math.min(array1.length - i, randomBoolean() ? randomInt(10) : randomInt(3 * PageCacheRecycler.BYTE_PAGE_SIZE));
array2.set(i, array1, i, len);
i += len;
}

AbstractBytesReferenceTestCase.java

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
@@ -37,7 +38,7 @@ import java.util.Arrays;
public abstract class AbstractBytesReferenceTestCase extends ESTestCase {
-protected static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
+protected static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE;
protected final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), CircuitBreaker.REQUEST);
public void testGet() throws IOException {

SSLChannelContextTests.java

@@ -6,7 +6,7 @@
package org.elasticsearch.xpack.security.transport.nio;
import org.elasticsearch.common.CheckedFunction;
-import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.nio.BytesWriteHandler;
import org.elasticsearch.nio.FlushReadyWrite;
import org.elasticsearch.nio.InboundChannelBuffer;
@@ -86,7 +86,7 @@ public class SSLChannelContextTests extends ESTestCase {
assertEquals(messageLength, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
verify(readConsumer, times(1)).apply(channelBuffer);
}
@@ -101,7 +101,7 @@
assertEquals(bytes.length, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
verify(readConsumer, times(2)).apply(channelBuffer);
}
@@ -124,7 +124,7 @@
assertEquals(messageLength, context.read());
assertEquals(0, channelBuffer.getIndex());
-assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
+assertEquals(PageCacheRecycler.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
verify(readConsumer, times(2)).apply(channelBuffer);
}