mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-03-09 14:34:43 +00:00
Refactor cache recycling so that it only caches large arrays (pages) that can later be used to build more complex data-structures such as hash tables. - QueueRecycler now takes a limit like other non-trivial recyclers. - New PageCacheRecycler (inspired by CacheRecycler) has the ability to cache byte[], int[], long[], double[] or Object[] arrays using a fixed amount of memory (either globally or per-thread depending on the Recycler impl, eg. queue is global while thread_local is per-thread). - Paged arrays in o.e.common.util can now optionally take a PageCacheRecycler to reuse existing pages. - All aggregators' data-structures now use PageCacheRecycler: - for all arrays (counts, mins, maxes, ...) - LongHash can now take a PageCacheRecycler - there is a new BytesRefHash (inspired by Lucene's, but quite different, still; for instance it cheats on BytesRef comparisons by using Unsafe) that also takes a PageCacheRecycler Close #4557
111 lines
3.9 KiB
Java
111 lines
3.9 KiB
Java
/*
|
|
* Licensed to ElasticSearch and Shay Banon under one
|
|
* or more contributor license agreements. See the NOTICE file
|
|
* distributed with this work for additional information
|
|
* regarding copyright ownership. ElasticSearch licenses this
|
|
* file to you under the Apache License, Version 2.0 (the
|
|
* "License"); you may not use this file except in compliance
|
|
* with the License. You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing,
|
|
* software distributed under the License is distributed on an
|
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
* KIND, either express or implied. See the License for the
|
|
* specific language governing permissions and limitations
|
|
* under the License.
|
|
*/
|
|
|
|
package org.elasticsearch.common.util;
|
|
|
|
import com.google.common.base.Preconditions;
|
|
import org.apache.lucene.util.ArrayUtil;
|
|
import org.apache.lucene.util.RamUsageEstimator;
|
|
import org.elasticsearch.cache.recycler.PageCacheRecycler;
|
|
|
|
import java.util.Arrays;
|
|
|
|
import static org.elasticsearch.common.util.BigArrays.LONG_PAGE_SIZE;
|
|
|
|
/**
|
|
* Long array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
|
|
* configurable length.
|
|
*/
|
|
final class BigLongArray extends AbstractBigArray implements LongArray {
|
|
|
|
private long[][] pages;
|
|
|
|
/** Constructor. */
|
|
public BigLongArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
|
|
super(LONG_PAGE_SIZE, recycler, clearOnResize);
|
|
this.size = size;
|
|
pages = new long[numPages(size)][];
|
|
for (int i = 0; i < pages.length; ++i) {
|
|
pages[i] = newLongPage(i);
|
|
}
|
|
}
|
|
|
|
@Override
|
|
public long get(long index) {
|
|
final int pageIndex = pageIndex(index);
|
|
final int indexInPage = indexInPage(index);
|
|
return pages[pageIndex][indexInPage];
|
|
}
|
|
|
|
@Override
|
|
public long set(long index, long value) {
|
|
final int pageIndex = pageIndex(index);
|
|
final int indexInPage = indexInPage(index);
|
|
final long[] page = pages[pageIndex];
|
|
final long ret = page[indexInPage];
|
|
page[indexInPage] = value;
|
|
return ret;
|
|
}
|
|
|
|
@Override
|
|
public long increment(long index, long inc) {
|
|
final int pageIndex = pageIndex(index);
|
|
final int indexInPage = indexInPage(index);
|
|
return pages[pageIndex][indexInPage] += inc;
|
|
}
|
|
|
|
@Override
|
|
protected int numBytesPerElement() {
|
|
return RamUsageEstimator.NUM_BYTES_LONG;
|
|
}
|
|
|
|
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
|
|
public void resize(long newSize) {
|
|
final int numPages = numPages(newSize);
|
|
if (numPages > pages.length) {
|
|
pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
|
|
}
|
|
for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
|
|
pages[i] = newLongPage(i);
|
|
}
|
|
for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
|
|
pages[i] = null;
|
|
releasePage(i);
|
|
}
|
|
this.size = newSize;
|
|
}
|
|
|
|
@Override
|
|
public void fill(long fromIndex, long toIndex, long value) {
|
|
Preconditions.checkArgument(fromIndex <= toIndex);
|
|
final int fromPage = pageIndex(fromIndex);
|
|
final int toPage = pageIndex(toIndex - 1);
|
|
if (fromPage == toPage) {
|
|
Arrays.fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value);
|
|
} else {
|
|
Arrays.fill(pages[fromPage], indexInPage(fromIndex), pages[fromPage].length, value);
|
|
for (int i = fromPage + 1; i < toPage; ++i) {
|
|
Arrays.fill(pages[i], value);
|
|
}
|
|
Arrays.fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value);
|
|
}
|
|
}
|
|
|
|
}
|