Limit guava caches to 31.9GB
Guava's caches have overflow issues around 32GB with our default segment count of 16 and a weight of 1 unit per byte, so we give them 100MB of headroom: 31.9GB. This limits the sizes of both the field data and filter caches, the two large Guava caches. Closes #6268
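For context, here is a minimal sketch (not part of the commit) of the arithmetic behind the limit. Per the commit message, Guava tracks each segment's weight in an integer and the caches use 16 segments at 1 weight unit per byte, so a 32GB maximum leaves each segment a budget of exactly 2^31 units, one more than Integer.MAX_VALUE:

// Hypothetical illustration of the overflow arithmetic; names are not from the commit.
public class OverflowArithmetic {
    public static void main(String[] args) {
        long maxWeight = 32L * 1024 * 1024 * 1024;   // 32GB at 1 weight unit per byte
        long perSegment = maxWeight / 16;            // weight budget per segment
        System.out.println(perSegment);                     // 2147483648 = 2^31
        System.out.println(perSegment > Integer.MAX_VALUE); // true, by exactly 1
    }
}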
parent a836496e57
commit 0ff0985e01
ByteSizeValue.java:

@@ -34,6 +34,14 @@ import java.util.Locale;
  *
  */
 public class ByteSizeValue implements Serializable, Streamable {
+    /**
+     * Largest size possible for Guava caches to prevent overflow. Guava's
+     * caches use integers to track weight per segment and we always use 16
+     * segments so caches of 32GB would always overflow that integer and they'd
+     * never be evicted by size. We set this to 31.9GB, leaving 100MB of
+     * headroom to prevent overflow.
+     */
+    public static final ByteSizeValue MAX_GUAVA_CACHE_SIZE = new ByteSizeValue(32 * ByteSizeUnit.C3 - 100 * ByteSizeUnit.C2);
 
     private long size;
 
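A quick sanity check of the new constant, assuming ByteSizeUnit.C2 and ByteSizeUnit.C3 hold the byte counts of one MB (2^20) and one GB (2^30); those unit constants are not shown in this diff, so treat them as assumptions:

// Assumed: ByteSizeUnit.C2 == 1MB and ByteSizeUnit.C3 == 1GB in bytes.
public class MaxCacheSizeCheck {
    public static void main(String[] args) {
        long c2 = 1L << 20;                    // 1MB
        long c3 = 1L << 30;                    // 1GB
        long max = 32 * c3 - 100 * c2;         // the MAX_GUAVA_CACHE_SIZE expression
        System.out.println(max);                           // 34254880768 bytes, i.e. ~31.9GB
        System.out.println(max / 16 < Integer.MAX_VALUE);  // true: a segment's weight fits in an int again
    }
}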
IndicesFilterCache.java:

@@ -123,7 +123,16 @@ public class IndicesFilterCache extends AbstractComponent implements RemovalList
     }
 
     private void computeSizeInBytes() {
-        this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes();
+        long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes();
+        if (sizeInBytes > ByteSizeValue.MAX_GUAVA_CACHE_SIZE.bytes()) {
+            logger.warn("reducing requested filter cache size of [{}] to the maximum allowed size of [{}]", new ByteSizeValue(sizeInBytes),
+                    ByteSizeValue.MAX_GUAVA_CACHE_SIZE);
+            sizeInBytes = ByteSizeValue.MAX_GUAVA_CACHE_SIZE.bytes();
+            // Even though it feels wrong for size and sizeInBytes to get out of
+            // sync we don't update size here because it might cause the cache
+            // to be rebuilt every time new settings are applied.
+        }
+        this.sizeInBytes = sizeInBytes;
     }
 
     public void addReaderKeyToClean(Object readerKey) {
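Both caches now apply the same guard, which boils down to this hypothetical helper (the name, the plain System.err logging, and the inlined constant are illustrative, not the commit's code):

public class CacheSizeClamp {
    // 31.9GB, the largest weight a 16-segment Guava cache can track without overflow.
    static final long MAX_GUAVA_CACHE_SIZE = 32L * (1L << 30) - 100L * (1L << 20);

    static long clampToGuavaSafeSize(String cacheName, long requestedBytes) {
        if (requestedBytes > MAX_GUAVA_CACHE_SIZE) {
            System.err.printf("reducing requested %s size of [%d] to the maximum allowed size of [%d]%n",
                    cacheName, requestedBytes, MAX_GUAVA_CACHE_SIZE);
            return MAX_GUAVA_CACHE_SIZE;
        }
        return requestedBytes;
    }

    public static void main(String[] args) {
        // A 64GB request is clamped to 34254880768 bytes (~31.9GB).
        System.out.println(clampToGuavaSafeSize("filter cache", 64L << 30));
    }
}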
IndicesFieldDataCache.java:

@@ -55,8 +55,14 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
     public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
         super(settings);
         this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
-        final String size = componentSettings.get("size", "-1");
-        final long sizeInBytes = componentSettings.getAsMemory("size", "-1").bytes();
+        String size = componentSettings.get("size", "-1");
+        long sizeInBytes = componentSettings.getAsMemory("size", "-1").bytes();
+        if (sizeInBytes > ByteSizeValue.MAX_GUAVA_CACHE_SIZE.bytes()) {
+            logger.warn("reducing requested field data cache size of [{}] to the maximum allowed size of [{}]", new ByteSizeValue(sizeInBytes),
+                    ByteSizeValue.MAX_GUAVA_CACHE_SIZE);
+            sizeInBytes = ByteSizeValue.MAX_GUAVA_CACHE_SIZE.bytes();
+            size = ByteSizeValue.MAX_GUAVA_CACHE_SIZE.toString();
+        }
         final TimeValue expire = componentSettings.getAsTime("expire", null);
         CacheBuilder<Key, RamUsage> cacheBuilder = CacheBuilder.newBuilder()
                 .removalListener(this);
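Downstream of this clamp, both constructors hand the value to Guava's CacheBuilder as a maximum weight paired with a weigher. A minimal sketch of that wiring, with illustrative key/value types (the real caches weigh their own entry types):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.Weigher;

public class ByteWeightedCache {
    public static void main(String[] args) {
        // Clamp a hypothetical 64GB request to the safe maximum first.
        long clampedBytes = Math.min(64L << 30, 32L * (1L << 30) - 100L * (1L << 20));
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .concurrencyLevel(16)         // the default segment count from the commit message
                .maximumWeight(clampedBytes)
                .weigher(new Weigher<String, byte[]>() {
                    @Override
                    public int weigh(String key, byte[] value) {
                        return value.length;  // 1 weight unit per byte
                    }
                })
                .build();
        cache.put("field-data", new byte[1024]);
        System.out.println(cache.size());     // 1
    }
}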
GuavaCacheOverflowTest.java (new file):

@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.Weigher;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Asserts that Guava's caches can get stuck in an overflow state where they
+ * never clear themselves based on their "weight" policy if the weight grows
+ * beyond MAX_INT. If the noEvictionIf* methods start failing after upgrading
+ * Guava then the problem with Guava's caches can probably be considered fixed
+ * and {@code ByteSizeValue#MAX_GUAVA_CACHE_SIZE} can likely be removed.
+ */
+public class GuavaCacheOverflowTest extends ElasticsearchTestCase {
+    private final int tenMeg = ByteSizeValue.parseBytesSizeValue("10MB").bytesAsInt();
+
+    private Cache<Integer, Object> cache;
+
+    @Test
+    public void noEvictionIfWeightMaxWeightIs32GB() {
+        checkNoEviction("32GB");
+    }
+
+    @Test
+    public void noEvictionIfWeightMaxWeightIsGreaterThan32GB() {
+        checkNoEviction(between(33, 50) + "GB");
+    }
+
+    @Test
+    public void evictionIfWeightSlowlyGoesOverMaxWeight() {
+        buildCache("30GB");
+        // Add about 100GB of weight to the cache
+        int entries = 10240;
+        fillCache(entries);
+
+        // And as expected, some are purged.
+        int missing = 0;
+        for (int i = 0; i < 31; i++) {
+            if (cache.getIfPresent(i + tenMeg) == null) {
+                missing++;
+            }
+        }
+        assertThat(missing, both(greaterThan(0)).and(lessThan(entries)));
+    }
+
+    private void buildCache(String size) {
+        cache = CacheBuilder.newBuilder().concurrencyLevel(16).maximumWeight(ByteSizeValue.parseBytesSizeValue(size).bytes())
+                .weigher(new Weigher<Integer, Object>() {
+                    @Override
+                    public int weigh(Integer key, Object value) {
+                        return key;
+                    }
+                }).build();
+    }
+
+    private void fillCache(int entries) {
+        for (int i = 0; i < entries; i++) {
+            cache.put(i + tenMeg, i);
+        }
+    }
+
+    private void checkNoEviction(String size) {
+        buildCache(size);
+        // Adds ~100GB worth of weight to the cache
+        fillCache(10240);
+        // But nothing has been purged!
+        for (int i = 0; i < 10000; i++) {
+            assertNotNull(cache.getIfPresent(i + tenMeg));
+        }
+    }
+}
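A note on the test's design: each entry's weight is its own key, and the keys start at tenMeg, so every entry weighs at least 10MB and 10240 entries add roughly 100GB of weight, while each individual weight still fits in the weigher's int return type.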