Fielddata: Remove soft/resident caches.
These caches have no advantage compared to the default node cache. Additionally, the soft cache makes use of soft references, which makes fielddata loading quite unpredictable and puts more pressure on the garbage collector. The `none` cache is still there because of tests; there is no other good reason to use it. LongFieldDataBenchmark has been removed because the refactoring exposed a compilation error in this class, which seems not to have been working for a long time. In addition, it is not as useful now that we are progressively moving more fields to doc values. Closes #7443
This commit is contained in:
parent
238efe505b
commit
b745b0151c
|
@ -19,26 +19,10 @@
|
|||
|
||||
package org.elasticsearch.index.fielddata;
|
||||
|
||||
import com.google.common.cache.Cache;
|
||||
import com.google.common.cache.CacheBuilder;
|
||||
import com.google.common.cache.RemovalListener;
|
||||
import com.google.common.cache.RemovalNotification;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.SegmentReader;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.lucene.SegmentReaderUtils;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.service.IndexService;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardUtils;
|
||||
import org.elasticsearch.index.shard.service.IndexShard;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
/**
|
||||
* A simple field data cache abstraction on the *index* level.
|
||||
|
@ -94,174 +78,4 @@ public interface IndexFieldDataCache {
|
|||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The resident field data cache is a *per field* cache that keeps all the values in memory.
|
||||
*/
|
||||
static abstract class FieldBased implements IndexFieldDataCache, SegmentReader.CoreClosedListener, RemovalListener<FieldBased.Key, Accountable>, IndexReader.ReaderClosedListener {
|
||||
private final IndexService indexService;
|
||||
private final FieldMapper.Names fieldNames;
|
||||
private final FieldDataType fieldDataType;
|
||||
private final Cache<Key, Accountable> cache;
|
||||
private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
|
||||
private final ESLogger logger;
|
||||
|
||||
protected FieldBased(ESLogger logger, IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, CacheBuilder cache, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
|
||||
assert indexService != null;
|
||||
this.logger = logger;
|
||||
this.indexService = indexService;
|
||||
this.fieldNames = fieldNames;
|
||||
this.fieldDataType = fieldDataType;
|
||||
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
|
||||
cache.removalListener(this);
|
||||
//noinspection unchecked
|
||||
this.cache = cache.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<Key, Accountable> notification) {
|
||||
final Key key = notification.getKey();
|
||||
assert key != null && key.listeners != null;
|
||||
|
||||
final Accountable value = notification.getValue();
|
||||
long sizeInBytes = key.sizeInBytes;
|
||||
assert sizeInBytes >= 0 || value != null : "Expected size [" + sizeInBytes + "] to be positive or value [" + value + "] to be non-null";
|
||||
if (sizeInBytes == -1 && value != null) {
|
||||
sizeInBytes = value.ramBytesUsed();
|
||||
}
|
||||
for (Listener listener : key.listeners) {
|
||||
try {
|
||||
listener.onUnload(fieldNames, fieldDataType, notification.wasEvicted(), sizeInBytes);
|
||||
} catch (Throwable e) {
|
||||
logger.error("Failed to call listener on field data cache unloading", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public <FD extends AtomicFieldData, IFD extends IndexFieldData<FD>> FD load(final AtomicReaderContext context, final IFD indexFieldData) throws Exception {
|
||||
final Key key = new Key(context.reader().getCoreCacheKey());
|
||||
//noinspection unchecked
|
||||
final Accountable accountable = cache.get(key, new Callable<FD>() {
|
||||
@Override
|
||||
public FD call() throws Exception {
|
||||
SegmentReaderUtils.registerCoreListener(context.reader(), FieldBased.this);
|
||||
|
||||
key.listeners.add(indicesFieldDataCacheListener);
|
||||
final ShardId shardId = ShardUtils.extractShardId(context.reader());
|
||||
if (shardId != null) {
|
||||
final IndexShard shard = indexService.shard(shardId.id());
|
||||
if (shard != null) {
|
||||
key.listeners.add(shard.fieldData());
|
||||
}
|
||||
}
|
||||
final FD fieldData = indexFieldData.loadDirect(context);
|
||||
for (Listener listener : key.listeners) {
|
||||
try {
|
||||
listener.onLoad(fieldNames, fieldDataType, fieldData);
|
||||
} catch (Throwable e) {
|
||||
// load anyway since listeners should not throw exceptions
|
||||
logger.error("Failed to call listener on atomic field data loading", e);
|
||||
}
|
||||
}
|
||||
key.sizeInBytes = fieldData.ramBytesUsed();
|
||||
return fieldData;
|
||||
}
|
||||
});
|
||||
return (FD) accountable;
|
||||
}
|
||||
|
||||
public <FD extends AtomicFieldData, IFD extends IndexFieldData.Global<FD>> IFD load(final IndexReader indexReader, final IFD indexFieldData) throws Exception {
|
||||
final Key key = new Key(indexReader.getCoreCacheKey());
|
||||
//noinspection unchecked
|
||||
final Accountable accountable = cache.get(key, new Callable<Accountable>() {
|
||||
@Override
|
||||
public Accountable call() throws Exception {
|
||||
indexReader.addReaderClosedListener(FieldBased.this);
|
||||
key.listeners.add(indicesFieldDataCacheListener);
|
||||
final ShardId shardId = ShardUtils.extractShardId(indexReader);
|
||||
if (shardId != null) {
|
||||
IndexShard shard = indexService.shard(shardId.id());
|
||||
if (shard != null) {
|
||||
key.listeners.add(shard.fieldData());
|
||||
}
|
||||
}
|
||||
final Accountable ifd = (Accountable) indexFieldData.localGlobalDirect(indexReader);
|
||||
for (Listener listener : key.listeners) {
|
||||
try {
|
||||
listener.onLoad(fieldNames, fieldDataType, ifd);
|
||||
} catch (Throwable e) {
|
||||
// load anyway since listeners should not throw exceptions
|
||||
logger.error("Failed to call listener on global ordinals loading", e);
|
||||
}
|
||||
}
|
||||
key.sizeInBytes = ifd.ramBytesUsed();
|
||||
return ifd;
|
||||
}
|
||||
});
|
||||
return (IFD) accountable;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear(String fieldName) {
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear(Object coreCacheKey) {
|
||||
cache.invalidate(new Key(coreCacheKey));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(Object coreCacheKey) {
|
||||
cache.invalidate(new Key(coreCacheKey));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(IndexReader reader) {
|
||||
cache.invalidate(reader.getCoreCacheKey());
|
||||
}
|
||||
|
||||
static class Key {
|
||||
final Object readerKey;
|
||||
final List<Listener> listeners = new ArrayList<>();
|
||||
long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
|
||||
|
||||
Key(Object readerKey) {
|
||||
this.readerKey = readerKey;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
Key key = (Key) o;
|
||||
if (!readerKey.equals(key.readerKey)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return readerKey.hashCode();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class Resident extends FieldBased {
|
||||
|
||||
public Resident(ESLogger logger, IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
|
||||
super(logger, indexService, fieldNames, fieldDataType, CacheBuilder.newBuilder(), indicesFieldDataCacheListener);
|
||||
}
|
||||
}
|
||||
|
||||
static class Soft extends FieldBased {
|
||||
|
||||
public Soft(ESLogger logger, IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
|
||||
super(logger, indexService, fieldNames, fieldDataType, CacheBuilder.newBuilder().softValues(), indicesFieldDataCacheListener);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.elasticsearch.ExceptionsHelper;
|
|||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.KeyedLock;
|
||||
|
@ -40,8 +39,6 @@ import org.elasticsearch.index.service.IndexService;
|
|||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
|
@ -54,9 +51,7 @@ import java.util.concurrent.ConcurrentMap;
|
|||
public class IndexFieldDataService extends AbstractIndexComponent {
|
||||
|
||||
public static final String FIELDDATA_CACHE_KEY = "index.fielddata.cache";
|
||||
public static final String FIELDDATA_CACHE_VALUE_SOFT = "soft";
|
||||
public static final String FIELDDATA_CACHE_VALUE_NODE = "node";
|
||||
public static final String FIELDDATA_CACHE_VALUE_RESIDENT = "resident";
|
||||
|
||||
private static final String DISABLED_FORMAT = "disabled";
|
||||
private static final String DOC_VALUES_FORMAT = "doc_values";
|
||||
|
@ -69,7 +64,6 @@ public class IndexFieldDataService extends AbstractIndexComponent {
|
|||
private final static ImmutableMap<String, IndexFieldData.Builder> docValuesBuildersByType;
|
||||
private final static ImmutableMap<Tuple<String, String>, IndexFieldData.Builder> buildersByTypeAndFormat;
|
||||
private final CircuitBreakerService circuitBreakerService;
|
||||
private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
|
||||
|
||||
static {
|
||||
buildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
|
||||
|
@ -146,25 +140,12 @@ public class IndexFieldDataService extends AbstractIndexComponent {
|
|||
|
||||
IndexService indexService;
|
||||
|
||||
// public for testing
|
||||
public IndexFieldDataService(Index index, CircuitBreakerService circuitBreakerService) {
|
||||
this(index, ImmutableSettings.Builder.EMPTY_SETTINGS,
|
||||
new IndicesFieldDataCache(ImmutableSettings.Builder.EMPTY_SETTINGS, new IndicesFieldDataCacheListener(circuitBreakerService), new ThreadPool("testing-only")),
|
||||
circuitBreakerService, new IndicesFieldDataCacheListener(circuitBreakerService));
|
||||
}
|
||||
|
||||
// public for testing
|
||||
public IndexFieldDataService(Index index, CircuitBreakerService circuitBreakerService, IndicesFieldDataCache indicesFieldDataCache) {
|
||||
this(index, ImmutableSettings.Builder.EMPTY_SETTINGS, indicesFieldDataCache, circuitBreakerService, new IndicesFieldDataCacheListener(circuitBreakerService));
|
||||
}
|
||||
|
||||
@Inject
|
||||
public IndexFieldDataService(Index index, @IndexSettings Settings indexSettings, IndicesFieldDataCache indicesFieldDataCache,
|
||||
CircuitBreakerService circuitBreakerService, IndicesFieldDataCacheListener indicesFieldDataCacheListener) {
|
||||
CircuitBreakerService circuitBreakerService) {
|
||||
super(index, indexSettings);
|
||||
this.indicesFieldDataCache = indicesFieldDataCache;
|
||||
this.circuitBreakerService = circuitBreakerService;
|
||||
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
|
||||
}
|
||||
|
||||
// we need to "inject" the index service to not create cyclic dep
|
||||
|
@ -279,11 +260,7 @@ public class IndexFieldDataService extends AbstractIndexComponent {
|
|||
// we default to node level cache, which in turn defaults to be unbounded
|
||||
// this means changing the node level settings is simple, just set the bounds there
|
||||
String cacheType = type.getSettings().get("cache", indexSettings.get(FIELDDATA_CACHE_KEY, FIELDDATA_CACHE_VALUE_NODE));
|
||||
if (FIELDDATA_CACHE_VALUE_RESIDENT.equals(cacheType)) {
|
||||
cache = new IndexFieldDataCache.Resident(logger, indexService, fieldNames, type, indicesFieldDataCacheListener);
|
||||
} else if (FIELDDATA_CACHE_VALUE_SOFT.equals(cacheType)) {
|
||||
cache = new IndexFieldDataCache.Soft(logger, indexService, fieldNames, type, indicesFieldDataCacheListener);
|
||||
} else if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
|
||||
if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
|
||||
cache = indicesFieldDataCache.buildIndexFieldDataCache(indexService, index, fieldNames, type);
|
||||
} else if ("none".equals(cacheType)){
|
||||
cache = new IndexFieldDataCache.None();
|
||||
|
|
|
@ -111,15 +111,10 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
|
|||
Key key = notification.getKey();
|
||||
assert key != null && key.listeners != null;
|
||||
IndexFieldCache indexCache = key.indexCache;
|
||||
long sizeInBytes = key.sizeInBytes;
|
||||
final Accountable value = notification.getValue();
|
||||
assert sizeInBytes >= 0 || value != null : "Expected size [" + sizeInBytes + "] to be positive or value [" + value + "] to be non-null";
|
||||
if (sizeInBytes == -1 && value != null) {
|
||||
sizeInBytes = value.ramBytesUsed();
|
||||
}
|
||||
for (IndexFieldDataCache.Listener listener : key.listeners) {
|
||||
try {
|
||||
listener.onUnload(indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), sizeInBytes);
|
||||
listener.onUnload(indexCache.fieldNames, indexCache.fieldDataType, notification.wasEvicted(), value.ramBytesUsed());
|
||||
} catch (Throwable e) {
|
||||
// load anyway since listeners should not throw exceptions
|
||||
logger.error("Failed to call listener on field data cache unloading", e);
|
||||
|
@ -192,7 +187,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
|
|||
logger.error("Failed to call listener on atomic field data loading", e);
|
||||
}
|
||||
}
|
||||
key.sizeInBytes = fieldData.ramBytesUsed();
|
||||
return fieldData;
|
||||
}
|
||||
});
|
||||
|
@ -283,7 +277,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
|
|||
public final Object readerKey;
|
||||
|
||||
public final List<IndexFieldDataCache.Listener> listeners = new ArrayList<>();
|
||||
long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
|
||||
|
||||
|
||||
Key(IndexFieldCache indexCache, Object readerKey) {
|
||||
|
|
|
@ -1,159 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.benchmark.fielddata;
|
||||
|
||||
import org.apache.lucene.analysis.core.KeywordAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field.Store;
|
||||
import org.apache.lucene.document.LongField;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.SlowCompositeReaderWrapper;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataService;
|
||||
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
|
||||
import java.util.Random;
|
||||
|
||||
public class LongFieldDataBenchmark {
|
||||
|
||||
private static final Random RANDOM = new Random();
|
||||
private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
|
||||
|
||||
public static enum Data {
|
||||
SINGLE_VALUES_DENSE_ENUM {
|
||||
public int numValues() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
return RANDOM.nextInt(16);
|
||||
}
|
||||
},
|
||||
SINGLE_VALUED_DENSE_DATE {
|
||||
public int numValues() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
// somewhere in-between 2010 and 2012
|
||||
return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
|
||||
}
|
||||
},
|
||||
MULTI_VALUED_DATE {
|
||||
public int numValues() {
|
||||
return RANDOM.nextInt(3);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
// somewhere in-between 2010 and 2012
|
||||
return 1000L * (40L * SECONDS_PER_YEAR + RANDOM.nextInt(2 * SECONDS_PER_YEAR));
|
||||
}
|
||||
},
|
||||
MULTI_VALUED_ENUM {
|
||||
public int numValues() {
|
||||
return RANDOM.nextInt(3);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
return 3 + RANDOM.nextInt(8);
|
||||
}
|
||||
},
|
||||
SINGLE_VALUED_SPARSE_RANDOM {
|
||||
public int numValues() {
|
||||
return RANDOM.nextFloat() < 0.1f ? 1 : 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
return RANDOM.nextLong();
|
||||
}
|
||||
},
|
||||
MULTI_VALUED_SPARSE_RANDOM {
|
||||
public int numValues() {
|
||||
return RANDOM.nextFloat() < 0.1f ? 1 + RANDOM.nextInt(5) : 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
return RANDOM.nextLong();
|
||||
}
|
||||
},
|
||||
MULTI_VALUED_DENSE_RANDOM {
|
||||
public int numValues() {
|
||||
return 1 + RANDOM.nextInt(3);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextValue() {
|
||||
return RANDOM.nextLong();
|
||||
}
|
||||
};
|
||||
|
||||
public abstract int numValues();
|
||||
|
||||
public abstract long nextValue();
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
final IndexWriterConfig iwc = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
|
||||
final String fieldName = "f";
|
||||
final int numDocs = 1000000;
|
||||
System.out.println("Data\tLoading time\tImplementation\tActual size\tExpected size");
|
||||
for (Data data : Data.values()) {
|
||||
final RAMDirectory dir = new RAMDirectory();
|
||||
final IndexWriter indexWriter = new IndexWriter(dir, iwc);
|
||||
for (int i = 0; i < numDocs; ++i) {
|
||||
final Document doc = new Document();
|
||||
final int numFields = data.numValues();
|
||||
for (int j = 0; j < numFields; ++j) {
|
||||
doc.add(new LongField(fieldName, data.nextValue(), Store.NO));
|
||||
}
|
||||
indexWriter.addDocument(doc);
|
||||
}
|
||||
indexWriter.forceMerge(1, true);
|
||||
indexWriter.close();
|
||||
|
||||
final DirectoryReader dr = DirectoryReader.open(dir);
|
||||
final IndexFieldDataService fds = new IndexFieldDataService(new Index("dummy"), new NoneCircuitBreakerService());
|
||||
final LongFieldMapper mapper = new LongFieldMapper.Builder(fieldName).build(new BuilderContext(null, new ContentPath(1)));
|
||||
final IndexNumericFieldData fd = fds.getForField(mapper);
|
||||
final long start = System.nanoTime();
|
||||
final AtomicNumericFieldData afd = fd.loadDirect(SlowCompositeReaderWrapper.wrap(dr).getContext());
|
||||
final long loadingTimeMs = (System.nanoTime() - start) / 1000 / 1000;
|
||||
System.out.println(data + "\t" + loadingTimeMs + "\t" + afd.getClass().getSimpleName() + "\t" + RamUsageEstimator.humanReadableUnits(afd.ramBytesUsed()));
|
||||
dr.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -475,15 +475,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
|||
if (random.nextBoolean()) {
|
||||
builder.put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, random.nextBoolean());
|
||||
}
|
||||
|
||||
// Disabled for now as it seems to make tests unstable
|
||||
// TODO: figure out what is wrong with the soft and/or resident caches?
|
||||
/*if (random.nextBoolean()) {
|
||||
builder.put(IndexFieldDataService.FIELDDATA_CACHE_KEY, randomFrom(
|
||||
IndexFieldDataService.FIELDDATA_CACHE_VALUE_NODE,
|
||||
IndexFieldDataService.FIELDDATA_CACHE_VALUE_RESIDENT,
|
||||
IndexFieldDataService.FIELDDATA_CACHE_VALUE_SOFT));
|
||||
}*/
|
||||
|
||||
return builder;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue