SOLR-13003: Query Result Cache does not honour maxRamBytes parameter.

Andrzej Bialecki 2019-07-03 17:03:43 +02:00
parent e3d247f288
commit 5897787291
20 changed files with 319 additions and 122 deletions
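The patch applies one idiom across all twenty files: cache keys, values, and the caches themselves become Accountable, each entry's size is estimated once (at construction) through RamUsageEstimator, and types the estimator cannot traverse fall back to a default estimate. A minimal sketch of that idiom, assuming only the RamUsageEstimator overloads and constants that appear in the hunks below:

import org.apache.lucene.util.RamUsageEstimator;

public class EntryRamSketch {
  public static void main(String[] args) {
    Object key = "q=foo";          // placeholder cache key
    Object value = new long[128];  // placeholder cache value
    // Per-entry estimate: key + value, with a default charge for opaque
    // types, plus the hash-table slot overhead this patch accounts for.
    long bytes =
        RamUsageEstimator.sizeOfObject(key, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)
        + RamUsageEstimator.sizeOfObject(value, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)
        + RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
    System.out.println(bytes);
  }
}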


@ -41,9 +41,12 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.RoaringDocIdSet;
import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
/**
* A {@link QueryCache} that evicts queries using a LRU (least-recently-used)
* eviction policy in order to remain under a given maximum size and number of
@ -86,18 +89,6 @@ import org.apache.lucene.util.RoaringDocIdSet;
*/
public class LRUQueryCache implements QueryCache, Accountable {
// approximate memory usage that we assign to all queries
// this maps roughly to a BooleanQuery with a couple term clauses
static final long QUERY_DEFAULT_RAM_BYTES_USED = RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF // key + value
* 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity
static final long LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY =
HASHTABLE_RAM_BYTES_PER_ENTRY
+ 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF; // previous & next references
private final int maxSize;
private final long maxRamBytesUsed;
private final Predicate<LeafReaderContext> leavesToCache;


@ -338,7 +338,7 @@ public class TermInSetQuery extends Query implements Accountable {
public boolean isCacheable(LeafReaderContext ctx) {
// Only cache instances that have a reasonable size. Otherwise it might cause memory issues
// with the query cache if most memory ends up being spent on queries rather than doc id sets.
return ramBytesUsed() <= LRUQueryCache.QUERY_DEFAULT_RAM_BYTES_USED;
return ramBytesUsed() <= RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
}
};


@ -207,6 +207,16 @@ public final class RamUsageEstimator {
STRING_SIZE = (int) shallowSizeOfInstance(String.class);
}
/** Approximate memory usage that we assign to a Hashtable / HashMap entry. */
public static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
2 * NUM_BYTES_OBJECT_REF // key + value
* 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity
/** Approximate memory usage that we assign to a LinkedHashMap entry. */
public static final long LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY =
HASHTABLE_RAM_BYTES_PER_ENTRY
+ 2 * NUM_BYTES_OBJECT_REF; // previous & next references
/**
* Aligns an object size to be the next multiple of {@link #NUM_BYTES_OBJECT_ALIGNMENT}.
*/
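Both constants are plain reference arithmetic, so their values depend on the JVM: with compressed oops (4-byte references) they come to 16 and 24 bytes per entry, with 8-byte references to 32 and 48. A quick check on any given JVM:

import org.apache.lucene.util.RamUsageEstimator;

public class TableOverheadCheck {
  public static void main(String[] args) {
    // 2 refs (key + value), doubled for the assumed 2x table capacity.
    System.out.println(RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY);
    // Same, plus 2 more refs for LinkedHashMap's prev/next links.
    System.out.println(RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY);
  }
}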


@ -62,6 +62,10 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.RamUsageTester;
import org.apache.lucene.util.TestUtil;
import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
public class TestLRUQueryCache extends LuceneTestCase {
private static final QueryCachingPolicy ALWAYS_CACHE = new QueryCachingPolicy() {
@ -290,7 +294,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
return ((DocIdSet) o).ramBytesUsed();
}
if (o instanceof Query) {
return LRUQueryCache.QUERY_DEFAULT_RAM_BYTES_USED;
return QUERY_DEFAULT_RAM_BYTES_USED;
}
if (o instanceof IndexReader || o.getClass().getSimpleName().equals("SegmentCoreReaders")) {
// do not take readers or core cache keys into account
@ -301,8 +305,8 @@ public class TestLRUQueryCache extends LuceneTestCase {
queue.addAll(map.keySet());
queue.addAll(map.values());
final long sizePerEntry = o instanceof LinkedHashMap
? LRUQueryCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY
: LRUQueryCache.HASHTABLE_RAM_BYTES_PER_ENTRY;
? LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY
: HASHTABLE_RAM_BYTES_PER_ENTRY;
return sizePerEntry * map.size();
}
// follow links to other objects, but ignore their memory usage
@ -416,7 +420,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
return ((DocIdSet) o).ramBytesUsed();
}
if (o instanceof Query) {
return LRUQueryCache.QUERY_DEFAULT_RAM_BYTES_USED;
return QUERY_DEFAULT_RAM_BYTES_USED;
}
if (o.getClass().getSimpleName().equals("SegmentCoreReaders")) {
// do not follow references to core cache keys
@ -752,7 +756,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
assertEquals(segmentCount2, missCount2.longValue());
// check that the recomputed stats are the same as those reported by the cache
assertEquals(queryCache.ramBytesUsed(), (segmentCount1 + segmentCount2) * LRUQueryCache.HASHTABLE_RAM_BYTES_PER_ENTRY + ramBytesUsage.longValue());
assertEquals(queryCache.ramBytesUsed(), (segmentCount1 + segmentCount2) * HASHTABLE_RAM_BYTES_PER_ENTRY + ramBytesUsage.longValue());
assertEquals(queryCache.getCacheSize(), cacheSize.longValue());
reader1.close();


@ -207,6 +207,11 @@ Bug Fixes
* SOLR-13404: Support group.query in multi-shard environment when group.main=true or group.format=simple (Munendra S N)
Improvements
----------------------
* SOLR-13003: Query Result Cache does not honour maxRamBytes parameter. (ab, Brian Ecker)
Other Changes
----------------------


@ -42,6 +42,8 @@ import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.ltr.feature.Feature;
import org.apache.solr.ltr.model.LTRScoringModel;
import org.apache.solr.request.SolrQueryRequest;
@ -52,10 +54,12 @@ import org.slf4j.LoggerFactory;
* The ranking query that is run, reranking results using the
* LTRScoringModel algorithm
*/
public class LTRScoringQuery extends Query {
public class LTRScoringQuery extends Query implements Accountable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(LTRScoringQuery.class);
// contains a description of the model
final private LTRScoringModel ltrScoringModel;
final private boolean extractAllFeatures;
@ -305,6 +309,14 @@ public class LTRScoringQuery extends Query {
return field;
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES +
RamUsageEstimator.sizeOfObject(efi) +
RamUsageEstimator.sizeOfObject(ltrScoringModel) +
RamUsageEstimator.sizeOfObject(originalQuery, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED);
}
public static class FeatureInfo {
final private String name;
private float value;
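The two sizeOfObject forms used in ramBytesUsed() above differ in how they treat objects the estimator cannot traverse: the one-argument form measures known types and Accountables directly, while the two-argument form charges the supplied default, which is why the arbitrary originalQuery is passed with QUERY_DEFAULT_RAM_BYTES_USED instead of being silently undercounted. A small sketch of the difference (that the one-argument form falls back to zero is an assumption from how the patch uses it):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.RamUsageEstimator;

public class SizeOfObjectSketch {
  public static void main(String[] args) {
    // A String is a known type and is measured directly.
    System.out.println(RamUsageEstimator.sizeOfObject("hello"));
    // A plain Query is opaque; the caller-supplied default is charged instead.
    System.out.println(RamUsageEstimator.sizeOfObject(
        new TermQuery(new Term("f", "v")),
        RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED));
  }
}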


@ -28,6 +28,8 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.ltr.DocInfo;
import org.apache.solr.request.SolrQueryRequest;
@ -56,7 +58,8 @@ import org.apache.solr.util.SolrPluginUtils;
* the {@link #validate()} function, and must implement the {@link #paramsToMap()}
* and createWeight() methods.
*/
public abstract class Feature extends Query {
public abstract class Feature extends Query implements Accountable {
private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(Feature.class);
final protected String name;
private int index = -1;
@ -147,6 +150,13 @@ public abstract class Feature extends Query {
return sameClassAs(o) && equalsTo(getClass().cast(o));
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES +
RamUsageEstimator.sizeOfObject(name) +
RamUsageEstimator.sizeOfObject(params);
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);


@ -27,6 +27,8 @@ import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.ltr.feature.Feature;
import org.apache.solr.ltr.feature.FeatureException;
@ -76,7 +78,8 @@ import org.apache.solr.util.SolrPluginUtils;
* implement the {@link #score(float[])} and
* {@link #explain(LeafReaderContext, int, float, List)} methods.
*/
public abstract class LTRScoringModel {
public abstract class LTRScoringModel implements Accountable {
private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(LTRScoringModel.class);
protected final String name;
private final String featureStoreName;
@ -240,6 +243,17 @@ public abstract class LTRScoringModel {
return true;
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES +
RamUsageEstimator.sizeOfObject(allFeatures, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(features, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(featureStoreName) +
RamUsageEstimator.sizeOfObject(name) +
RamUsageEstimator.sizeOfObject(norms) +
RamUsageEstimator.sizeOfObject(params);
}
public Collection<Feature> getAllFeatures() {
return allFeatures;
}
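Two of the fields above (allFeatures, features) are collections of Feature instances, which are Accountable as of this patch; the estimator walks collection elements, and the supplied default covers any element it cannot measure (the element-wise fallback is an assumption based on how these fields are passed). A sketch:

import java.util.Arrays;
import java.util.List;
import org.apache.lucene.util.RamUsageEstimator;

public class CollectionEstimateSketch {
  public static void main(String[] args) {
    List<Object> mixed = Arrays.asList("a-string", new Object());
    // The String element is measured; the opaque Object gets the default.
    System.out.println(RamUsageEstimator.sizeOfObject(
        mixed, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED));
  }
}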


@ -38,6 +38,7 @@ public class DocSlice extends DocSetBase implements DocList {
final float[] scores; // optional score list
final long matches;
final float maxScore;
final long ramBytesUsed; // cached value
/**
* Primary constructor for a DocSlice instance.
@ -55,6 +56,7 @@ public class DocSlice extends DocSetBase implements DocList {
this.scores=scores;
this.matches=matches;
this.maxScore=maxScore;
this.ramBytesUsed = BASE_RAM_BYTES_USED + ((long)docs.length << 2) + (scores == null ? 0 : ((long)scores.length<<2)+RamUsageEstimator.NUM_BYTES_ARRAY_HEADER);
}
@Override
@ -172,7 +174,7 @@ public class DocSlice extends DocSetBase implements DocList {
/** WARNING: this can over-estimate real memory use since backing arrays are shared with other DocSlice instances */
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + ((long)docs.length << 2) + (scores == null ? 0 : ((long)scores.length<<2)+RamUsageEstimator.NUM_BYTES_ARRAY_HEADER);
return ramBytesUsed;
}
@Override


@ -17,6 +17,8 @@
package org.apache.solr.search;
import com.codahale.metrics.MetricRegistry;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.SolrException;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
@ -45,9 +47,11 @@ import java.util.concurrent.TimeUnit;
* @see org.apache.solr.search.SolrCache
* @since solr 1.4
*/
public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V> {
public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>, Accountable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FastLRUCache.class);
// contains the statistics objects for all open caches of the same type
private List<ConcurrentLRUCache.Stats> statsList;
@ -62,8 +66,6 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
private MetricsMap cacheMap;
private Set<String> metricNames = ConcurrentHashMap.newKeySet();
private MetricRegistry registry;
private SolrMetricManager metricManager;
private String registryName;
@Override
public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
@ -101,7 +103,7 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
str = (String) args.get("maxRamMB");
this.maxRamBytes = str == null ? Long.MAX_VALUE : (long) (Double.parseDouble(str) * 1024L * 1024L);
if (maxRamBytes != Long.MAX_VALUE) {
int ramLowerWatermark = (int) (maxRamBytes * 0.8);
long ramLowerWatermark = Math.round(maxRamBytes * 0.8);
description = generateDescription(maxRamBytes, ramLowerWatermark, newThread);
cache = new ConcurrentLRUCache<K, V>(ramLowerWatermark, maxRamBytes, newThread, null);
} else {
@ -229,8 +231,6 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, String scope) {
this.metricManager = manager;
this.registryName = registryName;
registry = manager.registry(registryName);
cacheMap = new MetricsMap((detailed, map) -> {
if (cache != null) {
@ -300,6 +300,12 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
return name() + (cacheMap != null ? cacheMap.getValue().toString() : "");
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED +
RamUsageEstimator.sizeOfObject(cache) +
RamUsageEstimator.sizeOfObject(statsList);
}
}
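Besides the Accountable plumbing, the init() hunk above fixes a narrowing bug: maxRamBytes * 0.8 is a double, and casting it to int saturates at Integer.MAX_VALUE, so for maxRamMB settings above roughly 2560 the lower watermark stopped tracking 80% of the configured limit. A standalone illustration:

public class WatermarkCast {
  public static void main(String[] args) {
    long maxRamBytes = 4L * 1024 * 1024 * 1024;  // maxRamMB = 4096
    int before = (int) (maxRamBytes * 0.8);      // clamps to 2147483647
    long after = Math.round(maxRamBytes * 0.8);  // 3435973837
    System.out.println(before + " vs " + after);
  }
}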


@ -54,6 +54,7 @@ import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BytesRef;
@ -61,6 +62,7 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.DocIdSetBuilder;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
@ -376,7 +378,9 @@ public class GraphTermsQParserPlugin extends QParserPlugin {
// modified version of PointInSetQuery
abstract class PointSetQuery extends Query implements DocSetProducer {
abstract class PointSetQuery extends Query implements DocSetProducer, Accountable {
protected static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(PointSetQuery.class);
// A little bit overkill for us, since all of our "terms" are always in the same field:
final PrefixCodedTerms sortedPackedPoints;
final int sortedPackedPointsHashCode;
@ -384,6 +388,7 @@ abstract class PointSetQuery extends Query implements DocSetProducer {
final int bytesPerDim;
final int numDims;
int maxDocFreq = Integer.MAX_VALUE;
final long ramBytesUsed; // cache
/**
* Iterator of encoded point values.
@ -541,6 +546,8 @@ abstract class PointSetQuery extends Query implements DocSetProducer {
}
sortedPackedPoints = builder.finish();
sortedPackedPointsHashCode = sortedPackedPoints.hashCode();
ramBytesUsed = BASE_RAM_BYTES +
RamUsageEstimator.sizeOfObject(sortedPackedPoints);
}
private FixedBitSet getLiveDocs(IndexSearcher searcher) throws IOException {
@ -563,6 +570,11 @@ abstract class PointSetQuery extends Query implements DocSetProducer {
return getDocSet(searcher);
}
@Override
public long ramBytesUsed() {
return ramBytesUsed;
}
public DocSet getDocSet(IndexSearcher searcher) throws IOException {
IndexReaderContext top = ReaderUtil.getTopLevelContext(searcher.getTopReaderContext());
List<LeafReaderContext> segs = top.leaves();
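The precomputed estimate works here because PrefixCodedTerms is itself Accountable, so the one-argument sizeOfObject can measure it directly, and because the query is immutable the value is safe to cache at construction time. For instance:

import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.RamUsageEstimator;

public class PrefixCodedTermsRam {
  public static void main(String[] args) {
    PrefixCodedTerms.Builder b = new PrefixCodedTerms.Builder();
    b.add(new Term("f", "a"));   // terms must be added in sorted order
    b.add(new Term("f", "b"));
    PrefixCodedTerms terms = b.finish();
    // Accountable, so no default estimate is needed.
    System.out.println(RamUsageEstimator.sizeOfObject(terms));
  }
}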


@ -25,6 +25,8 @@ import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.MetricRegistry;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.SolrException;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
@ -48,9 +50,11 @@ import static org.apache.solr.common.params.CommonParams.NAME;
* @see org.apache.solr.search.SolrCache
* @since solr 3.6
*/
public class LFUCache<K, V> implements SolrCache<K, V> {
public class LFUCache<K, V> implements SolrCache<K, V>, Accountable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LFUCache.class);
// contains the statistics objects for all open caches of the same type
private List<ConcurrentLFUCache.Stats> statsList;
@ -67,8 +71,6 @@ public class LFUCache<K, V> implements SolrCache<K, V> {
private MetricsMap cacheMap;
private Set<String> metricNames = ConcurrentHashMap.newKeySet();
private MetricRegistry registry;
private SolrMetricManager metricManager;
private String registryName;
@Override
public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
@ -237,8 +239,6 @@ public class LFUCache<K, V> implements SolrCache<K, V> {
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, String scope) {
this.metricManager = manager;
this.registryName = registryName;
registry = manager.registry(registryName);
cacheMap = new MetricsMap((detailed, map) -> {
if (cache != null) {
@ -315,4 +315,14 @@ public class LFUCache<K, V> implements SolrCache<K, V> {
return name + (cacheMap != null ? cacheMap.getValue().toString() : "");
}
@Override
public long ramBytesUsed() {
synchronized (statsList) {
return BASE_RAM_BYTES_USED +
RamUsageEstimator.sizeOfObject(name) +
RamUsageEstimator.sizeOfObject(metricNames) +
RamUsageEstimator.sizeOfObject(statsList) +
RamUsageEstimator.sizeOfObject(cache);
}
}
}


@ -18,7 +18,6 @@ package org.apache.solr.search;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ConcurrentHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
@ -37,6 +36,8 @@ import org.apache.solr.metrics.SolrMetricManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
/**
*
@ -46,21 +47,6 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LRUCache.class);
/// Copied from Lucene's LRUQueryCache
// memory usage of a simple term query
public static final long DEFAULT_RAM_BYTES_USED = 192;
public static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF // key + value
* 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity
static final long LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY =
HASHTABLE_RAM_BYTES_PER_ENTRY
+ 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF; // previous & next references
/// End copied code
/* An instance of this class will be shared across multiple instances
* of an LRUCache at the same time. Make sure everything is thread safe.
*/
@ -89,8 +75,6 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
private MetricsMap cacheMap;
private Set<String> metricNames = ConcurrentHashMap.newKeySet();
private MetricRegistry registry;
private SolrMetricManager metricManager;
private String registryName;
private long maxRamBytes = Long.MAX_VALUE;
// The synchronization used for the map will be used to update this,
@ -119,11 +103,7 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
do {
Map.Entry<K, V> entry = iterator.next();
if (entry.getKey() != null) {
if (entry.getKey() instanceof Accountable) {
bytesToDecrement += ((Accountable) entry.getKey()).ramBytesUsed();
} else {
bytesToDecrement += DEFAULT_RAM_BYTES_USED;
}
bytesToDecrement += RamUsageEstimator.sizeOfObject(entry.getKey(), QUERY_DEFAULT_RAM_BYTES_USED);
}
if (entry.getValue() != null) {
bytesToDecrement += ((Accountable) entry.getValue()).ramBytesUsed();
@ -197,10 +177,10 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
inserts++;
// important to calc and add new ram bytes first so that removeEldestEntry can compare correctly
long keySize = DEFAULT_RAM_BYTES_USED;
if (maxRamBytes != Long.MAX_VALUE) {
if (key != null && key instanceof Accountable) {
keySize = ((Accountable) key).ramBytesUsed();
long keySize = 0;
if (key != null) {
keySize = RamUsageEstimator.sizeOfObject(key, QUERY_DEFAULT_RAM_BYTES_USED);
}
long valueSize = 0;
if (value != null) {
@ -221,12 +201,7 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
// the key existed in the map but we added its size before the put, so let's back out
bytesToDecrement += LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
if (key != null) {
if (key instanceof Accountable) {
Accountable aKey = (Accountable) key;
bytesToDecrement += aKey.ramBytesUsed();
} else {
bytesToDecrement += DEFAULT_RAM_BYTES_USED;
}
bytesToDecrement += RamUsageEstimator.sizeOfObject(key, QUERY_DEFAULT_RAM_BYTES_USED);
}
ramBytesUsed -= bytesToDecrement;
}
@ -334,8 +309,6 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, String scope) {
this.metricManager = manager;
this.registryName = registryName;
registry = manager.registry(registryName);
cacheMap = new MetricsMap((detailed, res) -> {
synchronized (map) {
@ -391,12 +364,8 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
@Override
public Collection<Accountable> getChildResources() {
if (maxRamBytes != Long.MAX_VALUE) {
synchronized (map) {
return Accountables.namedAccountables(getName(), (Map<?, ? extends Accountable>) map);
}
} else {
return Collections.emptyList();
synchronized (map) {
return Accountables.namedAccountables(getName(), (Map<?, ? extends Accountable>) map);
}
}
}
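The ordering comment earlier in this file ("important to calc and add new ram bytes first so that removeEldestEntry can compare correctly") is the crux of put(): the key, value, and linked-table slot are charged before map.put(), because removeEldestEntry() fires inside that call and must see the post-insert total; if the put turns out to replace an existing key, the duplicated key and slot plus the old value are backed out afterwards. Traced by hand with the same constants:

import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
import org.apache.lucene.util.RamUsageEstimator;

public class LruAccountingTrace {
  public static void main(String[] args) {
    long keyBytes = RamUsageEstimator.sizeOfObject("q", QUERY_DEFAULT_RAM_BYTES_USED);
    long v1 = 512, v2 = 1024;  // stand-ins for value.ramBytesUsed()

    long ram = 0;
    ram += keyBytes + v1 + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;  // put("q", v1)
    ram += keyBytes + v2 + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;  // put("q", v2), charged up front
    ram -= v1 + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + keyBytes;  // old entry backed out after the put
    // One key, one slot, and the newest value remain accounted for.
    System.out.println(ram == keyBytes + v2 + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY);
  }
}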


@ -19,13 +19,19 @@ package org.apache.solr.search;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import java.util.List;
import java.util.ArrayList;
/** A hash key encapsulating a query, a list of filters, and a sort
*
*/
public final class QueryResultKey {
public final class QueryResultKey implements Accountable {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(QueryResultKey.class);
private static final long BASE_SF_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(SortField.class);
final Query query;
final Sort sort;
final SortField[] sfields;
@ -33,6 +39,7 @@ public final class QueryResultKey {
final int nc_flags; // non-comparable flags... ignored by hashCode and equals
private final int hc; // cached hashCode
private final long ramBytesUsed; // cached
private static SortField[] defaultSort = new SortField[0];
@ -53,11 +60,19 @@ public final class QueryResultKey {
}
sfields = (this.sort !=null) ? this.sort.getSort() : defaultSort;
long ramSfields = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
for (SortField sf : sfields) {
h = h*29 + sf.hashCode();
ramSfields += BASE_SF_RAM_BYTES_USED + RamUsageEstimator.sizeOfObject(sf.getField());
}
hc = h;
ramBytesUsed =
BASE_RAM_BYTES_USED +
ramSfields +
RamUsageEstimator.sizeOfObject(query, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(filters, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED);
}
@Override
@ -153,4 +168,8 @@ public final class QueryResultKey {
return set2.isEmpty();
}
@Override
public long ramBytesUsed() {
return ramBytesUsed;
}
}
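The sfields term in the constructor is assembled by hand because SortField is not Accountable: one array header, then a shallow SortField size plus the field name's String size per sort clause. The equivalent standalone arithmetic:

import org.apache.lucene.search.SortField;
import org.apache.lucene.util.RamUsageEstimator;

public class SortFieldRamSketch {
  public static void main(String[] args) {
    SortField[] sfields = { new SortField("price", SortField.Type.LONG) };
    long ramSfields = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
    for (SortField sf : sfields) {
      ramSfields += RamUsageEstimator.shallowSizeOfInstance(SortField.class)
          + RamUsageEstimator.sizeOfObject(sf.getField());
    }
    System.out.println(ramSfields);
  }
}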


@ -25,8 +25,13 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.util.Cache;
import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
/**
* A LFU cache implementation based upon ConcurrentHashMap.
* <p>
@ -38,7 +43,11 @@ import org.apache.solr.common.util.Cache;
*
* @since solr 1.6
*/
public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(ConcurrentLFUCache.class) +
new Stats().ramBytesUsed() +
RamUsageEstimator.shallowSizeOfInstance(ConcurrentHashMap.class);
private final ConcurrentHashMap<Object, CacheEntry<K, V>> map;
private final int upperWaterMark, lowerWaterMark;
@ -53,6 +62,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
private final EvictionListener<K, V> evictionListener;
private CleanupThread cleanupThread;
private final boolean timeDecay;
private final AtomicLong ramBytes = new AtomicLong(0);
public ConcurrentLFUCache(int upperWaterMark, final int lowerWaterMark, int acceptableSize,
int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
@ -101,6 +111,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
CacheEntry<K, V> cacheEntry = map.remove(key);
if (cacheEntry != null) {
stats.size.decrementAndGet();
ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
return cacheEntry.value;
}
return null;
@ -114,8 +125,11 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
int currentSize;
if (oldCacheEntry == null) {
currentSize = stats.size.incrementAndGet();
ramBytes.addAndGet(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
} else {
currentSize = stats.size.get();
ramBytes.addAndGet(-oldCacheEntry.ramBytesUsed());
ramBytes.addAndGet(e.ramBytesUsed());
}
if (islive) {
stats.putCounter.incrementAndGet();
@ -211,6 +225,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
private void evictEntry(K key) {
CacheEntry<K, V> o = map.remove(key);
if (o == null) return;
ramBytes.addAndGet(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
stats.size.decrementAndGet();
stats.evictionCounter.incrementAndGet();
if (evictionListener != null) evictionListener.evictedEntry(o.key, o.value);
@ -311,15 +326,26 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
@Override
public void clear() {
map.clear();
ramBytes.set(0);
}
public Map<Object, CacheEntry<K, V>> getMap() {
return map;
}
public static class CacheEntry<K, V> implements Comparable<CacheEntry<K, V>> {
K key;
V value;
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + ramBytes.get();
}
public static class CacheEntry<K, V> implements Comparable<CacheEntry<K, V>>, Accountable {
public static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CacheEntry.class)
// AtomicLong
+ RamUsageEstimator.primitiveSizes.get(long.class);
final K key;
final V value;
final long ramBytesUsed;
volatile AtomicLong hits = new AtomicLong(0);
long hitsCopy = 0;
volatile long lastAccessed = 0;
@ -329,6 +355,9 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
this.key = key;
this.value = value;
this.lastAccessed = lastAccessed;
ramBytesUsed = BASE_RAM_BYTES_USED +
RamUsageEstimator.sizeOfObject(key, QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(value, QUERY_DEFAULT_RAM_BYTES_USED);
}
@Override
@ -356,6 +385,11 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
public String toString() {
return "key: " + key + " value: " + value + " hits:" + hits.get();
}
@Override
public long ramBytesUsed() {
return ramBytesUsed;
}
}
private boolean isDestroyed = false;
@ -375,7 +409,12 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
}
public static class Stats {
public static class Stats implements Accountable {
private static final long RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(Stats.class) +
5 * RamUsageEstimator.primitiveSizes.get(long.class) +
RamUsageEstimator.primitiveSizes.get(int.class);
private final AtomicLong accessCounter = new AtomicLong(0),
putCounter = new AtomicLong(0),
nonLivePutCounter = new AtomicLong(0),
@ -419,6 +458,11 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V> {
evictionCounter.addAndGet(other.evictionCounter.get());
size.set(Math.max(size.get(), other.size.get()));
}
@Override
public long ramBytesUsed() {
return RAM_BYTES_USED;
}
}
public static interface EvictionListener<K, V> {
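Taken together, the cache-level figure is maintained incrementally: put(), remove(), and evictEntry() adjust ramBytes by the entry's cached size plus HASHTABLE_RAM_BYTES_PER_ENTRY, and ramBytesUsed() only adds the fixed base. The new tests later in this commit assert exactly that shape; condensed here (assuming the two-argument convenience constructor):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.util.ConcurrentLFUCache;

public class LfuRamTrace {
  public static void main(String[] args) {
    ConcurrentLFUCache<Query, Long> cache = new ConcurrentLFUCache<>(100, 80);
    long empty = cache.ramBytesUsed();
    Query q = new WildcardQuery(new Term("f", "bar*"));
    cache.put(q, 42L);
    // One mapping adds: entry base + key estimate + value estimate + table slot.
    long expected = empty
        + ConcurrentLFUCache.CacheEntry.BASE_RAM_BYTES_USED
        + RamUsageEstimator.sizeOfObject(q, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)
        + RamUsageEstimator.sizeOfObject(42L, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)
        + RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
    System.out.println(expected == cache.ramBytesUsed());
  }
}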


@ -19,7 +19,6 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.util.Cache;
import org.apache.solr.search.LRUCache;
import java.util.ArrayList;
import java.util.Arrays;
@ -36,6 +35,9 @@ import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;
import java.lang.ref.WeakReference;
import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
/**
* A LRU cache implementation based upon ConcurrentHashMap and other techniques to reduce
* contention and synchronization overhead to utilize multiple CPU cores more effectively.
@ -50,7 +52,11 @@ import java.lang.ref.WeakReference;
*/
public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConcurrentLRUCache.class);
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(ConcurrentLRUCache.class) +
new Stats().ramBytesUsed() +
RamUsageEstimator.primitiveSizes.get(long.class) +
RamUsageEstimator.shallowSizeOfInstance(ConcurrentHashMap.class);
private final ConcurrentHashMap<Object, CacheEntry<K,V>> map;
private final int upperWaterMark, lowerWaterMark;
@ -131,9 +137,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
CacheEntry<K,V> cacheEntry = map.remove(key);
if (cacheEntry != null) {
stats.size.decrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY);
}
ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
return cacheEntry.value;
}
return null;
@ -147,23 +151,11 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
int currentSize;
if (oldCacheEntry == null) {
currentSize = stats.size.incrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(e.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
}
ramBytes.addAndGet(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
} else {
currentSize = stats.size.get();
if (ramUpperWatermark != Long.MAX_VALUE) {
if (oldCacheEntry.value instanceof Accountable) {
ramBytes.addAndGet(-((Accountable)oldCacheEntry.value).ramBytesUsed());
} else {
ramBytes.addAndGet(-LRUCache.DEFAULT_RAM_BYTES_USED);
}
if (val instanceof Accountable) {
ramBytes.addAndGet(((Accountable)val).ramBytesUsed());
} else {
ramBytes.addAndGet(LRUCache.DEFAULT_RAM_BYTES_USED);
}
}
ramBytes.addAndGet(-oldCacheEntry.ramBytesUsed());
ramBytes.addAndGet(e.ramBytesUsed());
}
if (islive) {
stats.putCounter.increment();
@ -245,7 +237,6 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
for (int i = entriesInAccessOrder.size() - 1; i >= 0; i--) {
CacheEntry<K, V> kvCacheEntry = entriesInAccessOrder.get(i);
evictEntry(kvCacheEntry.key);
ramBytes.addAndGet(-(kvCacheEntry.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY));
if (ramBytes.get() <= ramLowerWatermark) {
break; // we are done!
}
@ -476,6 +467,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
private void evictEntry(K key) {
CacheEntry<K,V> o = map.remove(key);
if (o == null) return;
ramBytes.addAndGet(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
stats.size.decrementAndGet();
stats.evictionCounter.incrementAndGet();
if(evictionListener != null) evictionListener.evictedEntry(o.key,o.value);
@ -553,6 +545,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
@Override
public void clear() {
map.clear();
ramBytes.set(0);
}
public Map<Object, CacheEntry<K,V>> getMap() {
@ -562,8 +555,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>>, Accountable {
public static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOf(CacheEntry.class);
K key;
V value;
final K key;
final V value;
final long ramBytesUsed; // cache
volatile long lastAccessed = 0;
long lastAccessedCopy = 0;
@ -572,6 +566,10 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
this.key = key;
this.value = value;
this.lastAccessed = lastAccessed;
this.ramBytesUsed =
BASE_RAM_BYTES_USED +
RamUsageEstimator.sizeOfObject(key, QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(value, QUERY_DEFAULT_RAM_BYTES_USED);
}
public void setLastAccessed(long lastAccessed) {
@ -601,18 +599,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
@Override
public long ramBytesUsed() {
long ramBytes = BASE_RAM_BYTES_USED;
if (key instanceof Accountable) {
ramBytes += ((Accountable) key).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
if (value instanceof Accountable) {
ramBytes += ((Accountable) value).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
return ramBytes;
return ramBytesUsed;
}
@Override
@ -637,7 +624,21 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
}
public static class Stats {
public static class Stats implements Accountable {
private static final long RAM_BYTES_USED =
// accounts for field refs
RamUsageEstimator.shallowSizeOfInstance(Stats.class) +
// LongAdder
3 * (
RamUsageEstimator.NUM_BYTES_ARRAY_HEADER +
RamUsageEstimator.primitiveSizes.get(long.class) +
2 * (RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.primitiveSizes.get(long.class))
) +
// AtomicLong
2 * RamUsageEstimator.primitiveSizes.get(long.class) +
// AtomicInteger
RamUsageEstimator.primitiveSizes.get(int.class);
private final AtomicLong accessCounter = new AtomicLong(0);
private final LongAdder putCounter = new LongAdder();
private final LongAdder nonLivePutCounter = new LongAdder();
@ -681,6 +682,11 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
evictionCounter.addAndGet(other.evictionCounter.get());
size.set(Math.max(size.get(), other.size.get()));
}
@Override
public long ramBytesUsed() {
return RAM_BYTES_USED;
}
}
public static interface EvictionListener<K,V>{
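A subtle relocation above: the ramBytes decrement moved out of the ram-watermark sweep loop and into evictEntry(), so evictions triggered by the ordinary size-based sweep now update the counter too, and the accounting no longer depends on ramUpperWatermark being set. A toy model of the "decrement in exactly one place" rule:

import java.util.ArrayDeque;
import java.util.concurrent.atomic.AtomicLong;

public class SingleOwnerDecrement {
  static final AtomicLong ramBytes = new AtomicLong();
  static final ArrayDeque<Long> entries = new ArrayDeque<>();  // entry sizes, oldest first

  static void evict() {
    Long size = entries.poll();
    if (size != null) ramBytes.addAndGet(-size);  // the only decrement site
  }

  public static void main(String[] args) {
    for (long s : new long[] {100, 200, 300}) { entries.add(s); ramBytes.addAndGet(s); }
    long ramLowerWatermark = 350;
    while (ramBytes.get() > ramLowerWatermark && !entries.isEmpty()) {
      evict();  // no second addAndGet here, unlike the pre-patch sweep
    }
    System.out.println(ramBytes.get());  // 300
  }
}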


@ -144,6 +144,7 @@ public class QueryResultKeyTest extends SolrTestCaseJ4 {
assertNotNull(key1);
assertNotNull(key2);
assertEquals(key1.hashCode(), key2.hashCode());
assertEquals(key1.ramBytesUsed(), key2.ramBytesUsed());
assertEquals(key1, key2);
assertEquals(key2, key1);
}


@ -16,6 +16,10 @@
*/
package org.apache.solr.search;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCase;
import org.apache.solr.metrics.MetricsMap;
@ -294,6 +298,34 @@ public class TestFastLRUCache extends SolrTestCase {
System.out.println("time=" + timer.getTime() + ", minSize="+minSize+",maxSize="+maxSize);
}
public void testAccountable() {
FastLRUCache<Query, DocSet> sc = new FastLRUCache<>();
try {
sc.initializeMetrics(metricManager, registry, "foo", scope);
Map l = new HashMap();
l.put("size", "100");
l.put("initialSize", "10");
l.put("autowarmCount", "25");
CacheRegenerator cr = new NoOpRegenerator();
Object o = sc.init(l, null, cr);
sc.setState(SolrCache.State.LIVE);
long initialBytes = sc.ramBytesUsed();
WildcardQuery q = new WildcardQuery(new Term("foo", "bar"));
DocSet docSet = new BitDocSet();
sc.put(q, docSet);
long updatedBytes = sc.ramBytesUsed();
assertTrue(updatedBytes > initialBytes);
long estimated = initialBytes + q.ramBytesUsed() + docSet.ramBytesUsed() + ConcurrentLRUCache.CacheEntry.BASE_RAM_BYTES_USED
+ RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
assertEquals(estimated, updatedBytes);
sc.clear();
long clearedBytes = sc.ramBytesUsed();
assertEquals(initialBytes, clearedBytes);
} finally {
sc.close();
}
}
/***
public void testPerf() {
doPerfTest(1000000, 100000, 200000); // big cache, warmup


@ -26,6 +26,10 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.ExecutorUtil;
@ -405,6 +409,51 @@ public class TestLFUCache extends SolrTestCaseJ4 {
assertNull("Exception during concurrent access: " + error.get(), error.get());
}
@Test
public void testAccountable() throws Exception {
SolrMetricManager metricManager = new SolrMetricManager();
Random r = random();
String registry = TestUtil.randomSimpleString(r, 2, 10);
String scope = TestUtil.randomSimpleString(r, 2, 10);
LFUCache lfuCache = new LFUCache();
lfuCache.initializeMetrics(metricManager, registry, "foo", scope + ".lfuCache");
try {
Map params = new HashMap();
params.put("size", "100");
params.put("initialSize", "10");
params.put("autowarmCount", "25");
NoOpRegenerator regenerator = new NoOpRegenerator();
Object initObj = lfuCache.init(params, null, regenerator);
lfuCache.setState(SolrCache.State.LIVE);
long initialBytes = lfuCache.ramBytesUsed();
WildcardQuery q = new WildcardQuery(new Term("foo", "bar"));
DocSet docSet = new BitDocSet();
// 1 insert
lfuCache.put(q, docSet);
long updatedBytes = lfuCache.ramBytesUsed();
assertTrue(updatedBytes > initialBytes);
long estimated = initialBytes + q.ramBytesUsed() + docSet.ramBytesUsed() + ConcurrentLFUCache.CacheEntry.BASE_RAM_BYTES_USED
+ RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
assertEquals(estimated, updatedBytes);
TermQuery tq = new TermQuery(new Term("foo", "bar"));
lfuCache.put(tq, docSet);
estimated += RamUsageEstimator.sizeOfObject(tq, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
docSet.ramBytesUsed() + ConcurrentLFUCache.CacheEntry.BASE_RAM_BYTES_USED +
RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
updatedBytes = lfuCache.ramBytesUsed();
assertEquals(estimated, updatedBytes);
lfuCache.clear();
long clearedBytes = lfuCache.ramBytesUsed();
assertEquals(initialBytes, clearedBytes);
} finally {
lfuCache.close();
}
}
// From the original LRU cache tests, they're commented out there too because they take a while.
// void doPerfTest(int iter, int cacheSize, int maxKey) {
// long start = System.currentTimeMillis();


@ -146,19 +146,19 @@ public class TestLRUCache extends SolrTestCase {
}
});
assertEquals(1, accountableLRUCache.size());
assertEquals(baseSize + 512 * 1024 + LRUCache.DEFAULT_RAM_BYTES_USED + LRUCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY, accountableLRUCache.ramBytesUsed());
accountableLRUCache.put("2", new Accountable() {
assertEquals(baseSize + 512 * 1024 + RamUsageEstimator.sizeOfObject("1") + RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY, accountableLRUCache.ramBytesUsed());
accountableLRUCache.put("20", new Accountable() {
@Override
public long ramBytesUsed() {
return 512 * 1024;
}
});
assertEquals(1, accountableLRUCache.size());
assertEquals(baseSize + 512 * 1024 + LRUCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + LRUCache.DEFAULT_RAM_BYTES_USED, accountableLRUCache.ramBytesUsed());
assertEquals(baseSize + 512 * 1024 + RamUsageEstimator.sizeOfObject("20") + RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY, accountableLRUCache.ramBytesUsed());
Map<String,Object> nl = accountableLRUCache.getMetricsMap().getValue();
assertEquals(1L, nl.get("evictions"));
assertEquals(1L, nl.get("evictionsRamUsage"));
accountableLRUCache.put("3", new Accountable() {
accountableLRUCache.put("300", new Accountable() {
@Override
public long ramBytesUsed() {
return 1024;
@ -168,7 +168,8 @@ public class TestLRUCache extends SolrTestCase {
assertEquals(1L, nl.get("evictions"));
assertEquals(1L, nl.get("evictionsRamUsage"));
assertEquals(2L, accountableLRUCache.size());
assertEquals(baseSize + 513 * 1024 + LRUCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * 2 + LRUCache.DEFAULT_RAM_BYTES_USED * 2, accountableLRUCache.ramBytesUsed());
assertEquals(baseSize + 513 * 1024 + RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * 2 +
RamUsageEstimator.sizeOfObject("20") + RamUsageEstimator.sizeOfObject("300"), accountableLRUCache.ramBytesUsed());
accountableLRUCache.clear();
assertEquals(RamUsageEstimator.shallowSizeOfInstance(LRUCache.class), accountableLRUCache.ramBytesUsed());