SOLR-8241: Add CaffeineCache, an efficient implementation of SolrCache.

Andrzej Bialecki 2019-10-03 14:19:59 +02:00
parent 2bdfc39d89
commit 8007ac0cb0
13 changed files with 867 additions and 155 deletions

View File

@@ -23,7 +23,7 @@ com.fasterxml.jackson.core.version = 2.9.9
 /com.fasterxml.jackson.core/jackson-databind = 2.9.9.3
 /com.fasterxml.jackson.dataformat/jackson-dataformat-smile = ${com.fasterxml.jackson.core.version}
-/com.github.ben-manes.caffeine/caffeine = 2.4.0
+/com.github.ben-manes.caffeine/caffeine = 2.8.0
 /com.github.virtuald/curvesapi = 1.04
 /com.google.guava/guava = 25.1-jre

View File

@@ -152,6 +152,8 @@ New Features

 * SOLR-13625: Add CsvStream, TsvStream Streaming Expressions and supporting Stream Evaluators (Joel Bernstein)

+* SOLR-8241: Add CaffeineCache, an efficient implementation of SolrCache. (Ben Manes, Shawn Heisey, David Smiley, Andrzej Bialecki)
+
 Improvements
 ----------------------

View File

@@ -0,0 +1,365 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.lang.invoke.MethodHandles;
import java.time.Duration;
import java.util.Collections;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;
import com.codahale.metrics.MetricRegistry;
import com.github.benmanes.caffeine.cache.RemovalCause;
import com.github.benmanes.caffeine.cache.RemovalListener;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.SolrException;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.metrics.SolrMetricManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.Policy.Eviction;
import com.github.benmanes.caffeine.cache.stats.CacheStats;
import com.google.common.annotations.VisibleForTesting;
/**
* A SolrCache backed by the Caffeine caching library [1]. By default it uses the Window TinyLFU (W-TinyLFU)
* eviction policy.
* <p>This cache supports either maximum size limit (the number of items) or maximum ram bytes limit, but
* not both. If both values are set then only maxRamMB limit is used and maximum size limit is ignored.</p>
* <p>
* W-TinyLFU [2] is a near optimal policy that uses recency and frequency to determine which entry
* to evict in O(1) time. The estimated frequency is retained in a Count-Min Sketch and entries
* reside on LRU priority queues [3]. By capturing the historic frequency of an entry, the cache is
* able to outperform classic policies like LRU and LFU, as well as modern policies like ARC and
* LIRS. This policy performed particularly well in search workloads.
* <p>
* [1] https://github.com/ben-manes/caffeine
* [2] http://arxiv.org/pdf/1512.00727.pdf
* [3] http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html
*/
public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V>, Accountable, RemovalListener<K, V> {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CaffeineCache.class)
+ RamUsageEstimator.shallowSizeOfInstance(CacheStats.class)
+ 2 * RamUsageEstimator.shallowSizeOfInstance(LongAdder.class);
private Executor executor;
private CacheStats priorStats;
private long priorInserts;
private String description;
private LongAdder inserts;
private Cache<K,V> cache;
private long warmupTime;
private int maxSize;
private long maxRamBytes;
private int initialSize;
private int maxIdleTimeSec;
private boolean cleanupThread;
private Set<String> metricNames = ConcurrentHashMap.newKeySet();
private MetricsMap cacheMap;
private MetricRegistry registry;
private long initialRamBytes = 0;
private final LongAdder ramBytes = new LongAdder();
public CaffeineCache() {
this.priorStats = CacheStats.empty();
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
super.init(args, regenerator);
String str = (String) args.get(SIZE_PARAM);
maxSize = (str == null) ? 1024 : Integer.parseInt(str);
str = (String) args.get("initialSize");
initialSize = Math.min((str == null) ? 1024 : Integer.parseInt(str), maxSize);
str = (String) args.get(MAX_IDLE_TIME_PARAM);
if (str == null) {
maxIdleTimeSec = -1;
} else {
maxIdleTimeSec = Integer.parseInt(str);
}
str = (String) args.get(MAX_RAM_MB_PARAM);
int maxRamMB = str == null ? -1 : Double.valueOf(str).intValue();
maxRamBytes = maxRamMB < 0 ? Long.MAX_VALUE : maxRamMB * 1024L * 1024L;
str = (String) args.get(CLEANUP_THREAD_PARAM);
cleanupThread = str != null && Boolean.parseBoolean(str);
if (cleanupThread) {
executor = ForkJoinPool.commonPool();
} else {
executor = Runnable::run;
}
description = generateDescription(maxSize, initialSize);
cache = buildCache(null);
inserts = new LongAdder();
initialRamBytes =
RamUsageEstimator.shallowSizeOfInstance(cache.getClass()) +
RamUsageEstimator.shallowSizeOfInstance(executor.getClass()) +
RamUsageEstimator.sizeOfObject(description);
return persistence;
}
private Cache<K, V> buildCache(Cache<K, V> prev) {
Caffeine builder = Caffeine.newBuilder()
.initialCapacity(initialSize)
.executor(executor)
.removalListener(this)
.recordStats();
if (maxIdleTimeSec > 0) {
builder.expireAfterAccess(Duration.ofSeconds(maxIdleTimeSec));
}
if (maxRamBytes != Long.MAX_VALUE) {
builder.maximumWeight(maxRamBytes);
builder.weigher((k, v) -> (int) (RamUsageEstimator.sizeOfObject(k) + RamUsageEstimator.sizeOfObject(v)));
} else {
builder.maximumSize(maxSize);
}
Cache<K, V> newCache = builder.build();
if (prev != null) {
newCache.putAll(prev.asMap());
}
return newCache;
}
@Override
public void onRemoval(K key, V value, RemovalCause cause) {
ramBytes.add(
- (RamUsageEstimator.sizeOfObject(key, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(value, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY)
);
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + initialRamBytes + ramBytes.sum();
}
@Override
public V get(K key) {
return cache.getIfPresent(key);
}
@Override
public V put(K key, V val) {
inserts.increment();
V old = cache.asMap().put(key, val);
ramBytes.add(RamUsageEstimator.sizeOfObject(key, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) +
RamUsageEstimator.sizeOfObject(val, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED));
if (old != null) {
ramBytes.add(- RamUsageEstimator.sizeOfObject(old, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED));
} else {
ramBytes.add(RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY);
}
return old;
}
@Override
public void clear() {
cache.invalidateAll();
ramBytes.reset();
}
@Override
public int size() {
return cache.asMap().size();
}
@Override
public void close() {
cache.invalidateAll();
cache.cleanUp();
if (executor instanceof ExecutorService) {
((ExecutorService)executor).shutdownNow();
}
ramBytes.reset();
}
@Override
public int getMaxSize() {
return maxSize;
}
@Override
public void setMaxSize(int maxSize) {
if (this.maxSize == maxSize) {
return;
}
Optional<Eviction<K, V>> evictionOpt = cache.policy().eviction();
if (evictionOpt.isPresent()) {
Eviction<K, V> eviction = evictionOpt.get();
eviction.setMaximum(maxSize);
this.maxSize = maxSize;
initialSize = Math.min(1024, this.maxSize);
description = generateDescription(this.maxSize, initialSize);
cache.cleanUp();
}
}
@Override
public int getMaxRamMB() {
return maxRamBytes != Long.MAX_VALUE ? (int) (maxRamBytes / 1024L / 1024L) : -1;
}
@Override
public void setMaxRamMB(int maxRamMB) {
long newMaxRamBytes = maxRamMB < 0 ? Long.MAX_VALUE : maxRamMB * 1024L * 1024L;
if (newMaxRamBytes != maxRamBytes) {
maxRamBytes = newMaxRamBytes;
Optional<Eviction<K, V>> evictionOpt = cache.policy().eviction();
if (evictionOpt.isPresent()) {
Eviction<K, V> eviction = evictionOpt.get();
if (!eviction.isWeighted()) {
// rebuild cache using weigher
cache = buildCache(cache);
return;
} else if (maxRamBytes == Long.MAX_VALUE) {
// rebuild cache using maxSize
cache = buildCache(cache);
return;
}
eviction.setMaximum(newMaxRamBytes);
description = generateDescription(this.maxSize, initialSize);
cache.cleanUp();
}
}
}
@Override
public void warm(SolrIndexSearcher searcher, SolrCache<K,V> old) {
if (regenerator == null) {
return;
}
long warmingStartTime = System.nanoTime();
Map<K, V> hottest = Collections.emptyMap();
CaffeineCache<K,V> other = (CaffeineCache<K,V>)old;
// warm entries
if (isAutowarmingOn()) {
Eviction<K, V> policy = other.cache.policy().eviction().get();
int size = autowarm.getWarmCount(other.cache.asMap().size());
hottest = policy.hottest(size);
}
for (Entry<K, V> entry : hottest.entrySet()) {
try {
boolean continueRegen = regenerator.regenerateItem(
searcher, this, old, entry.getKey(), entry.getValue());
if (!continueRegen) {
break;
}
}
catch (Exception e) {
SolrException.log(log, "Error during auto-warming of key:" + entry.getKey(), e);
}
}
inserts.reset();
priorStats = other.cache.stats().plus(other.priorStats);
priorInserts = other.inserts.sum() + other.priorInserts;
warmupTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - warmingStartTime, TimeUnit.NANOSECONDS);
}
/** Returns the description of this cache. */
private String generateDescription(int limit, int initialSize) {
return String.format(Locale.ROOT, "TinyLfu Cache(maxSize=%d, initialSize=%d%s)",
limit, initialSize, isAutowarmingOn() ? (", " + getAutowarmDescription()) : "");
}
//////////////////////// SolrInfoBean methods //////////////////////
@Override
public String getName() {
return CaffeineCache.class.getName();
}
@Override
public String getDescription() {
return description;
}
// for unit tests only
@VisibleForTesting
MetricsMap getMetricsMap() {
return cacheMap;
}
@Override
public MetricRegistry getMetricRegistry() {
return registry;
}
@Override
public String toString() {
return name() + (cacheMap != null ? cacheMap.getValue().toString() : "");
}
@Override
public Set<String> getMetricNames() {
return metricNames;
}
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String tag, String scope) {
registry = manager.registry(registryName);
cacheMap = new MetricsMap((detailed, map) -> {
CacheStats stats = cache.stats();
long insertCount = inserts.sum();
map.put(LOOKUPS_PARAM, stats.requestCount());
map.put(HITS_PARAM, stats.hitCount());
map.put(HIT_RATIO_PARAM, stats.hitRate());
map.put(INSERTS_PARAM, insertCount);
map.put(EVICTIONS_PARAM, stats.evictionCount());
map.put(SIZE_PARAM, cache.asMap().size());
map.put("warmupTime", warmupTime);
map.put(RAM_BYTES_USED_PARAM, ramBytesUsed());
map.put(MAX_RAM_MB_PARAM, getMaxRamMB());
CacheStats cumulativeStats = priorStats.plus(stats);
map.put("cumulative_lookups", cumulativeStats.requestCount());
map.put("cumulative_hits", cumulativeStats.hitCount());
map.put("cumulative_hitratio", cumulativeStats.hitRate());
map.put("cumulative_inserts", priorInserts + insertCount);
map.put("cumulative_evictions", cumulativeStats.evictionCount());
});
manager.registerGauge(this, registryName, cacheMap, tag, true, scope, getCategory().toString());
}
}
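
A minimal usage sketch (not part of the commit) showing how the class above is driven through its SolrCache lifecycle. The parameter keys ("size", "initialSize", "maxIdleTime") are the constants this commit consolidates in SolrCache, and NoOpRegenerator is the existing no-op regenerator from the same package, as used by TestCaffeineCache below.

package org.apache.solr.search;

import java.util.HashMap;
import java.util.Map;

public class CaffeineCacheUsageSketch {
  public static void main(String[] args) {
    CaffeineCache<Integer, String> cache = new CaffeineCache<>();
    Map<String, String> params = new HashMap<>();
    params.put("size", "100");        // max number of entries (SIZE_PARAM)
    params.put("initialSize", "10");  // initial capacity (INITIAL_SIZE_PARAM)
    params.put("maxIdleTime", "60");  // evict entries idle for 60s (MAX_IDLE_TIME_PARAM)
    cache.init(params, null, new NoOpRegenerator());

    cache.put(1, "one");
    System.out.println(cache.get(1)); // prints: one
    System.out.println(cache.size()); // prints: 1
    cache.close();                    // invalidates all entries and resets RAM accounting
  }
}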

View File

@@ -54,9 +54,6 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K, V>
   public static final String MIN_SIZE_PARAM = "minSize";
   public static final String ACCEPTABLE_SIZE_PARAM = "acceptableSize";
-  public static final String INITIAL_SIZE_PARAM = "initialSize";
-  public static final String CLEANUP_THREAD_PARAM = "cleanupThread";
-  public static final String SHOW_ITEMS_PARAM = "showItems";

   // contains the statistics objects for all open caches of the same type
   private List<ConcurrentLRUCache.Stats> statsList;

View File

@@ -38,6 +38,9 @@ public interface SolrCache<K,V> extends SolrInfoBean, SolrMetricProducer {
   String RAM_BYTES_USED_PARAM = "ramBytesUsed";
   String MAX_RAM_MB_PARAM = "maxRamMB";
   String MAX_IDLE_TIME_PARAM = "maxIdleTime";
+  String INITIAL_SIZE_PARAM = "initialSize";
+  String CLEANUP_THREAD_PARAM = "cleanupThread";
+  String SHOW_ITEMS_PARAM = "showItems";

   /**
    * The initialization routine. Instance specific arguments are passed in

View File

@@ -23,8 +23,9 @@ import java.util.Map;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
+//import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
 import java.util.concurrent.locks.ReentrantLock;

 import org.apache.lucene.util.Accountable;
@@ -69,7 +70,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   private long maxIdleTimeNs;
   private final TimeSource timeSource = TimeSource.NANO_TIME;
   private final AtomicLong oldestEntry = new AtomicLong(0L);
-  private final AtomicLong ramBytes = new AtomicLong(0);
+  private final LongAdder ramBytes = new LongAdder();

   public ConcurrentLFUCache(int upperWaterMark, final int lowerWaterMark, int acceptableSize,
                             int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
@@ -155,11 +156,11 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   public V get(K key) {
     CacheEntry<K, V> e = map.get(key);
     if (e == null) {
-      if (islive) stats.missCounter.incrementAndGet();
+      if (islive) stats.missCounter.increment();
     } else if (islive) {
       e.lastAccessed = timeSource.getEpochTimeNs();
-      stats.accessCounter.incrementAndGet();
-      e.hits.incrementAndGet();
+      stats.accessCounter.increment();
+      e.hits.increment();
     }
     return e != null ? e.value : null;
   }
@@ -168,8 +169,8 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   public V remove(K key) {
     CacheEntry<K, V> cacheEntry = map.remove(key);
     if (cacheEntry != null) {
-      stats.size.decrementAndGet();
-      ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
+      stats.size.decrement();
+      ramBytes.add(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
       return cacheEntry.value;
     }
     return null;
@@ -187,23 +188,24 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
    * @lucene.internal
    */
   public V putCacheEntry(CacheEntry<K, V> e) {
-    stats.accessCounter.incrementAndGet();
+    stats.accessCounter.increment();
     // initialize oldestEntry
     oldestEntry.updateAndGet(x -> x > e.lastAccessed || x == 0 ? e.lastAccessed : x);
     CacheEntry<K, V> oldCacheEntry = map.put(e.key, e);
     int currentSize;
     if (oldCacheEntry == null) {
-      currentSize = stats.size.incrementAndGet();
-      ramBytes.addAndGet(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
+      stats.size.increment();
+      currentSize = stats.size.intValue();
+      ramBytes.add(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
     } else {
-      currentSize = stats.size.get();
-      ramBytes.addAndGet(-oldCacheEntry.ramBytesUsed());
-      ramBytes.addAndGet(e.ramBytesUsed());
+      currentSize = stats.size.intValue();
+      ramBytes.add(-oldCacheEntry.ramBytesUsed());
+      ramBytes.add(e.ramBytesUsed());
     }
     if (islive) {
-      stats.putCounter.incrementAndGet();
+      stats.putCounter.increment();
     } else {
-      stats.nonLivePutCounter.incrementAndGet();
+      stats.nonLivePutCounter.increment();
     }

     // Check if we need to clear out old entries from the cache.
@@ -242,7 +244,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
     isCleaning = true;
     this.lowHitCount = lowHitCount; // volatile write to make isCleaning visible

-    int sz = stats.size.get();
+    int sz = stats.size.intValue();
     boolean evictByIdleTime = maxIdleTimeNs != Long.MAX_VALUE;
     long idleCutoff = evictByIdleTime ? timeSource.getEpochTimeNs() - maxIdleTimeNs : -1L;
     if (sz <= upperWaterMark && (evictByIdleTime && oldestEntry.get() > idleCutoff)) {
@@ -264,7 +266,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
         if (entry.getValue().lastAccessedCopy < idleCutoff) {
           iterator.remove();
           postRemoveEntry(entry.getValue());
-          stats.evictionIdleCounter.incrementAndGet();
+          stats.evictionIdleCounter.increment();
         } else {
           if (entry.getValue().lastAccessedCopy < currentOldestEntry) {
             currentOldestEntry = entry.getValue().lastAccessedCopy;
@@ -275,7 +277,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
         oldestEntry.set(currentOldestEntry);
       }
       // refresh size and maybe return
-      sz = stats.size.get();
+      sz = stats.size.intValue();
       if (sz <= upperWaterMark) {
         return;
       }
@@ -286,10 +288,11 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
       for (CacheEntry<K, V> ce : map.values()) {
         // set hitsCopy to avoid later Atomic reads. Primitive types are faster than the atomic get().
-        ce.hitsCopy = ce.hits.get();
+        ce.hitsCopy = ce.hits.longValue();
         ce.lastAccessedCopy = ce.lastAccessed;
         if (timeDecay) {
-          ce.hits.set(ce.hitsCopy >>> 1);
+          ce.hits.reset();
+          ce.hits.add(ce.hitsCopy >>> 1);
         }
         if (tree.size() < wantToRemove) {
           tree.add(ce);
@@ -342,9 +345,9 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   private void postRemoveEntry(CacheEntry<K, V> o) {
     if (o == null) return;
-    ramBytes.addAndGet(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
-    stats.size.decrementAndGet();
-    stats.evictionCounter.incrementAndGet();
+    ramBytes.add(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
+    stats.size.decrement();
+    stats.evictionCounter.increment();
     if (evictionListener != null) evictionListener.evictedEntry(o.key, o.value);
   }
@@ -367,7 +370,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
     try {
       for (Map.Entry<Object, CacheEntry<K, V>> entry : map.entrySet()) {
         CacheEntry<K, V> ce = entry.getValue();
-        ce.hitsCopy = ce.hits.get();
+        ce.hitsCopy = ce.hits.longValue();
         ce.lastAccessedCopy = ce.lastAccessed;
         if (tree.size() < n) {
           tree.add(ce);
@@ -411,7 +414,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
     try {
       for (Map.Entry<Object, CacheEntry<K, V>> entry : map.entrySet()) {
         CacheEntry<K, V> ce = entry.getValue();
-        ce.hitsCopy = ce.hits.get();
+        ce.hitsCopy = ce.hits.longValue();
         ce.lastAccessedCopy = ce.lastAccessed;
         if (tree.size() < n) {
           tree.add(ce);
@@ -437,13 +440,13 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   }

   public int size() {
-    return stats.size.get();
+    return stats.size.intValue();
   }

   @Override
   public void clear() {
     map.clear();
-    ramBytes.set(0);
+    ramBytes.reset();
   }

   public Map<Object, CacheEntry<K, V>> getMap() {
@@ -452,7 +455,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   @Override
   public long ramBytesUsed() {
-    return BASE_RAM_BYTES_USED + ramBytes.get();
+    return BASE_RAM_BYTES_USED + ramBytes.sum();
   }

   public static class CacheEntry<K, V> implements Comparable<CacheEntry<K, V>>, Accountable {
@@ -463,7 +466,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
     final K key;
     final V value;
     final long ramBytesUsed;
-    volatile AtomicLong hits = new AtomicLong(0);
+    final LongAdder hits = new LongAdder();
     long hitsCopy = 0;
     volatile long lastAccessed = 0;
     long lastAccessedCopy = 0;
@@ -500,7 +503,7 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
     @Override
     public String toString() {
-      return "key: " + key + " value: " + value + " hits:" + hits.get();
+      return "key: " + key + " value: " + value + " hits:" + hits.longValue();
     }

     @Override
@@ -529,57 +532,63 @@ public class ConcurrentLFUCache<K, V> implements Cache<K,V>, Accountable {
   public static class Stats implements Accountable {
     private static final long RAM_BYTES_USED =
         RamUsageEstimator.shallowSizeOfInstance(Stats.class) +
-        6 * RamUsageEstimator.primitiveSizes.get(long.class) +
-        RamUsageEstimator.primitiveSizes.get(int.class);
+        // LongAdder
+        7 * (
+            RamUsageEstimator.NUM_BYTES_ARRAY_HEADER +
+            RamUsageEstimator.primitiveSizes.get(long.class) +
+            2 * (RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.primitiveSizes.get(long.class))
+        );

-    private final AtomicLong accessCounter = new AtomicLong(0),
-        putCounter = new AtomicLong(0),
-        nonLivePutCounter = new AtomicLong(0),
-        missCounter = new AtomicLong();
-    private final AtomicInteger size = new AtomicInteger();
-    private AtomicLong evictionCounter = new AtomicLong();
-    private AtomicLong evictionIdleCounter = new AtomicLong();
+    private final LongAdder accessCounter = new LongAdder();
+    private final LongAdder putCounter = new LongAdder();
+    private final LongAdder nonLivePutCounter = new LongAdder();
+    private final LongAdder missCounter = new LongAdder();
+    private final LongAdder size = new LongAdder();
+    private LongAdder evictionCounter = new LongAdder();
+    private LongAdder evictionIdleCounter = new LongAdder();

     public long getCumulativeLookups() {
-      return (accessCounter.get() - putCounter.get() - nonLivePutCounter.get()) + missCounter.get();
+      return (accessCounter.longValue() - putCounter.longValue() - nonLivePutCounter.longValue()) + missCounter.longValue();
     }

     public long getCumulativeHits() {
-      return accessCounter.get() - putCounter.get() - nonLivePutCounter.get();
+      return accessCounter.longValue() - putCounter.longValue() - nonLivePutCounter.longValue();
     }

     public long getCumulativePuts() {
-      return putCounter.get();
+      return putCounter.longValue();
     }

     public long getCumulativeEvictions() {
-      return evictionCounter.get();
+      return evictionCounter.longValue();
     }

     public long getCumulativeIdleEvictions() {
-      return evictionIdleCounter.get();
+      return evictionIdleCounter.longValue();
     }

     public int getCurrentSize() {
-      return size.get();
+      return size.intValue();
     }

     public long getCumulativeNonLivePuts() {
-      return nonLivePutCounter.get();
+      return nonLivePutCounter.longValue();
     }

     public long getCumulativeMisses() {
-      return missCounter.get();
+      return missCounter.longValue();
     }

     public void add(Stats other) {
-      accessCounter.addAndGet(other.accessCounter.get());
-      putCounter.addAndGet(other.putCounter.get());
-      nonLivePutCounter.addAndGet(other.nonLivePutCounter.get());
-      missCounter.addAndGet(other.missCounter.get());
-      evictionCounter.addAndGet(other.evictionCounter.get());
-      evictionIdleCounter.addAndGet(other.evictionIdleCounter.get());
-      size.set(Math.max(size.get(), other.size.get()));
+      accessCounter.add(other.accessCounter.longValue());
+      putCounter.add(other.putCounter.longValue());
+      nonLivePutCounter.add(other.nonLivePutCounter.longValue());
+      missCounter.add(other.missCounter.longValue());
+      evictionCounter.add(other.evictionCounter.longValue());
+      evictionIdleCounter.add(other.evictionIdleCounter.longValue());
+      long maxSize = Math.max(size.longValue(), other.size.longValue());
+      size.reset();
+      size.add(maxSize);
     }

     @Override
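
The recurring substitutions in this file follow from java.util.concurrent.atomic.LongAdder's API: it spreads updates across striped cells to cut write contention, but offers no set() or incrementAndGet(), so single atomic read-modify-write calls become separate update and read steps. A standalone JDK-only sketch of the mapping (not part of the commit):

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

public class LongAdderMigrationSketch {
  public static void main(String[] args) {
    AtomicLong atomic = new AtomicLong();
    long newValue = atomic.incrementAndGet(); // one atomic op, returns the new value

    LongAdder adder = new LongAdder();
    adder.increment();                        // update only; no value returned
    long snapshot = adder.sum();              // separate read; may race with writers

    // No set(): the diff's size.set(max) becomes reset() followed by add()
    adder.reset();
    adder.add(Math.max(snapshot, newValue));
    System.out.println(adder.sum());
  }
}

Note the small loss of atomicity is accepted here: stats.size.increment() followed by stats.size.intValue() can observe other threads' updates in between, which these statistics counters tolerate.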

View File

@@ -32,7 +32,7 @@ import java.util.Map;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
+//import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
 import java.util.concurrent.locks.ReentrantLock;
@@ -78,7 +78,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
   private boolean runCleanupThread;
   private long ramLowerWatermark, ramUpperWatermark;
-  private final AtomicLong ramBytes = new AtomicLong(0);
+  private final LongAdder ramBytes = new LongAdder();

   public ConcurrentLRUCache(long ramLowerWatermark, long ramUpperWatermark,
                             boolean runCleanupThread, EvictionListener<K, V> evictionListener) {
@@ -202,8 +202,8 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
   public V remove(K key) {
     CacheEntry<K,V> cacheEntry = map.remove(key);
     if (cacheEntry != null) {
-      stats.size.decrementAndGet();
-      ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
+      stats.size.decrement();
+      ramBytes.add(-cacheEntry.ramBytesUsed() - HASHTABLE_RAM_BYTES_PER_ENTRY);
       return cacheEntry.value;
     }
     return null;
@@ -226,12 +226,13 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
     CacheEntry<K,V> oldCacheEntry = map.put(e.key, e);
     int currentSize;
     if (oldCacheEntry == null) {
-      currentSize = stats.size.incrementAndGet();
-      ramBytes.addAndGet(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
+      stats.size.increment();
+      currentSize = stats.size.intValue();
+      ramBytes.add(e.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
     } else {
-      currentSize = stats.size.get();
-      ramBytes.addAndGet(-oldCacheEntry.ramBytesUsed());
-      ramBytes.addAndGet(e.ramBytesUsed());
+      currentSize = stats.size.intValue();
+      ramBytes.add(-oldCacheEntry.ramBytesUsed());
+      ramBytes.add(e.ramBytesUsed());
     }
     if (islive) {
       stats.putCounter.increment();
@@ -250,7 +251,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
     // Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
     // in this method.
     long idleCutoff = timeSource.getEpochTimeNs() - maxIdleTimeNs;
-    if ((currentSize > upperWaterMark || ramBytes.get() > ramUpperWatermark || oldestEntryNs.get() < idleCutoff) && !isCleaning) {
+    if ((currentSize > upperWaterMark || ramBytes.sum() > ramUpperWatermark || oldestEntryNs.get() < idleCutoff) && !isCleaning) {
       if (newThreadForCleanup) {
         new Thread(this::markAndSweep).start();
       } else if (cleanupThread != null){
@@ -311,7 +312,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
         Map.Entry<Object, CacheEntry<K, V>> entry = iterator.next();
         if (entry.getValue().createTime < idleCutoff) {
           iterator.remove();
-          stats.evictionIdleCounter.incrementAndGet();
+          stats.evictionIdleCounter.increment();
           postRemoveEntry(entry.getValue());
         } else {
           if (entry.getValue().createTime < currentOldestEntry) {
@@ -341,7 +342,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
       for (int i = entriesInAccessOrder.size() - 1; i >= 0; i--) {
         CacheEntry<K, V> kvCacheEntry = entriesInAccessOrder.get(i);
         evictEntry(kvCacheEntry.key);
-        if (ramBytes.get() <= ramLowerWatermark) {
+        if (ramBytes.sum() <= ramLowerWatermark) {
           break; // we are done!
         }
       }
@@ -366,7 +367,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
     this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible

     long timeCurrent = stats.accessCounter.longValue();
-    int sz = stats.size.get();
+    int sz = stats.size.intValue();

     int numRemoved = 0;
     int numKept = 0;
@@ -585,9 +586,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
   private void postRemoveEntry(CacheEntry<K, V> o) {
     if (o == null) return;
-    ramBytes.addAndGet(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
-    stats.size.decrementAndGet();
-    stats.evictionCounter.incrementAndGet();
+    ramBytes.add(-(o.ramBytesUsed() + HASHTABLE_RAM_BYTES_PER_ENTRY));
+    stats.size.decrement();
+    stats.evictionCounter.increment();
     if(evictionListener != null) evictionListener.evictedEntry(o.key,o.value);
   }
@@ -657,13 +658,13 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
   }

   public int size() {
-    return stats.size.get();
+    return stats.size.intValue();
   }

   @Override
   public void clear() {
     map.clear();
-    ramBytes.set(0);
+    ramBytes.reset();
   }

   public Map<Object, CacheEntry<K,V>> getMap() {
@@ -749,23 +750,21 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
         // accounts for field refs
         RamUsageEstimator.shallowSizeOfInstance(Stats.class) +
         // LongAdder
-        3 * (
+        6 * (
             RamUsageEstimator.NUM_BYTES_ARRAY_HEADER +
             RamUsageEstimator.primitiveSizes.get(long.class) +
             2 * (RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.primitiveSizes.get(long.class))
         ) +
         // AtomicLong
-        3 * RamUsageEstimator.primitiveSizes.get(long.class) +
-        // AtomicInteger
-        RamUsageEstimator.primitiveSizes.get(int.class);
+        RamUsageEstimator.primitiveSizes.get(long.class);

     private final AtomicLong accessCounter = new AtomicLong(0);
     private final LongAdder putCounter = new LongAdder();
     private final LongAdder nonLivePutCounter = new LongAdder();
     private final LongAdder missCounter = new LongAdder();
-    private final AtomicInteger size = new AtomicInteger();
-    private AtomicLong evictionCounter = new AtomicLong();
-    private AtomicLong evictionIdleCounter = new AtomicLong();
+    private final LongAdder size = new LongAdder();
+    private LongAdder evictionCounter = new LongAdder();
+    private LongAdder evictionIdleCounter = new LongAdder();

     public long getCumulativeLookups() {
       return (accessCounter.longValue() - putCounter.longValue() - nonLivePutCounter.longValue()) + missCounter.longValue();
@@ -780,15 +779,15 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
     }

     public long getCumulativeEvictions() {
-      return evictionCounter.get();
+      return evictionCounter.longValue();
     }

     public long getCumulativeIdleEvictions() {
-      return evictionIdleCounter.get();
+      return evictionIdleCounter.longValue();
     }

     public int getCurrentSize() {
-      return size.get();
+      return size.intValue();
     }

     public long getCumulativeNonLivePuts() {
@@ -804,8 +803,10 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
       putCounter.add(other.putCounter.longValue());
       nonLivePutCounter.add(other.nonLivePutCounter.longValue());
       missCounter.add(other.missCounter.longValue());
-      evictionCounter.addAndGet(other.evictionCounter.get());
-      size.set(Math.max(size.get(), other.size.get()));
+      evictionCounter.add(other.evictionCounter.longValue());
+      long maxSize = Math.max(size.longValue(), other.size.longValue());
+      size.reset();
+      size.add(maxSize);
     }

     @Override
@@ -862,7 +863,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
     @Override
     public long ramBytesUsed() {
-      return BASE_RAM_BYTES_USED + ramBytes.get();
+      return BASE_RAM_BYTES_USED + ramBytes.sum();
     }

     @Override

View File

@@ -0,0 +1,285 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import com.github.benmanes.caffeine.cache.RemovalCause;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCase;
import org.apache.solr.metrics.SolrMetricManager;
import org.junit.Test;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
* Test for {@link CaffeineCache}.
*/
public class TestCaffeineCache extends SolrTestCase {
SolrMetricManager metricManager = new SolrMetricManager();
String registry = TestUtil.randomSimpleString(random(), 2, 10);
String scope = TestUtil.randomSimpleString(random(), 2, 10);
@Test
public void testSimple() throws IOException {
CaffeineCache<Integer, String> lfuCache = new CaffeineCache<>();
lfuCache.initializeMetrics(metricManager, registry, "foo", scope + "-1");
CaffeineCache<Integer, String> newLFUCache = new CaffeineCache<>();
newLFUCache.initializeMetrics(metricManager, registry, "foo2", scope + "-2");
Map<String, String> params = new HashMap<>();
params.put("size", "100");
params.put("initialSize", "10");
params.put("autowarmCount", "25");
NoOpRegenerator regenerator = new NoOpRegenerator();
Object initObj = lfuCache.init(params, null, regenerator);
lfuCache.setState(SolrCache.State.LIVE);
for (int i = 0; i < 101; i++) {
lfuCache.put(i + 1, Integer.toString(i + 1));
}
assertEquals("15", lfuCache.get(15));
assertEquals("75", lfuCache.get(75));
assertEquals(null, lfuCache.get(110));
Map<String, Object> nl = lfuCache.getMetricsMap().getValue();
assertEquals(3L, nl.get("lookups"));
assertEquals(2L, nl.get("hits"));
assertEquals(101L, nl.get("inserts"));
assertEquals(null, lfuCache.get(1)); // first item put in should be the first out
// Test autowarming
newLFUCache.init(params, initObj, regenerator);
newLFUCache.warm(null, lfuCache);
newLFUCache.setState(SolrCache.State.LIVE);
newLFUCache.put(103, "103");
assertEquals("15", newLFUCache.get(15));
assertEquals("75", newLFUCache.get(75));
assertEquals(null, newLFUCache.get(50));
nl = newLFUCache.getMetricsMap().getValue();
assertEquals(3L, nl.get("lookups"));
assertEquals(2L, nl.get("hits"));
assertEquals(1L, nl.get("inserts"));
assertEquals(0L, nl.get("evictions"));
assertEquals(7L, nl.get("cumulative_lookups"));
assertEquals(4L, nl.get("cumulative_hits"));
assertEquals(102L, nl.get("cumulative_inserts"));
}
@Test
public void testTimeDecay() {
Cache<Integer, String> cacheDecay = Caffeine.newBuilder()
.executor(Runnable::run)
.maximumSize(20)
.build();
for (int i = 1; i < 21; i++) {
cacheDecay.put(i, Integer.toString(i));
}
Map<Integer, String> itemsDecay;
// Now increase the freq count for 5 items
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 10; ++j) {
cacheDecay.getIfPresent(i + 13);
}
}
// OK, 13 - 17 should have larger counts and should stick past next few collections
cacheDecay.put(22, "22");
cacheDecay.put(23, "23");
cacheDecay.put(24, "24");
cacheDecay.put(25, "25");
itemsDecay = cacheDecay.policy().eviction().get().hottest(10);
// 13 - 17 should be in cache, but 11 and 18 (among others) should not. Testing that elements before and
// after the ones with increased counts are removed, and all the increased count ones are still in the cache
assertNull(itemsDecay.get(11));
assertNull(itemsDecay.get(18));
assertNotNull(itemsDecay.get(13));
assertNotNull(itemsDecay.get(14));
assertNotNull(itemsDecay.get(15));
assertNotNull(itemsDecay.get(16));
assertNotNull(itemsDecay.get(17));
// Testing that all the elements in front of the ones with increased counts are gone
for (int idx = 26; idx < 32; ++idx) {
cacheDecay.put(idx, Integer.toString(idx));
}
//Surplus count should be at 0
itemsDecay = cacheDecay.policy().eviction().get().hottest(10);
assertNull(itemsDecay.get(20));
assertNull(itemsDecay.get(24));
assertNotNull(itemsDecay.get(13));
assertNotNull(itemsDecay.get(14));
assertNotNull(itemsDecay.get(15));
assertNotNull(itemsDecay.get(16));
assertNotNull(itemsDecay.get(17));
}
@Test
public void testMaxIdleTime() throws Exception {
int IDLE_TIME_SEC = 5;
CountDownLatch removed = new CountDownLatch(1);
AtomicReference<RemovalCause> removalCause = new AtomicReference<>();
CaffeineCache<String, String> cache = new CaffeineCache<>() {
@Override
public void onRemoval(String key, String value, RemovalCause cause) {
super.onRemoval(key, value, cause);
removalCause.set(cause);
removed.countDown();
}
};
Map<String, String> params = new HashMap<>();
params.put("size", "6");
params.put("maxIdleTime", "" + IDLE_TIME_SEC);
cache.init(params, null, new NoOpRegenerator());
cache.put("foo", "bar");
assertEquals("bar", cache.get("foo"));
// sleep for at least the idle time before inserting other entries
// the eviction is piggy-backed on put()
Thread.sleep(TimeUnit.SECONDS.toMillis(IDLE_TIME_SEC * 2));
cache.put("abc", "xyz");
boolean await = removed.await(30, TimeUnit.SECONDS);
assertTrue("did not expire entry in in time", await);
assertEquals(RemovalCause.EXPIRED, removalCause.get());
assertNull(cache.get("foo"));
}
@Test
public void testSetLimits() throws Exception {
AtomicReference<CountDownLatch> removed = new AtomicReference<>(new CountDownLatch(2));
List<RemovalCause> removalCauses = new ArrayList<>();
List<String> removedKeys = new ArrayList<>();
Set<String> allKeys = new HashSet<>();
CaffeineCache<String, Accountable> cache = new CaffeineCache<>() {
@Override
public Accountable put(String key, Accountable val) {
allKeys.add(key);
return super.put(key, val);
}
@Override
public void onRemoval(String key, Accountable value, RemovalCause cause) {
super.onRemoval(key, value, cause);
removalCauses.add(cause);
removedKeys.add(key);
removed.get().countDown();
}
};
Map<String, String> params = new HashMap<>();
params.put("size", "5");
cache.init(params, null, new NoOpRegenerator());
for (int i = 0; i < 5; i++) {
cache.put("foo-" + i, new Accountable() {
@Override
public long ramBytesUsed() {
return 1024 * 1024;
}
});
}
assertEquals(5, cache.size());
// no evictions yet
assertEquals(2, removed.get().getCount());
cache.put("abc1", new Accountable() {
@Override
public long ramBytesUsed() {
return 1;
}
});
cache.put("abc2", new Accountable() {
@Override
public long ramBytesUsed() {
return 2;
}
});
boolean await = removed.get().await(30, TimeUnit.SECONDS);
assertTrue("did not evict entries in in time", await);
assertEquals(5, cache.size());
assertEquals(2, cache.get("abc2").ramBytesUsed());
for (String key : removedKeys) {
assertNull("key " + key + " still present!", cache.get(key));
allKeys.remove(key);
}
for (RemovalCause cause : removalCauses) {
assertEquals(RemovalCause.SIZE, cause);
}
removed.set(new CountDownLatch(2));
removalCauses.clear();
removedKeys.clear();
// trim down by item count
cache.setMaxSize(3);
cache.put("abc3", new Accountable() {
@Override
public long ramBytesUsed() {
return 3;
}
});
await = removed.get().await(30, TimeUnit.SECONDS);
assertTrue("did not evict entries in in time", await);
assertEquals(3, cache.size());
for (String key : removedKeys) {
assertNull("key " + key + " still present!", cache.get(key));
allKeys.remove(key);
}
for (RemovalCause cause : removalCauses) {
assertEquals(RemovalCause.SIZE, cause);
}
// at least one item has to go
removed.set(new CountDownLatch(1));
removalCauses.clear();
removedKeys.clear();
// trim down by ram size
cache.setMaxRamMB(1);
await = removed.get().await(30, TimeUnit.SECONDS);
assertTrue("did not evict entries in in time", await);
for (String key : removedKeys) {
assertNull("key " + key + " still present!", cache.get(key));
allKeys.remove(key);
}
for (RemovalCause cause : removalCauses) {
assertEquals(RemovalCause.SIZE, cause);
}
// check total size of remaining items
long total = 0;
for (String key : allKeys) {
Accountable a = cache.get(key);
assertNotNull("missing value for key " + key, a);
total += a.ramBytesUsed();
}
assertTrue("total ram bytes should be greater than 0", total > 0);
assertTrue("total ram bytes exceeded limit", total < 1024 * 1024);
}
}
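
testSetLimits above exercises buildCache()'s switch between maximumSize and maximumWeight. A standalone sketch (not part of the commit) of the underlying Caffeine weigher mechanism, against the caffeine 2.8.0 API pinned earlier in this commit:

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class WeigherSketch {
  public static void main(String[] args) {
    // Each entry is charged its value length; eviction is driven by the summed
    // weight against maximumWeight, not by the entry count.
    Cache<String, String> cache = Caffeine.newBuilder()
        .executor(Runnable::run)                      // same-thread executor, as in the tests above
        .maximumWeight(10)
        .weigher((String k, String v) -> v.length())
        .build();
    cache.put("a", "12345");            // weight 5
    cache.put("b", "123456789");        // weight 9, total 14 > 10
    cache.cleanUp();                    // force pending maintenance (evictions)
    System.out.println(cache.asMap());  // at most ~10 units of weight remain
  }
}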

View File

@@ -16,6 +16,7 @@
  */
 package org.apache.solr.search;

+import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.WildcardQuery;
@@ -33,6 +34,7 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
+import java.util.TreeMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -471,7 +473,7 @@ public class TestFastLRUCache extends SolrTestCase {
   }

-  void cachePerfTest(final SolrCache sc, final int nThreads, final int numGets, int cacheSize, final int maxKey) {
+  double[] cachePerfTest(final SolrCache sc, final int nThreads, final int numGets, int cacheSize, final int maxKey) {
     Map l = new HashMap();
     l.put("size", ""+cacheSize);
     l.put("initialSize", ""+cacheSize);
@@ -512,37 +514,73 @@ public class TestFastLRUCache extends SolrTestCase {
       }
     }
-    System.out.println("time=" + timer.getTime() + " impl=" +sc.getClass().getSimpleName()
-        +" nThreads= " + nThreads + " size="+cacheSize+" maxKey="+maxKey+" gets="+numGets
-        +" hitRatio="+(1-(((double)puts.get())/numGets)));
+    double time = timer.getTime();
+    double hitRatio = (1-(((double)puts.get())/numGets));
+    // System.out.println("time=" + time + " impl=" +sc.getClass().getSimpleName()
+    //     +" nThreads= " + nThreads + " size="+cacheSize+" maxKey="+maxKey+" gets="+numGets
+    //     +" hitRatio="+(1-(((double)puts.get())/numGets)));
+    return new double[]{time, hitRatio};
   }

-  void perfTestBoth(int nThreads, int numGets, int cacheSize, int maxKey) {
-    cachePerfTest(new LRUCache(), nThreads, numGets, cacheSize, maxKey);
-    cachePerfTest(new FastLRUCache(), nThreads, numGets, cacheSize, maxKey);
+  private int NUM_RUNS = 5;
+
+  void perfTestBoth(int maxThreads, int numGets, int cacheSize, int maxKey,
+                    Map<String, Map<String, SummaryStatistics>> timeStats,
+                    Map<String, Map<String, SummaryStatistics>> hitStats) {
+    for (int nThreads = 1 ; nThreads <= maxThreads; nThreads++) {
+      String testKey = "threads=" + nThreads + ",gets=" + numGets + ",size=" + cacheSize + ",maxKey=" + maxKey;
+      System.err.println(testKey);
+      for (int i = 0; i < NUM_RUNS; i++) {
+        double[] data = cachePerfTest(new LRUCache(), nThreads, numGets, cacheSize, maxKey);
+        timeStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("LRUCache", k -> new SummaryStatistics())
+            .addValue(data[0]);
+        hitStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("LRUCache", k -> new SummaryStatistics())
+            .addValue(data[1]);
+        data = cachePerfTest(new CaffeineCache(), nThreads, numGets, cacheSize, maxKey);
+        timeStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("CaffeineCache", k -> new SummaryStatistics())
+            .addValue(data[0]);
+        hitStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("CaffeineCache", k -> new SummaryStatistics())
+            .addValue(data[1]);
+        data = cachePerfTest(new FastLRUCache(), nThreads, numGets, cacheSize, maxKey);
+        timeStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("FastLRUCache", k -> new SummaryStatistics())
+            .addValue(data[0]);
+        hitStats.computeIfAbsent(testKey, k -> new TreeMap<>())
+            .computeIfAbsent("FastLRUCache", k -> new SummaryStatistics())
+            .addValue(data[1]);
+      }
+    }
   }

+  int NUM_THREADS = 4;
+
   /***
   public void testCachePerf() {
+    Map<String, Map<String, SummaryStatistics>> timeStats = new TreeMap<>();
+    Map<String, Map<String, SummaryStatistics>> hitStats = new TreeMap<>();
     // warmup
-    perfTestBoth(2, 100000, 100000, 120000);
-    perfTestBoth(1, 2000000, 100000, 100000); // big cache, 100% hit ratio
-    perfTestBoth(2, 2000000, 100000, 100000); // big cache, 100% hit ratio
-    perfTestBoth(1, 2000000, 100000, 120000); // big cache, bigger hit ratio
-    perfTestBoth(2, 2000000, 100000, 120000); // big cache, bigger hit ratio
-    perfTestBoth(1, 2000000, 100000, 200000); // big cache, ~50% hit ratio
-    perfTestBoth(2, 2000000, 100000, 200000); // big cache, ~50% hit ratio
-    perfTestBoth(1, 2000000, 100000, 1000000); // big cache, ~10% hit ratio
-    perfTestBoth(2, 2000000, 100000, 1000000); // big cache, ~10% hit ratio
-    perfTestBoth(1, 2000000, 1000, 1000); // small cache, ~100% hit ratio
-    perfTestBoth(2, 2000000, 1000, 1000); // small cache, ~100% hit ratio
-    perfTestBoth(1, 2000000, 1000, 1200); // small cache, bigger hit ratio
-    perfTestBoth(2, 2000000, 1000, 1200); // small cache, bigger hit ratio
-    perfTestBoth(1, 2000000, 1000, 2000); // small cache, ~50% hit ratio
-    perfTestBoth(2, 2000000, 1000, 2000); // small cache, ~50% hit ratio
-    perfTestBoth(1, 2000000, 1000, 10000); // small cache, ~10% hit ratio
-    perfTestBoth(2, 2000000, 1000, 10000); // small cache, ~10% hit ratio
+    perfTestBoth(NUM_THREADS, 100000, 100000, 120000, new HashMap<>(), new HashMap());
+
+    perfTestBoth(NUM_THREADS, 2000000, 100000, 100000, timeStats, hitStats); // big cache, 100% hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 100000, 120000, timeStats, hitStats); // big cache, bigger hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 100000, 200000, timeStats, hitStats); // big cache, ~50% hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 100000, 1000000, timeStats, hitStats); // big cache, ~10% hit ratio
+
+    perfTestBoth(NUM_THREADS, 2000000, 1000, 1000, timeStats, hitStats); // small cache, ~100% hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 1000, 1200, timeStats, hitStats); // small cache, bigger hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 1000, 2000, timeStats, hitStats); // small cache, ~50% hit ratio
+    perfTestBoth(NUM_THREADS, 2000000, 1000, 10000, timeStats, hitStats); // small cache, ~10% hit ratio
+
+    System.out.println("\n=====================\n");
+    timeStats.forEach((testKey, map) -> {
+      Map<String, SummaryStatistics> hits = hitStats.get(testKey);
+      System.out.println("* " + testKey);
+      map.forEach((type, summary) -> {
+        System.out.println("\t" + String.format("%14s", type) + "\ttime " + summary.getMean() + "\thitRatio " + hits.get(type).getMean());
+      });
+    });
   }
   ***/
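
The revised harness aggregates NUM_RUNS samples per cache implementation with commons-math3, imported at the top of this file. A tiny standalone example (not part of the commit) of the SummaryStatistics calls it relies on:

import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

public class SummaryStatisticsSketch {
  public static void main(String[] args) {
    SummaryStatistics stats = new SummaryStatistics();
    for (double sample : new double[]{12.0, 15.0, 11.0, 14.0, 13.0}) {
      stats.addValue(sample); // accumulate one timing (or hit-ratio) sample per run
    }
    // getMean() is what the commented-out testCachePerf() prints per cache type
    System.out.println("mean=" + stats.getMean()
        + " stddev=" + stats.getStandardDeviation());
  }
}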

View File

@@ -44,10 +44,10 @@ public class BlockCacheTest extends SolrTestCase {
   public void testBlockCache() {
     int blocksInTest = 2000000;
     int blockSize = 1024;

     int slabSize = blockSize * 4096;
     long totalMemory = 2 * slabSize;

     BlockCache blockCache = new BlockCache(new Metrics(), true, totalMemory, slabSize, blockSize);
     byte[] buffer = new byte[1024];
     Random random = random();
@@ -82,7 +82,7 @@ public class BlockCacheTest extends SolrTestCase {
       long t3 = System.nanoTime();
       if (blockCache.fetch(blockCacheKey, buffer)) {
         fetchTime += (System.nanoTime() - t3);
-        assertTrue(Arrays.equals(testData, buffer));
+        assertTrue("buffer content differs", Arrays.equals(testData, buffer));
       }
     }
     System.out.println("Cache Hits = " + hitsInCache.get());
@@ -101,7 +101,7 @@ public class BlockCacheTest extends SolrTestCase {
   // always returns the same thing so we don't actually have to store the bytes redundantly to check them.
   private static byte getByte(long pos) {
     // knuth multiplicative hash method, then take top 8 bits
-    return (byte) ((((int)pos) * (int)(2654435761L)) >> 24);
+    return (byte) ((((int) pos) * (int) (2654435761L)) >> 24);

     // just the lower bits of the block number, to aid in debugging...
     // return (byte)(pos>>10);
@@ -117,17 +117,17 @@ public class BlockCacheTest extends SolrTestCase {
     final long totalMemory = 2 * slabSize; // 2 slabs of memory, so only half of what is needed for all blocks
     /***
     final int blocksInTest = 16384; // pick something bigger than 256, since that would lead to a slab size of 64 blocks and the bitset locks would consist of a single word.
    final int blockSize = 1024;
     final int slabSize = blocksInTest * blockSize / 4;
     final long totalMemory = 2 * slabSize; // 2 slabs of memory, so only half of what is needed for all blocks
     ***/

-    final int nThreads=64;
-    final int nReads=1000000;
-    final int readsPerThread=nReads/nThreads;
-    final int readLastBlockOdds=10; // odds (1 in N) of the next block operation being on the same block as the previous operation... helps flush concurrency issues
-    final int showErrors=50; // show first 50 validation failures
+    final int nThreads = 64;
+    final int nReads = 1000000;
+    final int readsPerThread = nReads / nThreads;
+    final int readLastBlockOdds = 10; // odds (1 in N) of the next block operation being on the same block as the previous operation... helps flush concurrency issues
+    final int showErrors = 50; // show first 50 validation failures

     final BlockCache blockCache = new BlockCache(new Metrics(), true, totalMemory, slabSize, blockSize);
@@ -142,7 +142,7 @@ public class BlockCacheTest extends SolrTestCase {
Thread[] threads = new Thread[nThreads];
for (int i = 0; i < threads.length; i++) {
final int threadnum = i;
final long seed = rnd.nextLong();
@@ -168,14 +168,15 @@ public class BlockCacheTest extends SolrTestCase {
}
public void test(int iter) {
for (int i = 0; i < iter; i++) {
test();
}
}
public void test() {
long block = r.nextInt(blocksInTest);
if (r.nextInt(readLastBlockOdds) == 0)
block = lastBlock.get(); // some percent of the time, try to read the last block another thread was just reading/writing
lastBlock.set(block);
@@ -192,7 +193,8 @@ public class BlockCacheTest extends SolrTestCase {
long globalPos = globalOffset + i;
if (buffer[i] != getByte(globalPos)) {
failed.set(true);
if (validateFails.incrementAndGet() <= showErrors)
System.out.println("ERROR: read was " + "block=" + block + " blockOffset=" + blockOffset + " len=" + len + " globalPos=" + globalPos + " localReadOffset=" + i + " got=" + buffer[i] + " expected=" + getByte(globalPos));
break;
}
}
@@ -229,7 +231,7 @@ public class BlockCacheTest extends SolrTestCase {
System.out.println("Cache Store Fails = " + storeFails.get());
System.out.println("Blocks with Errors = " + validateFails.get());
assertFalse("cached bytes differ from expected", failed.get());
}
@@ -245,12 +247,12 @@ public class BlockCacheTest extends SolrTestCase {
// TODO: introduce more randomness in cache size, hit rate, etc
final int blocksInTest = 400;
final int maxEntries = blocksInTest / 2;
final int nThreads = 64;
final int nReads = 1000000;
final int readsPerThread = nReads / nThreads;
final int readLastBlockOdds = 10; // odds (1 in N) of the next block operation being on the same block as the previous operation... helps flush concurrency issues
final int updateAnywayOdds = 3; // sometimes insert a new entry for the key even if one was found
final int invalidateOdds = 20; // sometimes invalidate an entry
@@ -258,17 +260,24 @@ public class BlockCacheTest extends SolrTestCase {
final AtomicLong removals = new AtomicLong();
final AtomicLong inserts = new AtomicLong();
RemovalListener<Long, Val> listener = (k, v, removalCause) -> {
removals.incrementAndGet();
if (v == null) {
if (removalCause != RemovalCause.COLLECTED) {
throw new RuntimeException("Null value for key " + k + ", removalCause=" + removalCause);
} else {
return;
}
}
assertEquals("cache key differs from value's key", (Long) k, (Long) v.key);
if (!v.live.compareAndSet(true, false)) {
throw new RuntimeException("listener called more than once! k=" + k + " v=" + v + " removalCause=" + removalCause);
// return; // use this variant if listeners may be called more than once
}
};
com.github.benmanes.caffeine.cache.Cache<Long, Val> cache = Caffeine.newBuilder()
.removalListener(listener)
.maximumSize(maxEntries)
.executor(Runnable::run)
@@ -279,11 +288,12 @@ public class BlockCacheTest extends SolrTestCase {
final AtomicLong maxObservedSize = new AtomicLong();
Thread[] threads = new Thread[nThreads];
for (int i = 0; i < threads.length; i++) {
final long seed = rnd.nextLong();
threads[i] = new Thread() {
Random r;
@Override
public void run() {
try {
@@ -296,13 +306,13 @@ public class BlockCacheTest extends SolrTestCase {
}
public void test(int iter) {
for (int i = 0; i < iter; i++) {
test();
}
}
boolean odds(int odds) {
return odds > 0 && r.nextInt(odds) == 0;
}
long getBlock() {
@@ -329,7 +339,7 @@ public class BlockCacheTest extends SolrTestCase {
Val v = cache.getIfPresent(k);
if (v != null) {
hits.incrementAndGet();
assertEquals("cache key differs from value's key", (Long) k, (Long) v.key);
}
if (v == null || odds(updateAnywayOdds)) {
@@ -358,13 +368,10 @@ public class BlockCacheTest extends SolrTestCase {
// Thread.sleep(1000); // need to wait if executor is used for listener?
long cacheSize = cache.estimatedSize();
System.out.println("Done! # of Elements = " + cacheSize + " inserts=" + inserts.get() + " removals=" + removals.get() + " hits=" + hits.get() + " maxObservedSize=" + maxObservedSize);
assertEquals("cache size different from (inserts - removals)", cacheSize, inserts.get() - removals.get());
assertFalse(failed.get());
}
}


@@ -1 +0,0 @@
5aa8bbb851b1ad403cc140094ba4a25998369efe


@@ -0,0 +1 @@
6000774d7f8412ced005a704188ced78beeed2bb


@@ -33,22 +33,27 @@ Solr caches are associated with a specific instance of an Index Searcher, a spec
When a new searcher is opened, the current searcher continues servicing requests while the new one auto-warms its cache. The new searcher uses the current searcher's cache to pre-populate its own. When the new searcher is ready, it is registered as the current searcher and begins handling all new search requests. The old searcher will be closed once it has finished servicing all its requests.
=== Cache implementations

In Solr, the following cache implementations are available: the recommended `solr.search.CaffeineCache`, and the legacy implementations `solr.search.LRUCache`, `solr.search.FastLRUCache`, and `solr.search.LFUCache`.
The `CaffeineCache` is an implementation backed by the https://github.com/ben-manes/caffeine[Caffeine caching library]. By default it uses a Window TinyLFU (W-TinyLFU) eviction policy, which evicts based on both frequency and recency of use in O(1) time with a small footprint. This implementation is generally recommended over the legacy caches, as it usually offers a lower memory footprint, a higher hit ratio, and better multi-threaded performance.
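For illustration, the following minimal sketch shows what such a cache looks like at the Caffeine API level; the key and value types and the sample entries are placeholders, not what Solr actually stores:

[source,java]
----
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class CaffeineBasicsExample {
  public static void main(String[] args) {
    // A size-bounded cache; admission and eviction follow Caffeine's
    // default W-TinyLFU policy, so no policy configuration is required.
    Cache<String, String> cache = Caffeine.newBuilder()
        .maximumSize(512)   // roughly what the "size" attribute controls
        .recordStats()      // hit/miss counters, like the stats Solr reports
        .build();

    cache.put("q=title:solr", "docList...");
    String hit = cache.getIfPresent("q=title:solr"); // counts as a use
    System.out.println(hit + ", hitRate=" + cache.stats().hitRate());
  }
}
----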
The acronym LRU stands for Least Recently Used. When an LRU cache fills up, the entry with the oldest last-accessed timestamp is evicted to make room for the new entry. The net effect is that entries that are accessed frequently tend to stay in the cache, while those that are not accessed frequently tend to drop out and will be re-fetched from the index if needed again.
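As a point of reference, LRU eviction is easy to sketch with an access-ordered `LinkedHashMap`; this is only an illustration of the policy, not Solr's `LRUCache` code:

[source,java]
----
import java.util.LinkedHashMap;
import java.util.Map;

// Access-ordered LinkedHashMap: get() moves an entry to the "most
// recently used" end, and the eldest entry is dropped once the bound
// is exceeded.
public class LruSketch<K, V> extends LinkedHashMap<K, V> {
  private final int maxSize;

  public LruSketch(int maxSize) {
    super(16, 0.75f, true); // accessOrder=true enables LRU ordering
    this.maxSize = maxSize;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > maxSize; // evict the least recently used entry
  }
}
----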
The `FastLRUCache`, which was introduced in Solr 1.4, is designed to be lock-free, so it is well suited for caches which are hit several times in a request.
`CaffeineCache`, `LRUCache` and `FastLRUCache` use an auto-warm count that supports both integers and percentages, which are evaluated relative to the current size of the cache when warming happens.
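For example, a hypothetical helper (not Solr's actual parsing code, which may differ in detail) could resolve an `autowarmCount` of `90%` versus `128` against the current cache size like this:

[source,java]
----
public class AutowarmSketch {
  // Hypothetical helper: resolves an autowarmCount setting against
  // the current number of entries at warm-up time.
  static int resolveAutowarmCount(String autowarmCount, int currentSize) {
    String s = autowarmCount.trim();
    if (s.endsWith("%")) {
      int percent = Integer.parseInt(s.substring(0, s.length() - 1));
      return currentSize * percent / 100; // fraction of the current size
    }
    return Integer.parseInt(s);           // absolute entry count
  }

  public static void main(String[] args) {
    System.out.println(resolveAutowarmCount("90%", 512)); // 460
    System.out.println(resolveAutowarmCount("128", 512)); // 128
  }
}
----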
The `LFUCache` refers to the Least Frequently Used cache. This works in a way similar to the LRU cache, except that when the cache fills up, the entry that has been used the least is evicted.
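Again purely as an illustration of the policy (Solr's `LFUCache` is more sophisticated), a naive LFU cache can be sketched as:

[source,java]
----
import java.util.HashMap;
import java.util.Map;

// Each entry carries a use counter; when the cache is full, the entry
// with the smallest counter is evicted (linear scan, for brevity).
public class LfuSketch<K, V> {
  private final int maxSize;
  private final Map<K, V> values = new HashMap<>();
  private final Map<K, Long> useCounts = new HashMap<>();

  public LfuSketch(int maxSize) { this.maxSize = maxSize; }

  public V get(K key) {
    V v = values.get(key);
    if (v != null) useCounts.merge(key, 1L, Long::sum); // record the use
    return v;
  }

  public void put(K key, V value) {
    if (!values.containsKey(key) && values.size() >= maxSize) {
      K victim = null;
      long min = Long.MAX_VALUE;
      for (Map.Entry<K, Long> e : useCounts.entrySet()) {
        if (e.getValue() < min) { min = e.getValue(); victim = e.getKey(); }
      }
      values.remove(victim);    // evict the least frequently used entry
      useCounts.remove(victim);
    }
    values.put(key, value);
    useCounts.merge(key, 1L, Long::sum);
  }
}
----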
The Statistics page in the Solr Admin UI will display information about the performance of all the active caches. This information can help you fine-tune the sizes of the various caches appropriately for your particular application. When a Searcher terminates, a summary of its cache usage is also written to the log.
Each cache has settings to define its initial size (`initialSize`), maximum size (`size`) and the number of items to use during warming (`autowarmCount`). The Caffeine, LRU and FastLRU cache implementations can take a percentage instead of an absolute value for `autowarmCount`.
Each cache implementation also supports a `maxIdleTime` attribute that controls the automatic eviction of entries that haven't been used for a while. This attribute is expressed in seconds, with the default value of `0` meaning no entries are automatically evicted due to exceeded idle time. Smaller values of this attribute will cause older entries to be evicted quickly, which will reduce cache memory usage but may instead cause thrashing due to a repeating eviction-lookup-miss-insertion cycle of the same entries. Larger values will cause entries to stay around longer, waiting to be reused, at the cost of increased memory usage. Reasonable values, depending on the query volume and patterns, may lie somewhere between 60 and 3600. Please note that this condition is evaluated synchronously, and before other eviction conditions, on every entry insertion.
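At the Caffeine API level an idle-time bound corresponds naturally to `expireAfterAccess`; the sketch below shows the idea, though the exact wiring inside Solr's cache implementations may differ:

[source,java]
----
import java.util.concurrent.TimeUnit;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class IdleEvictionExample {
  public static void main(String[] args) {
    long maxIdleSeconds = 600; // e.g. maxIdleTime="600"
    Cache<String, String> cache = Caffeine.newBuilder()
        .maximumSize(512)
        // entries unused for maxIdleSeconds become eligible for eviction
        .expireAfterAccess(maxIdleSeconds, TimeUnit.SECONDS)
        .build();
    cache.put("q=*:*", "docList...");
  }
}
----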
`CaffeineCache`, `LRUCache` and `FastLRUCache` support a `maxRamMB` attribute that limits the maximum amount of memory a cache may consume. When both `size` and `maxRamMB` limits are specified the behavior will differ among implementations: in `CaffeineCache` the `maxRamMB` limit will take precedence and the `size` limit will be ignored, while in `LRUCache` and `FastLRUCache` both limits will be observed, with entries being evicted whenever any of the limits is reached.
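The precedence in `CaffeineCache` follows from the underlying library: Caffeine accepts either an entry-count bound (`maximumSize`) or a weight bound (`maximumWeight`), never both. The following illustrative sketch shows a RAM-weighted variant using Lucene's `RamUsageEstimator`; the weigher shown is an assumption for illustration, not necessarily the one `CaffeineCache` uses:

[source,java]
----
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.apache.lucene.util.RamUsageEstimator;

public class RamBoundExample {
  public static void main(String[] args) {
    long maxRamBytes = 256L * 1024 * 1024; // e.g. maxRamMB="256"
    // A RAM bound replaces the entry-count bound: each entry's weight
    // is its estimated size in bytes, and total weight stays under the cap.
    Cache<String, Object> cache = Caffeine.newBuilder()
        .maximumWeight(maxRamBytes)
        .weigher((String k, Object v) -> (int) Math.min(Integer.MAX_VALUE,
            RamUsageEstimator.sizeOfObject(k) + RamUsageEstimator.sizeOfObject(v)))
        .build();
    cache.put("q=title:solr", new Object());
  }
}
----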
`FastLRUCache` and `LFUCache` support a `showItems` attribute. This is the number of cache items to display in the stats page for the cache, and it is intended for debugging.
Details of each cache are described below.