SOLR-9366: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter

(cherry picked from commit 487b097)
Shalin Shekhar Mangar 2016-11-15 10:59:58 +05:30
parent 8989c77478
commit 53dad4f0d1
7 changed files with 339 additions and 186 deletions

solr/CHANGES.txt

@@ -60,6 +60,9 @@ New Features
* SOLR-9038: Add a command-line tool to manage the snapshots functionality (Hrishikesh Gadre via yonik)
* SOLR-9366: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter.
(yonik, Michael Sun, shalin)

Optimizations
----------------------
* SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

solr/core/src/java/org/apache/solr/search/FastLRUCache.java

@@ -43,7 +43,7 @@ import java.util.concurrent.TimeUnit;
* @see org.apache.solr.search.SolrCache
* @since solr 1.4
*/
-public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
+public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V> {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
// contains the statistics objects for all open caches of the same type
@@ -55,6 +55,8 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
private ConcurrentLRUCache<K,V> cache;
private int showItems = 0;
private long maxRamBytes;
@Override
public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
super.init(args, regenerator);
@@ -87,8 +89,18 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
str = (String) args.get("showItems");
showItems = str == null ? 0 : Integer.parseInt(str);
-description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread);
-cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null);
+str = (String) args.get("maxRamMB");
+this.maxRamBytes = str == null ? Long.MAX_VALUE : (long) (Double.parseDouble(str) * 1024L * 1024L);
+if (maxRamBytes != Long.MAX_VALUE) {
+  long ramLowerWatermark = Math.round(maxRamBytes * 0.8);
+  description = generateDescription(maxRamBytes, ramLowerWatermark, newThread);
+  cache = new ConcurrentLRUCache<K, V>(ramLowerWatermark, maxRamBytes, newThread, null);
+} else {
+  description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread);
+  cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null);
+}
cache.setAlive(false);
statsList = (List<ConcurrentLRUCache.Stats>) persistence;
@@ -118,6 +130,16 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
return description;
}
protected String generateDescription(long maxRamBytes, long ramLowerWatermark, boolean newThread) {
String description = "Concurrent LRU Cache(ramMinSize=" + ramLowerWatermark + ", ramMaxSize=" + maxRamBytes
+ ", cleanupThread=" + newThread;
if (isAutowarmingOn()) {
description += ", " + getAutowarmDescription();
}
description += ')';
return description;
}
@Override
public int size() {
return cache.size();
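
The watermark arithmetic in init() above is easy to verify by hand. Below is a minimal sketch (illustrative only, not part of the patch; the class name is hypothetical) of how a maxRamMB setting becomes the two byte thresholds handed to the RAM-bounded ConcurrentLRUCache constructor:

// Illustrative only: maxRamMB="100" -> byte watermarks.
public class MaxRamMBMath {
  public static void main(String[] args) {
    String str = "100"; // hypothetical maxRamMB value from solrconfig.xml
    long maxRamBytes = (long) (Double.parseDouble(str) * 1024L * 1024L); // 104857600
    long ramLowerWatermark = Math.round(maxRamBytes * 0.8);              // 83886080
    // Eviction kicks in once estimated usage exceeds maxRamBytes and
    // stops when it drops back below the 80% lower watermark.
    System.out.println(maxRamBytes + " -> " + ramLowerWatermark);
  }
}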

solr/core/src/java/org/apache/solr/search/LRUCache.java

@@ -46,9 +46,9 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
/// Copied from Lucene's LRUQueryCache
// memory usage of a simple term query
-static final long DEFAULT_RAM_BYTES_USED = 192;
+public static final long DEFAULT_RAM_BYTES_USED = 192;
-static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
+public static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF // key + value
* 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity
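
These two constants feed the RAM accounting that ConcurrentLRUCache.put() performs (next file): each entry is charged its CacheEntry.ramBytesUsed() plus a fixed per-slot hash-table overhead. A small sketch of that arithmetic, assuming a 64-bit JVM with compressed oops (4-byte references); the class name is hypothetical:

import org.apache.lucene.util.RamUsageEstimator;

// Illustrative only: the charge for one entry whose key and value do not
// implement Accountable (both fall back to DEFAULT_RAM_BYTES_USED).
public class EntryChargeMath {
  public static void main(String[] args) {
    // key + value references, doubled because the hash table is assumed to be
    // ~2x oversized to avoid collisions; 2 * 4 * 2 = 16 with compressed oops.
    long tableOverhead = 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF * 2;
    long defaultBytes = 192; // DEFAULT_RAM_BYTES_USED, charged once per key, once per value
    // CacheEntry.BASE_RAM_BYTES_USED (the entry object's own shallow size) is added on top.
    System.out.println("charge = " + (2 * defaultBytes + tableOverhead) + " + CacheEntry shell");
  }
}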

solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java

@@ -15,14 +15,20 @@
* limitations under the License.
*/
package org.apache.solr.util;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.solr.common.util.Cache;
import org.apache.solr.search.LRUCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
@@ -45,9 +51,11 @@ import java.lang.ref.WeakReference;
*
* @since solr 1.4
*/
-public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
+public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConcurrentLRUCache.class);
private final ConcurrentHashMap<Object, CacheEntry<K,V>> map;
private final int upperWaterMark, lowerWaterMark;
private final ReentrantLock markAndSweepLock = new ReentrantLock(true);
@@ -58,7 +66,29 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
private final int acceptableWaterMark;
private long oldestEntry = 0; // not volatile, only accessed in the cleaning method
private final EvictionListener<K,V> evictionListener;
-private CleanupThread cleanupThread ;
+private CleanupThread cleanupThread;
private final long ramLowerWatermark, ramUpperWatermark;
private final AtomicLong ramBytes = new AtomicLong(0);
public ConcurrentLRUCache(long ramLowerWatermark, long ramUpperWatermark,
boolean runCleanupThread, EvictionListener<K, V> evictionListener) {
this.ramLowerWatermark = ramLowerWatermark;
this.ramUpperWatermark = ramUpperWatermark;
this.evictionListener = evictionListener;
this.map = new ConcurrentHashMap<>();
this.newThreadForCleanup = false;
this.acceptableWaterMark = -1;
this.lowerWaterMark = Integer.MIN_VALUE;
this.upperWaterMark = Integer.MAX_VALUE;
if (runCleanupThread) {
cleanupThread = new CleanupThread(this);
cleanupThread.start();
}
}
public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark,
int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
@@ -76,6 +106,8 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
cleanupThread = new CleanupThread(this);
cleanupThread.start();
}
this.ramLowerWatermark = Long.MIN_VALUE;
this.ramUpperWatermark = Long.MAX_VALUE;
}
public ConcurrentLRUCache(int size, int lowerWatermark) {
@@ -103,6 +135,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
CacheEntry<K,V> cacheEntry = map.remove(key);
if (cacheEntry != null) {
stats.size.decrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY);
}
return cacheEntry.value;
}
return null;
@@ -116,8 +151,23 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
int currentSize;
if (oldCacheEntry == null) {
currentSize = stats.size.incrementAndGet();
if (ramUpperWatermark != Long.MAX_VALUE) {
ramBytes.addAndGet(e.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
}
} else {
currentSize = stats.size.get();
if (ramUpperWatermark != Long.MAX_VALUE) {
if (oldCacheEntry.value instanceof Accountable) {
ramBytes.addAndGet(-((Accountable)oldCacheEntry.value).ramBytesUsed());
} else {
ramBytes.addAndGet(-LRUCache.DEFAULT_RAM_BYTES_USED);
}
if (val instanceof Accountable) {
ramBytes.addAndGet(((Accountable)val).ramBytesUsed());
} else {
ramBytes.addAndGet(LRUCache.DEFAULT_RAM_BYTES_USED);
}
}
}
if (islive) {
stats.putCounter.increment();
@@ -135,7 +185,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
//
// Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
// in this method.
if (currentSize > upperWaterMark && !isCleaning) {
if ((currentSize > upperWaterMark || ramBytes.get() > ramUpperWatermark) && !isCleaning) {
if (newThreadForCleanup) {
new Thread(this::markAndSweep).start();
} else if (cleanupThread != null){
@@ -169,189 +219,225 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
if (!markAndSweepLock.tryLock()) return;
try {
long oldestEntry = this.oldestEntry;
isCleaning = true;
this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible
long timeCurrent = stats.accessCounter.longValue();
int sz = stats.size.get();
int numRemoved = 0;
int numKept = 0;
long newestEntry = timeCurrent;
long newNewestEntry = -1;
long newOldestEntry = Long.MAX_VALUE;
int wantToKeep = lowerWaterMark;
int wantToRemove = sz - lowerWaterMark;
@SuppressWarnings("unchecked") // generic arrays are annoying
CacheEntry<K,V>[] eset = new CacheEntry[sz];
int eSize = 0;
// System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
for (CacheEntry<K,V> ce : map.values()) {
// set lastAccessedCopy to avoid more volatile reads
ce.lastAccessedCopy = ce.lastAccessed;
long thisEntry = ce.lastAccessedCopy;
// since the wantToKeep group is likely to be bigger than wantToRemove, check it first
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing.
numKept++;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
} else {
// This entry *could* be in the bottom group.
// Collect these entries to avoid another full pass... this is wasted
// effort if enough entries are normally removed in this first pass.
// An alternate impl could make a full second pass.
if (eSize < eset.length-1) {
eset[eSize++] = ce;
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
if (upperWaterMark != Integer.MAX_VALUE) {
markAndSweepByCacheSize();
} else if (ramUpperWatermark != Long.MAX_VALUE) {
markAndSweepByRamSize();
} else {
// should never happen
throw new AssertionError("ConcurrentLRUCache initialized with neither size limits nor ram limits");
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
// TODO: allow this to be customized in the constructor?
int numPasses=1; // maximum number of linear passes over the data
// if we didn't remove enough entries, then make more passes
// over the values we collected, with updated min and max values.
while (sz - numRemoved > acceptableWaterMark && --numPasses>=0) {
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Long.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
// iterate backward to make it easy to remove items.
for (int i=eSize-1; i>=0; i--) {
CacheEntry<K,V> ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// remove the entry by moving the last element to its position
eset[i] = eset[eSize-1];
eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
// remove the entry by moving the last element to its position
eset[i] = eset[eSize-1];
eSize--;
} else {
// This entry *could* be in the bottom group, so keep it in the eset,
// and update the stats.
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
}
// if we still didn't remove enough entries, then make another pass while
// inserting into a priority queue
if (sz - numRemoved > acceptableWaterMark) {
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Long.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
PQueue<K,V> queue = new PQueue<>(wantToRemove);
for (int i=eSize-1; i>=0; i--) {
CacheEntry<K,V> ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it.
evictEntry(ce.key);
numRemoved++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
} else {
// This entry *could* be in the bottom group.
// add it to the priority queue
// everything in the priority queue will be removed, so keep track of
// the lowest value that ever comes back out of the queue.
// first reduce the size of the priority queue to account for
// the number of items we have already removed while executing
// this loop so far.
queue.myMaxSize = sz - lowerWaterMark - numRemoved;
while (queue.size() > queue.myMaxSize && queue.size() > 0) {
CacheEntry otherEntry = queue.pop();
newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry);
}
if (queue.myMaxSize <= 0) break;
Object o = queue.myInsertWithOverflow(ce);
if (o != null) {
newOldestEntry = Math.min(((CacheEntry)o).lastAccessedCopy, newOldestEntry);
}
}
}
// Now delete everything in the priority queue.
// avoid using pop() since order doesn't matter anymore
for (CacheEntry<K,V> ce : queue.getValues()) {
if (ce==null) continue;
evictEntry(ce.key);
numRemoved++;
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved));
}
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
this.oldestEntry = oldestEntry;
} finally {
isCleaning = false; // set before markAndSweep.unlock() for visibility
markAndSweepLock.unlock();
}
}
/*
Must be called after acquiring markAndSweepLock
*/
private void markAndSweepByRamSize() {
List<CacheEntry<K, V>> entriesInAccessOrder = new ArrayList<>(map.size());
map.forEach((o, kvCacheEntry) -> {
kvCacheEntry.lastAccessedCopy = kvCacheEntry.lastAccessed; // important because we want to avoid volatile read during comparisons
entriesInAccessOrder.add(kvCacheEntry);
});
Collections.sort(entriesInAccessOrder); // newer access is smaller, older access is bigger
// iterate in oldest to newest order
for (int i = entriesInAccessOrder.size() - 1; i >= 0; i--) {
CacheEntry<K, V> kvCacheEntry = entriesInAccessOrder.get(i);
evictEntry(kvCacheEntry.key);
ramBytes.addAndGet(-(kvCacheEntry.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY));
if (ramBytes.get() <= ramLowerWatermark) {
break; // we are done!
}
}
}
/*
Must be called after acquiring markAndSweepLock
*/
private void markAndSweepByCacheSize() {
long oldestEntry = this.oldestEntry;
isCleaning = true;
this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible
long timeCurrent = stats.accessCounter.longValue();
int sz = stats.size.get();
int numRemoved = 0;
int numKept = 0;
long newestEntry = timeCurrent;
long newNewestEntry = -1;
long newOldestEntry = Long.MAX_VALUE;
int wantToKeep = lowerWaterMark;
int wantToRemove = sz - lowerWaterMark;
@SuppressWarnings("unchecked") // generic arrays are annoying
CacheEntry<K,V>[] eset = new CacheEntry[sz];
int eSize = 0;
// System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
for (CacheEntry<K,V> ce : map.values()) {
// set lastAccessedCopy to avoid more volatile reads
ce.lastAccessedCopy = ce.lastAccessed;
long thisEntry = ce.lastAccessedCopy;
// since the wantToKeep group is likely to be bigger than wantToRemove, check it first
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing.
numKept++;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
} else {
// This entry *could* be in the bottom group.
// Collect these entries to avoid another full pass... this is wasted
// effort if enough entries are normally removed in this first pass.
// An alternate impl could make a full second pass.
if (eSize < eset.length-1) {
eset[eSize++] = ce;
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
// TODO: allow this to be customized in the constructor?
int numPasses=1; // maximum number of linear passes over the data
// if we didn't remove enough entries, then make more passes
// over the values we collected, with updated min and max values.
while (sz - numRemoved > acceptableWaterMark && --numPasses>=0) {
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Long.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
// iterate backward to make it easy to remove items.
for (int i=eSize-1; i>=0; i--) {
CacheEntry<K,V> ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// remove the entry by moving the last element to its position
eset[i] = eset[eSize-1];
eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
// remove the entry by moving the last element to its position
eset[i] = eset[eSize-1];
eSize--;
} else {
// This entry *could* be in the bottom group, so keep it in the eset,
// and update the stats.
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
}
// if we still didn't remove enough entries, then make another pass while
// inserting into a priority queue
if (sz - numRemoved > acceptableWaterMark) {
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Long.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
PQueue<K,V> queue = new PQueue<>(wantToRemove);
for (int i=eSize-1; i>=0; i--) {
CacheEntry<K,V> ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it.
evictEntry(ce.key);
numRemoved++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
} else {
// This entry *could* be in the bottom group.
// add it to the priority queue
// everything in the priority queue will be removed, so keep track of
// the lowest value that ever comes back out of the queue.
// first reduce the size of the priority queue to account for
// the number of items we have already removed while executing
// this loop so far.
queue.myMaxSize = sz - lowerWaterMark - numRemoved;
while (queue.size() > queue.myMaxSize && queue.size() > 0) {
CacheEntry otherEntry = queue.pop();
newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry);
}
if (queue.myMaxSize <= 0) break;
Object o = queue.myInsertWithOverflow(ce);
if (o != null) {
newOldestEntry = Math.min(((CacheEntry)o).lastAccessedCopy, newOldestEntry);
}
}
}
// Now delete everything in the priority queue.
// avoid using pop() since order doesn't matter anymore
for (CacheEntry<K,V> ce : queue.getValues()) {
if (ce==null) continue;
evictEntry(ce.key);
numRemoved++;
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved));
}
oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
this.oldestEntry = oldestEntry;
}
private static class PQueue<K,V> extends PriorityQueue<CacheEntry<K,V>> {
int myMaxSize;
final Object[] heap;
@@ -477,7 +563,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
return map;
}
-public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>> {
+public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>>, Accountable {
+  public static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CacheEntry.class);
K key;
V value;
volatile long lastAccessed = 0;
@@ -514,6 +602,27 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
public String toString() {
return "key: " + key + " value: " + value + " lastAccessed:" + lastAccessed;
}
@Override
public long ramBytesUsed() {
long ramBytes = BASE_RAM_BYTES_USED;
if (key instanceof Accountable) {
ramBytes += ((Accountable) key).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
if (value instanceof Accountable) {
ramBytes += ((Accountable) value).ramBytesUsed();
} else {
ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
}
return ramBytes;
}
@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
}
private boolean isDestroyed = false;
@@ -632,4 +741,14 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
super.finalize();
}
}
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + ramBytes.get();
}
@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
}
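
Taken together, the new constructor and the Accountable plumbing let a cache be bounded by estimated heap usage instead of entry count. A minimal usage sketch (sizes illustrative, class name hypothetical), assuming this patch is applied:

import org.apache.solr.util.ConcurrentLRUCache;

// Illustrative only: a cache capped at ~8 MB of estimated usage that evicts
// oldest-accessed entries down to ~6.4 MB whenever the cap is crossed.
public class RamBoundedCacheDemo {
  public static void main(String[] args) {
    long upper = 8L * 1024 * 1024;
    long lower = Math.round(upper * 0.8);
    ConcurrentLRUCache<String, String> cache =
        new ConcurrentLRUCache<>(lower, upper, false /* no cleanup thread */, null);
    cache.put("q:foo", "result"); // non-Accountable value: charged the flat default
    System.out.println("estimated bytes: " + cache.ramBytesUsed());
    cache.destroy();
  }
}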

solrconfig.xml

@@ -436,6 +436,9 @@
the cache. (see java.util.HashMap)
autowarmCount - the number of entries to prepopulate from
an old cache.
maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
to occupy. Note that when this option is specified, the size
and initialSize parameters are ignored.
-->
<filterCache class="solr.FastLRUCache"
size="512"

solrconfig.xml

@@ -436,6 +436,9 @@
the cache. (see java.util.HashMap)
autowarmCount - the number of entries to prepopulate from
an old cache.
maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
to occupy. Note that when this option is specified, the size
and initialSize parameters are ignored.
-->
<filterCache class="solr.FastLRUCache"
size="512"

solrconfig.xml

@@ -449,7 +449,10 @@
initialSize - the initial capacity (number of entries) of
the cache. (see java.util.HashMap)
autowarmCount - the number of entries to prepopulate from
an old cache.
maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
to occupy. Note that when this option is specified, the size
and initialSize parameters are ignored.
-->
<filterCache class="solr.FastLRUCache"
size="512"