SOLR-667: change markAndSweep algorithm, make FastLRUCache the default for filterCache in example and test

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@710068 13f79535-47bb-0310-9956-ffa450edef68
Yonik Seeley 2008-11-03 14:32:58 +00:00
parent 965f609838
commit d5096312a6
5 changed files with 383 additions and 53 deletions


@@ -209,6 +209,12 @@
<maxBooleanClauses>1024</maxBooleanClauses>
<!-- There are two implementations of cache available for Solr,
LRUCache, based on a synchronized LinkedHashMap, and
FastLRUCache, based on a ConcurrentHashMap. FastLRUCache has faster gets
and slower puts in single threaded operation and thus is generally faster
than LRUCache when the hit ratio of the cache is high (> 75%), and may be
faster under other scenarios on multi-cpu systems. -->
<!-- Cache used by SolrIndexSearcher for filters (DocSets),
unordered sets of *all* documents that match a query.
When a new searcher is opened, its caches may be prepopulated
@@ -216,7 +222,7 @@
autowarmCount is the number of items to prepopulate. For LRUCache,
the autowarmed items will be the most recently accessed items.
Parameters:
- class - the SolrCache implementation (currently only LRUCache)
class - the SolrCache implementation LRUCache or FastLRUCache
size - the maximum number of entries in the cache
initialSize - the initial capacity (number of entries) of
the cache. (seel java.util.HashMap)
@@ -224,7 +230,7 @@
and old cache.
-->
<filterCache
- class="solr.LRUCache"
class="solr.FastLRUCache"
size="512"
initialSize="512"
autowarmCount="128"/>


@@ -1,5 +1,7 @@
package org.apache.solr.common.util;
import org.apache.lucene.util.PriorityQueue;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeSet;
@@ -26,11 +28,12 @@ public class ConcurrentLRUCache {
private final int upperWaterMark, lowerWaterMark;
private volatile boolean stop = false;
private final ReentrantLock markAndSweepLock = new ReentrantLock(true);
- private volatile boolean isCleaning = false;
private boolean isCleaning = false;  // not volatile... piggybacked on other volatile vars
private final boolean newThreadForCleanup;
private volatile boolean islive = true;
private final Stats stats = new Stats();
private final int acceptableWaterMark;
private long oldestEntry = 0; // not volatile, only accessed in the cleaning method
public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark, int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup, final int delay) {
if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0");
@@ -99,6 +102,9 @@ public class ConcurrentLRUCache {
//
// There is a race between the check and the call to markAndSweep, but
// it's unimportant because markAndSweep actually aquires the lock or returns if it can't.
//
// Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
// in this method.
if (stats.size.get() > upperWaterMark && !isCleaning) {
if (newThreadForCleanup) {
new Thread() {
@@ -122,63 +128,232 @@
* config parameter, the second stage takes over.
* <p/>
* The second stage is more intensive and tries to bring down the cache size
- * to the 'minSize' config parameter.
* to the 'lowerWaterMark' config parameter.
*/
- public void markAndSweep() {
private void markAndSweep() {
// if we want to keep at least 1000 entries, then timestamps of
// current through current-1000 are guaranteed not to be the oldest (but that does
// not mean there are 1000 entries in that group... it's acutally anywhere between
// 1 and 1000).
// Also, if we want to remove 500 entries, then
// oldestEntry through oldestEntry+500 are guaranteed to be
// removed (however many there are there).
if (!markAndSweepLock.tryLock()) return;
try {
long oldestEntry = this.oldestEntry;
isCleaning = true;
this.oldestEntry = oldestEntry;     // volatile write to make isCleaning visible

- int size = stats.size.get();
- long currentLatestAccessed = stats.accessCounter.get();
- int itemsToBeRemoved = size - lowerWaterMark;
- int itemsRemoved = 0;
- if (itemsToBeRemoved < 1) return;
- // currentLatestAccessed is the counter value of the item accessed most recently
- // therefore remove all items whose last accessed counter is less than (currentLatestAccessed - lowerWaterMark)
- long removeOlderThan = currentLatestAccessed - lowerWaterMark;
- for (Map.Entry<Object, CacheEntry> entry : map.entrySet()) {
-   if (entry.getValue().lastAccessed <= removeOlderThan && itemsRemoved < itemsToBeRemoved) {
-     evictEntry(entry.getKey());
-   }
- }
- // Since the removal of items in the above loop depends on the value of the lastAccessed variable,
- // between the time we recorded the number of items to be removed and the actual removal process,
- // some items may graduate above the removeOlderThan value and escape eviction.
- // Therefore, we again check if the size less than acceptableWaterMark, if not we remove items forcefully
- // using a method which does not depend on the value of lastAccessed but can be more costly to run
- size = stats.size.get();
- // In the first attempt, try to use a simple algorithm to remove old entries
- // If the size of the cache is <= acceptableWatermark then return
- if (size <= acceptableWaterMark) return;
- // Remove items until size becomes lower than acceptableWaterMark
- itemsToBeRemoved = size - acceptableWaterMark;
- TreeSet<CacheEntry> tree = new TreeSet<CacheEntry>();
- // This loop may remove a few newer items because we try to forcefully fill a
- // bucket of fixed size and remove them even if they have become newer in the meantime
- // The caveat is that this may lead to more cache misses because we may have removed
- // an item which was used very recently (against the philosophy of LRU)
- for (Map.Entry<Object, CacheEntry> entry : map.entrySet()) {
-   CacheEntry v = entry.getValue();
-   v.lastAccessedCopy = v.lastAccessed;
-   if (tree.size() < itemsToBeRemoved) {
-     tree.add(v);
-   } else {
-     if (v.lastAccessedCopy < tree.first().lastAccessedCopy) {
-       tree.remove(tree.first());
-       tree.add(v);
-     }
-   }
- }
- for (CacheEntry sortCacheEntry : tree)
-   evictEntry(sortCacheEntry.key);

long timeCurrent = stats.accessCounter.get();
int sz = stats.size.get();

int numRemoved = 0;
int numKept = 0;
long newestEntry = timeCurrent;
long newNewestEntry = -1;
long newOldestEntry = Integer.MAX_VALUE;

int wantToKeep = lowerWaterMark;
int wantToRemove = sz - lowerWaterMark;

CacheEntry[] eset = new CacheEntry[sz];
int eSize = 0;

// System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));

for (CacheEntry ce : map.values()) {
// set lastAccessedCopy to avoid more volatile reads
ce.lastAccessedCopy = ce.lastAccessed;
long thisEntry = ce.lastAccessedCopy;
// since the wantToKeep group is likely to be bigger than wantToRemove, check it first
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing.
numKept++;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
} else {
// This entry *could* be in the bottom group.
// Collect these entries to avoid another full pass... this is wasted
// effort if enough entries are normally removed in this first pass.
// An alternate impl could make a full second pass.
if (eSize < eset.length-1) {
eset[eSize++] = ce;
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
// TODO: allow this to be customized in the constructor?
int numPasses=1; // maximum number of linear passes over the data
// if we didn't remove enough entries, then make more passes
// over the values we collected, with updated min and max values.
while (sz - numRemoved > acceptableWaterMark && --numPasses>=0) {
oldestEntry = newOldestEntry == Integer.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Integer.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
// iterate backward to make it easy to remove items.
for (int i=eSize-1; i>=0; i--) {
CacheEntry ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// remove the entry by moving the last element to it's position
eset[i] = eset[eSize-1];
eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it from the map.
evictEntry(ce.key);
numRemoved++;
// remove the entry by moving the last element to it's position
eset[i] = eset[eSize-1];
eSize--;
} else {
// This entry *could* be in the bottom group, so keep it in the eset,
// and update the stats.
newNewestEntry = Math.max(thisEntry, newNewestEntry);
newOldestEntry = Math.min(thisEntry, newOldestEntry);
}
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
}
// if we still didn't remove enough entries, then make another pass while
// inserting into a priority queue
if (sz - numRemoved > acceptableWaterMark) {
oldestEntry = newOldestEntry == Integer.MAX_VALUE ? oldestEntry : newOldestEntry;
newOldestEntry = Integer.MAX_VALUE;
newestEntry = newNewestEntry;
newNewestEntry = -1;
wantToKeep = lowerWaterMark - numKept;
wantToRemove = sz - lowerWaterMark - numRemoved;
PQueue queue = new PQueue(wantToRemove);
for (int i=eSize-1; i>=0; i--) {
CacheEntry ce = eset[i];
long thisEntry = ce.lastAccessedCopy;
if (thisEntry > newestEntry - wantToKeep) {
// this entry is guaranteed not to be in the bottom
// group, so do nothing but remove it from the eset.
numKept++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
newOldestEntry = Math.min(thisEntry, newOldestEntry);
} else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
// this entry is guaranteed to be in the bottom group
// so immediately remove it.
evictEntry(ce.key);
numRemoved++;
// removal not necessary on last pass.
// eset[i] = eset[eSize-1];
// eSize--;
} else {
// This entry *could* be in the bottom group.
// add it to the priority queue
// everything in the priority queue will be removed, so keep track of
// the lowest value that ever comes back out of the queue.
// first reduce the size of the priority queue to account for
// the number of items we have already removed while executing
// this loop so far.
queue.myMaxSize = sz - lowerWaterMark - numRemoved;
while (queue.size() > queue.myMaxSize && queue.size() > 0) {
CacheEntry otherEntry = (CacheEntry) queue.pop();
newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry);
}
if (queue.myMaxSize <= 0) break;
Object o = queue.myInsertWithOverflow(ce);
if (o != null) {
newOldestEntry = Math.min(((CacheEntry)o).lastAccessedCopy, newOldestEntry);
}
}
}
// Now delete everything in the priority queue.
// avoid using pop() since order doesn't matter anymore
for (Object o : queue.getValues()) {
if (o==null) continue;
CacheEntry ce = (CacheEntry)o;
evictEntry(ce.key);
numRemoved++;
}
// System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved));
}
oldestEntry = newOldestEntry == Integer.MAX_VALUE ? oldestEntry : newOldestEntry;
this.oldestEntry = oldestEntry;
} finally {
- isCleaning = false;
isCleaning = false;  // set before markAndSweep.unlock() for visibility
markAndSweepLock.unlock();
}
}
private static class PQueue extends PriorityQueue {
int myMaxSize;
PQueue(int maxSz) {
super.initialize(maxSz);
myMaxSize = maxSz;
}
Object[] getValues() { return heap; }
protected boolean lessThan(Object a, Object b) {
// reverse the parameter order so that the queue keeps the oldest items
return ((CacheEntry)b).lastAccessedCopy < ((CacheEntry)a).lastAccessedCopy;
}
// necessary because maxSize is private in base class
public Object myInsertWithOverflow(Object element) {
if (size() < myMaxSize) {
put(element);
return null;
} else if (size() > 0 && !lessThan(element, heap[1])) {
Object ret = heap[1];
heap[1] = element;
adjustTop();
return ret;
} else {
return element;
}
}
}
private void evictEntry(Object key) {
Object o = map.remove(key);
@@ -189,6 +364,7 @@ public class ConcurrentLRUCache {
public Map getLatestAccessedItems(long n) {
// we need to grab the lock since we are changing lastAccessedCopy
markAndSweepLock.lock();
Map result = new LinkedHashMap();
TreeSet<CacheEntry> tree = new TreeSet<CacheEntry>();
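
The comment block at the top of the new markAndSweep() relies on a simple partition rule: an entry accessed within wantToKeep counts of the newest access stamp cannot be among the oldest, an entry within wantToRemove counts of the oldest known stamp must be, and anything in between needs another pass. A standalone sketch with hypothetical names (not code from the commit) showing that rule on concrete numbers:

class SweepPartitionSketch {
  enum Group { KEEP, REMOVE, MAYBE }

  // Classify one entry the way the first pass of markAndSweep does.
  static Group classify(long lastAccessed, long newestEntry, long oldestEntry,
                        int wantToKeep, int wantToRemove) {
    if (lastAccessed > newestEntry - wantToKeep) return Group.KEEP;     // guaranteed survivor
    if (lastAccessed < oldestEntry + wantToRemove) return Group.REMOVE; // guaranteed eviction
    return Group.MAYBE;                                                 // defer to a later pass
  }

  public static void main(String[] args) {
    // Access counter currently at 10000, oldest stamp seen so far is 2000,
    // we want to keep 1000 entries and shed 500.
    System.out.println(classify(9800, 10000, 2000, 1000, 500)); // KEEP
    System.out.println(classify(2100, 10000, 2000, 1000, 500)); // REMOVE
    System.out.println(classify(5000, 10000, 2000, 1000, 500)); // MAYBE
  }
}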
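
The PQueue above reverses Lucene's PriorityQueue ordering so that the head of the queue is the newest of the retained entries and is therefore the one displaced on overflow, which leaves the wantToRemove oldest entries in the queue. The same idea, illustrated with java.util.PriorityQueue as a stand-in (an assumption for illustration, not the class the commit uses):

import java.util.Comparator;
import java.util.PriorityQueue;

class OldestNSketch {
  public static void main(String[] args) {
    int n = 3;                                   // how many of the oldest stamps to retain
    PriorityQueue<Long> oldest =
        new PriorityQueue<>(n, Comparator.<Long>reverseOrder()); // head = newest retained stamp

    long[] stamps = {50, 10, 40, 5, 30, 20};     // hypothetical lastAccessed values
    for (long s : stamps) {
      if (oldest.size() < n) {
        oldest.add(s);
      } else if (s < oldest.peek()) {            // s is older than the newest retained stamp
        oldest.poll();                           // displace the newest, like insertWithOverflow
        oldest.add(s);
      }
    }
    System.out.println(oldest);                  // holds the three oldest stamps: 5, 10, 20
  }
}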


@@ -44,7 +44,7 @@ public class FastLRUCache implements SolrCache {
this.regenerator = regenerator;
name = (String) args.get("name");
String str = (String) args.get("size");
- final int limit = str == null ? 1024 : Integer.parseInt(str);
int limit = str == null ? 1024 : Integer.parseInt(str);
int minLimit;
str = (String) args.get("minSize");
if (str == null) {
@@ -52,6 +52,9 @@ public class FastLRUCache implements SolrCache {
} else {
minLimit = Integer.parseInt(str);
}
if (minLimit==0) minLimit=1;
if (limit <= minLimit) limit=minLimit+1;
int acceptableLimit;
str = (String) args.get("acceptableSize");
if (str == null) {
@@ -59,12 +62,14 @@ public class FastLRUCache implements SolrCache {
} else {
acceptableLimit = Integer.parseInt(str);
}
acceptableLimit = Math.max(limit,acceptableLimit);
str = (String) args.get("initialSize");
- final int initialSize = str == null ? 1024 : Integer.parseInt(str);
final int initialSize = str == null ? limit : Integer.parseInt(str);
str = (String) args.get("autowarmCount");
autowarmCount = str == null ? 0 : Integer.parseInt(str);
- description = "Concurrent LRU Cache(maxSize=" + limit + ", initialSize=" + initialSize;
description = "Concurrent LRU Cache(maxSize=" + limit + ", initialSize=" + initialSize + ", minSize="+minLimit + ", acceptableSize="+acceptableLimit;
if (autowarmCount > 0) {
description += ", autowarmCount=" + autowarmCount
+ ", regenerator=" + regenerator;


@@ -2,10 +2,13 @@ package org.apache.solr.search;
import junit.framework.TestCase;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.ConcurrentLRUCache;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
/**
@@ -40,7 +43,9 @@ public class TestFastLRUCache extends TestCase {
assertEquals(2L, nl.get("lookups"));
assertEquals(1L, nl.get("hits"));
assertEquals(101L, nl.get("inserts"));
assertEquals(11L, nl.get("evictions"));
assertEquals(null, sc.get(1)); // first item put in should be the first out
FastLRUCache scNew = new FastLRUCache();
scNew.init(l, o, cr);
@@ -55,10 +60,148 @@ public class TestFastLRUCache extends TestCase {
assertEquals(1L, nl.get("inserts"));
assertEquals(0L, nl.get("evictions"));
- assertEquals(4L, nl.get("cumulative_lookups"));
assertEquals(5L, nl.get("cumulative_lookups"));
assertEquals(2L, nl.get("cumulative_hits"));
assertEquals(102L, nl.get("cumulative_inserts"));
assertEquals(11L, nl.get("cumulative_evictions"));
}
void doPerfTest(int iter, int cacheSize, int maxKey) {
long start = System.currentTimeMillis();
int lowerWaterMark = cacheSize;
int upperWaterMark = (int)(lowerWaterMark * 1.1);
Random r = new Random(0);
ConcurrentLRUCache cache = new ConcurrentLRUCache(upperWaterMark, lowerWaterMark, (upperWaterMark+lowerWaterMark)/2, upperWaterMark, false, false, 0);
boolean getSize=false;
int minSize=0,maxSize=0;
for (int i=0; i<iter; i++) {
cache.put(r.nextInt(maxKey),"TheValue");
int sz = cache.size();
if (!getSize && sz >= cacheSize) {
getSize = true;
minSize = sz;
} else {
if (sz < minSize) minSize=sz;
else if (sz > maxSize) maxSize=sz;
}
}
long end = System.currentTimeMillis();
System.out.println("time=" + (end-start) + ", minSize="+minSize+",maxSize="+maxSize);
}
/***
public void testPerf() {
doPerfTest(1000000, 100000, 200000); // big cache, warmup
doPerfTest(2000000, 100000, 200000); // big cache
doPerfTest(2000000, 100000, 120000); // smaller key space increases distance between oldest, newest and makes the first passes less effective.
doPerfTest(6000000, 1000, 2000); // small cache, smaller hit rate
doPerfTest(6000000, 1000, 1200); // small cache, bigger hit rate
}
***/
// returns number of puts
int useCache(SolrCache sc, int numGets, int maxKey, int seed) {
int ret = 0;
Random r = new Random(seed);
// use like a cache... gets and a put if not found
for (int i=0; i<numGets; i++) {
Integer k = r.nextInt(maxKey);
Integer v = (Integer)sc.get(k);
if (v == null) {
sc.put(k, k);
ret++;
}
}
return ret;
}
void fillCache(SolrCache sc, int cacheSize, int maxKey) {
Random r = new Random(0);
for (int i=0; i<cacheSize; i++) {
Integer kv = r.nextInt(maxKey);
sc.put(kv,kv);
}
}
void cachePerfTest(final SolrCache sc, final int nThreads, final int numGets, int cacheSize, final int maxKey) {
Map l = new HashMap();
l.put("size", ""+cacheSize);
l.put("initialSize", ""+cacheSize);
Object o = sc.init(l, null, null);
sc.setState(SolrCache.State.LIVE);
fillCache(sc, cacheSize, maxKey);
long start = System.currentTimeMillis();
Thread[] threads = new Thread[nThreads];
final AtomicInteger puts = new AtomicInteger(0);
for (int i=0; i<threads.length; i++) {
final int seed=i;
threads[i] = new Thread() {
public void run() {
int ret = useCache(sc, numGets/nThreads, maxKey, seed);
puts.addAndGet(ret);
}
};
}
for (Thread thread : threads) {
try {
thread.start();
} catch (Exception e) {
e.printStackTrace();
}
}
for (Thread thread : threads) {
try {
thread.join();
} catch (Exception e) {
e.printStackTrace();
}
}
long end = System.currentTimeMillis();
System.out.println("time=" + (end-start) + " impl=" +sc.getClass().getSimpleName()
+" nThreads= " + nThreads + " size="+cacheSize+" maxKey="+maxKey+" gets="+numGets
+" hitRatio="+(1-(((double)puts.get())/numGets)));
}
void perfTestBoth(int nThreads, int numGets, int cacheSize, int maxKey) {
cachePerfTest(new LRUCache(), nThreads, numGets, cacheSize, maxKey);
cachePerfTest(new FastLRUCache(), nThreads, numGets, cacheSize, maxKey);
}
/***
public void testCachePerf() {
// warmup
perfTestBoth(2, 100000, 100000, 120000);
perfTestBoth(1, 2000000, 100000, 100000); // big cache, 100% hit ratio
perfTestBoth(2, 2000000, 100000, 100000); // big cache, 100% hit ratio
perfTestBoth(1, 2000000, 100000, 120000); // big cache, bigger hit ratio
perfTestBoth(2, 2000000, 100000, 120000); // big cache, bigger hit ratio
perfTestBoth(1, 2000000, 100000, 200000); // big cache, ~50% hit ratio
perfTestBoth(2, 2000000, 100000, 200000); // big cache, ~50% hit ratio
perfTestBoth(1, 2000000, 100000, 1000000); // big cache, ~10% hit ratio
perfTestBoth(2, 2000000, 100000, 1000000); // big cache, ~10% hit ratio
perfTestBoth(1, 2000000, 1000, 1000); // small cache, ~100% hit ratio
perfTestBoth(2, 2000000, 1000, 1000); // small cache, ~100% hit ratio
perfTestBoth(1, 2000000, 1000, 1200); // small cache, bigger hit ratio
perfTestBoth(2, 2000000, 1000, 1200); // small cache, bigger hit ratio
perfTestBoth(1, 2000000, 1000, 2000); // small cache, ~50% hit ratio
perfTestBoth(2, 2000000, 1000, 2000); // small cache, ~50% hit ratio
perfTestBoth(1, 2000000, 1000, 10000); // small cache, ~10% hit ratio
perfTestBoth(2, 2000000, 1000, 10000); // small cache, ~10% hit ratio
}
***/
}


@@ -143,7 +143,7 @@
that match a particular query.
-->
<filterCache
- class="solr.search.LRUCache"
class="solr.search.FastLRUCache"
size="512"
initialSize="512"
autowarmCount="256"/>