SOLR-1006 -- ConcurrentLRUCache API improvements

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@743294 13f79535-47bb-0310-9956-ffa450edef68
Shalin Shekhar Mangar 2009-02-11 10:54:09 +00:00
parent c4a96dc1de
commit 790679130e
2 changed files with 58 additions and 6 deletions


@@ -55,6 +55,10 @@ public class ConcurrentLRUCache<K,V> {
}
}
public ConcurrentLRUCache(int size, int lowerWatermark) {
this(size, lowerWatermark, (int) Math.floor((lowerWatermark + size) / 2),
(int) Math.ceil(0.75 * size), false, false, null);
}
public void setAlive(boolean live) {
islive = live;
@@ -74,7 +78,6 @@ public class ConcurrentLRUCache<K,V> {
CacheEntry<K,V> cacheEntry = map.remove(key);
if (cacheEntry != null) {
stats.size.decrementAndGet();
- if(evictionListener != null) evictionListener.evictedEntry(cacheEntry.key , cacheEntry.value);
return cacheEntry.value;
}
return null;
@@ -363,8 +366,41 @@ public class ConcurrentLRUCache<K,V> {
if(evictionListener != null) evictionListener.evictedEntry(o.key,o.value);
}
/**
* Returns the 'n' least recently accessed entries present in this cache.
*
* This uses a TreeSet to collect the 'n' entries with the oldest last-access times
* and returns them in a LinkedHashMap.
* @param n the number of oldest entries needed
* @return a LinkedHashMap containing at most 'n' entries
*/
public Map<K, V> getOldestAccessedItems(int n) {
markAndSweepLock.lock();
Map<K, V> result = new LinkedHashMap<K, V>();
TreeSet<CacheEntry> tree = new TreeSet<CacheEntry>();
try {
for (Map.Entry<Object, CacheEntry> entry : map.entrySet()) {
CacheEntry ce = entry.getValue();
ce.lastAccessedCopy = ce.lastAccessed;
if (tree.size() < n) {
tree.add(ce);
} else {
if (ce.lastAccessedCopy < tree.first().lastAccessedCopy) {
tree.remove(tree.first());
tree.add(ce);
}
}
}
} finally {
markAndSweepLock.unlock();
}
for (CacheEntry<K, V> e : tree) {
result.put(e.key, e.value);
}
return result;
}
- public Map getLatestAccessedItems(long n) {
+ public Map<K,V> getLatestAccessedItems(int n) {
// we need to grab the lock since we are changing lastAccessedCopy
markAndSweepLock.lock();
Map<K,V> result = new LinkedHashMap<K,V>();
@@ -378,7 +414,7 @@ public class ConcurrentLRUCache<K,V> {
} else {
if (ce.lastAccessedCopy > tree.last().lastAccessedCopy) {
tree.remove(tree.last());
- tree.add(entry.getValue());
+ tree.add(ce);
}
}
}
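For context, the sketch below shows how a caller might exercise the new accessors added by this commit. The cache setup mirrors the testOldestItems test in the second file; the example class, its main method, and the import path are illustrative assumptions rather than part of the commit, and the expected key order follows the test's own "7 6 5 4 2" comment.

// Usage sketch only; the package/import for ConcurrentLRUCache is assumed here.
import java.util.Map;

import org.apache.solr.common.util.ConcurrentLRUCache;

public class OldestAccessedExample {
  public static void main(String[] args) {
    // capacity 100 with a lower watermark of 90, via the new two-argument constructor
    ConcurrentLRUCache<Integer, String> cache =
        new ConcurrentLRUCache<Integer, String>(100, 90);

    for (int i = 1; i <= 50; i++) {
      cache.put(i, "" + i);
    }
    cache.get(1);   // re-access 1 and 3 so they are no longer among the oldest
    cache.get(3);

    // Five least recently accessed entries; per the test's comment, keys 7 6 5 4 2.
    Map<Integer, String> oldest = cache.getOldestAccessedItems(5);

    // Five most recently accessed entries, via the retyped getLatestAccessedItems(int).
    Map<Integer, String> latest = cache.getLatestAccessedItems(5);

    System.out.println("oldest = " + oldest.keySet());
    System.out.println("latest = " + latest.keySet());
  }
}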


@@ -65,6 +65,22 @@ public class TestFastLRUCache extends TestCase {
assertEquals(102L, nl.get("cumulative_inserts"));
}
public void testOldestItems() {
ConcurrentLRUCache<Integer, String> cache = new ConcurrentLRUCache<Integer, String>(100, 90);
for (int i = 0; i < 50; i++) {
cache.put(i + 1, "" + (i + 1));
}
cache.get(1);
cache.get(3);
Map<Integer, String> m = cache.getOldestAccessedItems(5);
// expected oldest keys: 7 6 5 4 2 (1 and 3 were just re-read, so they are no longer oldest)
assertNotNull(m.get(7));
assertNotNull(m.get(6));
assertNotNull(m.get(5));
assertNotNull(m.get(4));
assertNotNull(m.get(2));
}
void doPerfTest(int iter, int cacheSize, int maxKey) {
long start = System.currentTimeMillis();
@@ -87,7 +103,7 @@ public class TestFastLRUCache extends TestCase {
}
}
long end = System.currentTimeMillis();
System.out.println("time=" + (end-start) + ", minSize="+minSize+",maxSize="+maxSize);
}
@@ -105,7 +121,7 @@ public class TestFastLRUCache extends TestCase {
int useCache(SolrCache sc, int numGets, int maxKey, int seed) {
int ret = 0;
Random r = new Random(seed);
// use like a cache... gets and a put if not found
for (int i=0; i<numGets; i++) {
Integer k = r.nextInt(maxKey);
@@ -127,7 +143,7 @@ public class TestFastLRUCache extends TestCase {
}
}
void cachePerfTest(final SolrCache sc, final int nThreads, final int numGets, int cacheSize, final int maxKey) {
Map l = new HashMap();
l.put("size", ""+cacheSize);
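The new test only covers getOldestAccessedItems; a companion check for the retyped getLatestAccessedItems(int) could mirror it as sketched below. This method is not part of the commit, and the expected keys (3 and 1, which were re-read last, followed by the most recently inserted 50, 49, 48) are an assumption based on the same access pattern.

// Hypothetical companion test, not included in SOLR-1006; it would live in TestFastLRUCache.
public void testLatestItems() {
  ConcurrentLRUCache<Integer, String> cache = new ConcurrentLRUCache<Integer, String>(100, 90);
  for (int i = 0; i < 50; i++) {
    cache.put(i + 1, "" + (i + 1));
  }
  cache.get(1);
  cache.get(3);
  Map<Integer, String> m = cache.getLatestAccessedItems(5);
  // expected most recently accessed keys: 3, 1, 50, 49, 48
  assertNotNull(m.get(3));
  assertNotNull(m.get(1));
  assertNotNull(m.get(50));
  assertNotNull(m.get(49));
  assertNotNull(m.get(48));
}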