LUCENE-4576: Remove CachingWrapperFilter recacheDeletes boolean

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1414474 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2012-11-27 23:56:18 +00:00
parent 862335a772
commit 49a461a2b4
3 changed files with 14 additions and 55 deletions

View File

@ -54,6 +54,11 @@ Changes in backwards compatibility policy
abstract.
(Shai Erera, Gilad Barkai)
* LUCENE-4576: Remove CachingWrapperFilter(Filter, boolean). This recacheDeletes
option gave less than 1% speedup at the expense of cache churn (filters were
invalidated on reopen if even a single delete was posted against the segment).
(Robert Muir)
New Features
* LUCENE-4226: New experimental StoredFieldsFormat that compresses chunks of

View File

@@ -39,36 +39,12 @@ public class CachingWrapperFilter extends Filter {
// level of the readers hierarchy it should be cached.
private final Filter filter;

// Cache of per-segment DocIdSets, keyed by the segment's core cache key.
// WeakHashMap lets entries be reclaimed when a segment reader is closed;
// synchronizedMap guards concurrent get/put from multiple searcher threads.
private final Map<Object,DocIdSet> cache = Collections.synchronizedMap(new WeakHashMap<Object,DocIdSet>());

/** Wraps another filter's result and caches it.
 * <p>Deletions are never cached: the filter's {@link DocIdSet} is cached
 * per segment core key and live docs are AND'd in at query time, so the
 * cache entry survives reopens that only add deletes.
 * @param filter Filter to cache results of
 */
public CachingWrapperFilter(Filter filter) {
  this.filter = filter;
}
/** Provide the DocIdSet to be cached, using the DocIdSet provided
@@ -104,54 +80,34 @@ public class CachingWrapperFilter extends Filter {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();

  // Cache purely on the segment's core key: the cached set is
  // deletion-agnostic, so a reopen that only posts deletes against this
  // segment reuses the existing entry instead of churning the cache.
  final Object key = reader.getCoreCacheKey();

  DocIdSet docIdSet = cache.get(key);
  if (docIdSet != null) {
    hitCount++;
  } else {
    missCount++;
    // Pass null acceptDocs so the wrapped filter computes its result
    // without deletions folded in; deletions are applied on the way out.
    docIdSet = docIdSetToCache(filter.getDocIdSet(context, null), reader);
    cache.put(key, docIdSet);
  }

  // AND in the caller's acceptDocs (typically the segment's live docs,
  // but FilteredQuery chains can pass other bit sets) at query time.
  return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
}
@Override
public String toString() {
  // Identify the wrapped filter; no other configuration remains to report.
  return "CachingWrapperFilter("+filter+")";
}
@Override
public boolean equals(Object o) {
  if (!(o instanceof CachingWrapperFilter)) return false;
  final CachingWrapperFilter other = (CachingWrapperFilter) o;
  // Equality is delegated to the wrapped filter (consistent with hashCode).
  return this.filter.equals(other.filter);
}
@Override
public int hashCode() {
  // XOR with a constant so a CachingWrapperFilter never hashes identically
  // to its wrapped filter; kept consistent with equals().
  return (filter.hashCode() ^ 0x1117BF25);
}
}

View File

@@ -189,8 +189,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
// force cache to regenerate after deletions:
CachingWrapperFilter filter = new CachingWrapperFilter(startFilter, true);
CachingWrapperFilter filter = new CachingWrapperFilter(startFilter);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
@@ -231,9 +230,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
// cache miss, because we asked CWF to recache when
// deletes changed:
assertEquals(missCount+1, filter.missCount);
// cache hit
assertEquals(missCount, filter.missCount);
docs = searcher.search(constantScore, 1);
assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);