mirror of https://github.com/apache/lucene.git
LUCENE-10103 Make QueryCache respect Accountable queries (#346)
This commit is contained in:
parent 8bcc3dc430
commit 6a41bc6310
@@ -304,6 +304,8 @@ Improvements
 * LUCENE-10145, LUCENE-10153: Faster flushes and merges of points by leveraging
   VarHandles. (Adrien Grand)
 
+* LUCENE-10103: Make QueryCache respect Accountable queries. (Haoyu Zhai)
+
 Bug fixes
 
 * LUCENE-9686: Fix read past EOF handling in DirectIODirectory. (Zach Chen,
@@ -299,7 +299,12 @@ public class LRUQueryCache implements QueryCache, Accountable {
     try {
       Query singleton = uniqueQueries.putIfAbsent(query, query);
       if (singleton == null) {
-        onQueryCache(query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
+        if (query instanceof Accountable) {
+          onQueryCache(
+              query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + ((Accountable) query).ramBytesUsed());
+        } else {
+          onQueryCache(query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
+        }
       } else {
         query = singleton;
       }
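With this change, a query that implements Accountable is charged its own reported footprint when it enters the cache, instead of the fixed per-query default. A minimal sketch of how a user query could opt in (not part of this commit; the class and field names are illustrative only):

// A minimal sketch, assuming a hypothetical query that carries a large precomputed
// structure. A real query would also implement createWeight() so it can be executed.
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;

final class HeavyPrecomputedQuery extends Query implements Accountable {
  private final long[] precomputed; // stands in for whatever large state the query holds

  HeavyPrecomputedQuery(long[] precomputed) {
    this.precomputed = precomputed;
  }

  @Override
  public long ramBytesUsed() {
    // Report the real footprint; LRUQueryCache now uses this instead of its fixed default.
    return RamUsageEstimator.shallowSizeOf(this) + RamUsageEstimator.sizeOf(precomputed);
  }

  @Override
  public String toString(String field) {
    return "HeavyPrecomputedQuery";
  }

  @Override
  public void visit(QueryVisitor visitor) {
    visitor.visitLeaf(this);
  }

  @Override
  public boolean equals(Object other) {
    return sameClassAs(other); // simplified; a real query would also compare its state
  }

  @Override
  public int hashCode() {
    return classHash();
  }
}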
@@ -28,19 +28,34 @@ import org.apache.lucene.index.TermStates;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.DocIdSetBuilder;
+import org.apache.lucene.util.RamUsageEstimator;
 
 /**
  * This class also provides the functionality behind {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}.
  * It tries to rewrite per-segment as a boolean query that returns a constant score and otherwise
  * fills a bit set with matches and builds a Scorer on top of this bit set.
  */
-final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends Query {
+final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends Query
+    implements Accountable {
 
   // mtq that matches 16 terms or less will be executed as a regular disjunction
   private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
 
+  @Override
+  public long ramBytesUsed() {
+    if (query instanceof Accountable) {
+      return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+          + RamUsageEstimator.NUM_BYTES_OBJECT_REF
+          + ((Accountable) query).ramBytesUsed();
+    }
+    return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+        + RamUsageEstimator.NUM_BYTES_OBJECT_REF
+        + RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
+  }
+
   private static class TermAndState {
     final BytesRef term;
     final TermState state;
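This wrapper is what the constant-score rewrite produces for multi-term queries (wildcards, prefixes, and similar), so making it Accountable lets the cache see through it to the wrapped query's size, or fall back to the default estimate otherwise. A hedged sketch of how that surfaces, assuming an existing index with a text field named "body" (the field name and helper class are illustrative, not from the commit):

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Accountable;

final class WrapperAccountingSketch {
  // Returns the bytes the rewritten query reports, or -1 if it is not Accountable.
  static long rewrittenQueryBytes(Directory dir) throws IOException {
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // With the constant-score rewrite that multi-term queries used by default at the time
      // of this change, rewriting a wildcard query yields the (package-private)
      // MultiTermQueryConstantScoreWrapper, which now implements Accountable.
      Query rewritten = searcher.rewrite(new WildcardQuery(new Term("body", "lucene*")));
      if (rewritten instanceof Accountable) {
        return ((Accountable) rewritten).ramBytesUsed();
      }
      return -1; // not accountable: the cache falls back to its per-query default estimate
    }
  }
}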
@@ -61,6 +61,7 @@ import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
@@ -532,6 +533,46 @@ public class TestLRUQueryCache extends LuceneTestCase {
     dir.close();
   }
 
+  /** DummyQuery with Accountable, pretending to be a memory-eating query */
+  private class AccountableDummyQuery extends DummyQuery implements Accountable {
+
+    @Override
+    public long ramBytesUsed() {
+      return 10 * QUERY_DEFAULT_RAM_BYTES_USED;
+    }
+  }
+
+  public void testCachingAccountableQuery() throws IOException {
+    final LRUQueryCache queryCache =
+        new LRUQueryCache(1000000, 10000000, context -> true, Float.POSITIVE_INFINITY);
+
+    Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    final int numDocs = atLeast(100);
+    for (int i = 0; i < numDocs; ++i) {
+      w.addDocument(doc);
+    }
+    final DirectoryReader reader = w.getReader();
+    final IndexSearcher searcher = new IndexSearcher(reader);
+    searcher.setQueryCache(queryCache);
+    searcher.setQueryCachingPolicy(ALWAYS_CACHE);
+
+    final int numQueries = random().nextInt(100) + 100;
+    for (int i = 0; i < numQueries; ++i) {
+      final Query query = new AccountableDummyQuery();
+      searcher.count(query);
+    }
+    long queryRamBytesUsed =
+        numQueries * (10 * QUERY_DEFAULT_RAM_BYTES_USED + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY);
+    // allow 10% error for other ram bytes used estimation inside query cache
+    assertEquals(queryRamBytesUsed, queryCache.ramBytesUsed(), 10 * queryRamBytesUsed / 100);
+
+    reader.close();
+    w.close();
+    dir.close();
+  }
+
   public void testOnUse() throws IOException {
     final LRUQueryCache queryCache =
         new LRUQueryCache(
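The assertion above expects each cached query to cost what AccountableDummyQuery reports (ten times the default per-query estimate) plus the fixed per-entry hashtable overhead, with every query instance occupying its own cache entry, which is what the numQueries multiplier assumes; the delta allows 10% slack for the cache's other bookkeeping. A hedged restatement of that arithmetic (parameter names mirror the constants used above; values are whatever those constants hold):

// Not additional test code, just the expected-size calculation spelled out.
final class ExpectedCacheBytes {
  static long expected(int numQueries, long defaultQueryBytes, long entryOverhead) {
    long perQueryBytes = 10 * defaultQueryBytes; // what each AccountableDummyQuery reports
    return numQueries * (perQueryBytes + entryOverhead); // one cache entry per distinct query
  }
}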