Make LRUQueryCache respect Accountable queries on eviction and consistency check (#12614)

Given a query that implements Accountable, LRUQueryCache would increment its internal
memory accounting by the amount reported by Accountable.ramBytesUsed(), but would only
decrement it on eviction by the default estimate used for all other queries. As a result,
the cache could eventually believe it had run out of space even when it contained no
queries at all. This commit ensures that queries implementing Accountable are accounted
for consistently when they are cached, when they are evicted, and in the consistency check.
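To make the drift concrete, here is a minimal standalone sketch of the pre-fix accounting, assuming a query that reports 10,000 bytes and the default per-query estimate; only the constant name QUERY_DEFAULT_RAM_BYTES_USED mirrors LRUQueryCache, everything else is invented for illustration:

    // Illustrative only, not Lucene code. Shows how asymmetric accounting
    // leaves phantom bytes behind after one cache/evict round trip.
    public class AccountingDriftSketch {
      static final long QUERY_DEFAULT_RAM_BYTES_USED = 1024; // default per-query estimate

      public static void main(String[] args) {
        long ramBytesUsed = 0;
        long reportedSize = 10_000; // what Accountable.ramBytesUsed() might report

        ramBytesUsed += reportedSize;                 // on caching: add the reported size
        ramBytesUsed -= QUERY_DEFAULT_RAM_BYTES_USED; // on eviction: subtract the default only

        // The cache is empty again, yet 8,976 bytes remain "accounted for".
        // Repeat often enough and the cache believes it has run out of space.
        System.out.println("phantom bytes after one round trip: " + ramBytesUsed);
      }
    }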
Grigoriy Troitskiy 2023-10-03 16:52:12 +03:00 committed by GitHub
parent 3f81f2f315
commit 1baae3629a
3 changed files with 53 additions and 12 deletions

lucene/CHANGES.txt

@@ -176,6 +176,9 @@ Changes in runtime behavior
 Bug Fixes
 ---------------------
 
+* GITHUB#12614: Make LRUQueryCache respect Accountable queries on eviction and consistency check
+  (Grigoriy Troitskiy)
+
 * GITHUB#11556: HTMLStripCharFilter fails on '>' or '<' characters in attribute values. (Elliot Lin)
 
 Build

lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java

@@ -297,12 +297,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
     try {
       Query singleton = uniqueQueries.putIfAbsent(query, query);
       if (singleton == null) {
-        if (query instanceof Accountable) {
-          onQueryCache(
-              query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + ((Accountable) query).ramBytesUsed());
-        } else {
-          onQueryCache(query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
-        }
+        onQueryCache(query, getRamBytesUsed(query));
      } else {
        query = singleton;
      }
@@ -385,7 +380,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
 
   private void onEviction(Query singleton) {
     assert lock.isHeldByCurrentThread();
-    onQueryEviction(singleton, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
+    onQueryEviction(singleton, getRamBytesUsed(singleton));
     for (LeafCache leafCache : cache.values()) {
       leafCache.remove(singleton);
     }
@@ -405,6 +400,13 @@ public class LRUQueryCache implements QueryCache, Accountable {
     }
   }
 
+  private static long getRamBytesUsed(Query query) {
+    return LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY
+        + (query instanceof Accountable accountableQuery
+            ? accountableQuery.ramBytesUsed()
+            : QUERY_DEFAULT_RAM_BYTES_USED);
+  }
+
   // pkg-private for testing
   void assertConsistent() {
     lock.lock();
@@ -429,11 +431,10 @@ public class LRUQueryCache implements QueryCache, Accountable {
             "One leaf cache contains more keys than the top-level cache: " + keys);
       }
     }
-    long recomputedRamBytesUsed =
-        HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size()
-            + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * uniqueQueries.size();
-    recomputedRamBytesUsed +=
-        mostRecentlyUsedQueries.size() * (long) QUERY_DEFAULT_RAM_BYTES_USED;
+    long recomputedRamBytesUsed = HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size();
+    for (Query query : mostRecentlyUsedQueries) {
+      recomputedRamBytesUsed += getRamBytesUsed(query);
+    }
     for (LeafCache leafCache : cache.values()) {
       recomputedRamBytesUsed += HASHTABLE_RAM_BYTES_PER_ENTRY * leafCache.cache.size();
       for (CacheAndCount cached : leafCache.cache.values()) {

lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java

@@ -593,6 +593,43 @@ public class TestLRUQueryCache extends LuceneTestCase {
     dir.close();
   }
 
+  public void testConsistencyWithAccountableQueries() throws IOException {
+    var queryCache = new LRUQueryCache(1, 10000000, context -> true, Float.POSITIVE_INFINITY);
+
+    var dir = newDirectory();
+    var writer = new RandomIndexWriter(random(), dir);
+    writer.addDocument(new Document());
+    var reader = writer.getReader();
+
+    var searcher = new IndexSearcher(reader);
+    searcher.setQueryCache(queryCache);
+    searcher.setQueryCachingPolicy(ALWAYS_CACHE);
+    queryCache.assertConsistent();
+
+    var accountableQuery = new AccountableDummyQuery();
+    searcher.count(accountableQuery);
+    var expectedRamBytesUsed =
+        HASHTABLE_RAM_BYTES_PER_ENTRY
+            + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY
+            + accountableQuery.ramBytesUsed()
+            + queryCache.getChildResources().iterator().next().ramBytesUsed();
+    assertEquals(expectedRamBytesUsed, queryCache.ramBytesUsed());
+    queryCache.assertConsistent();
+
+    queryCache.clearQuery(accountableQuery);
+    assertEquals(HASHTABLE_RAM_BYTES_PER_ENTRY, queryCache.ramBytesUsed());
+    queryCache.assertConsistent();
+
+    queryCache.clearCoreCacheKey(
+        reader.getContext().leaves().get(0).reader().getCoreCacheHelper().getKey());
+    assertEquals(0, queryCache.ramBytesUsed());
+    queryCache.assertConsistent();
+
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
   public void testOnUse() throws IOException {
     final LRUQueryCache queryCache =
         new LRUQueryCache(