remove weak caching of filters and stick with only caching when needed / flagged / defaulted

kimchy 2011-04-09 02:47:31 +03:00
parent b71513a072
commit c934f04b0c
15 changed files with 459 additions and 317 deletions
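In effect, the parser changes below reduce the caching decision to a single flag: wrap the filter for caching when the request flags it (or the parser defaults it), and otherwise return it untouched, with no weak-cache fallback. A minimal sketch of the resulting decision, reusing the QueryParseContext API shown in the diffs that follow (the helper name maybeCache is illustrative, not part of the commit):

Filter maybeCache(Filter filter, boolean cache, QueryParseContext parseContext) {
    if (cache) {
        return parseContext.cacheFilter(filter); // caches the filter's DocSet per reader
    }
    return filter; // previously this fell back to parseContext.cacheWeakFilter(filter)
}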


@@ -0,0 +1,336 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitUtil;
import org.elasticsearch.common.RamUsage;
import java.io.IOException;
/**
* Derived from {@link org.apache.lucene.util.OpenBitSet} but works from a slice out of a provided long[] array.
* It does not expand, since the slice is assumed to come from a cached, shared long[] array whose storage cannot be reallocated.
*/
public class SlicedOpenBitSet extends DocSet {
private final long[] bits;
private final int wlen; // number of words (elements) used in the array
private final int from; // the from index in the array
public SlicedOpenBitSet(long[] bits, int wlen, int from) {
this.bits = bits;
this.wlen = wlen;
this.from = from;
}
@Override public boolean isCacheable() {
return true;
}
@Override public long sizeInBytes() {
return wlen * RamUsage.NUM_BYTES_LONG + RamUsage.NUM_BYTES_ARRAY_HEADER + 2 * RamUsage.NUM_BYTES_INT /* wlen, from */;
}
/**
* Returns the current capacity in bits (1 greater than the index of the last bit)
*/
public long capacity() {
return (bits.length - from) << 6;
}
/**
* Returns the current capacity of this set. Included for
* compatibility. This is *not* equal to {@link #cardinality}
*/
public long size() {
return capacity();
}
/**
* @return the number of set bits
*/
public long cardinality() {
return BitUtil.pop_array(bits, from, wlen);
}
/**
* Returns true or false for the specified bit index.
*/
public boolean get(int index) {
int i = index >> 6; // div 64
if (i >= wlen) return false; // outside the slice, so not set
int bit = index & 0x3f; // mod 64
long bitmask = 1L << bit;
return (bits[from + i] & bitmask) != 0;
}
/**
* Returns true or false for the specified bit index.
* The index should be less than the OpenBitSet size
*/
public boolean fastGet(int index) {
int i = index >> 6; // div 64
// signed shift will keep a negative index and force an
// array-index-out-of-bounds-exception, removing the need for an explicit check.
int bit = index & 0x3f; // mod 64
long bitmask = 1L << bit;
return (bits[from + i] & bitmask) != 0;
}
/**
* Returns true or false for the specified bit index
*/
public boolean get(long index) {
int i = (int) (index >> 6); // div 64
if (i >= wlen) return false; // outside the slice, so not set
int bit = (int) index & 0x3f; // mod 64
long bitmask = 1L << bit;
return (bits[from + i] & bitmask) != 0;
}
/**
* Returns true or false for the specified bit index.
* The index should be less than the OpenBitSet size.
*/
public boolean fastGet(long index) {
int i = (int) (index >> 6); // div 64
int bit = (int) index & 0x3f; // mod 64
long bitmask = 1L << bit;
return (bits[from + i] & bitmask) != 0;
}
/**
* Sets the bit at the specified index.
* The index should be less than the OpenBitSet size.
*/
public void fastSet(int index) {
int wordNum = index >> 6; // div 64
int bit = index & 0x3f; // mod 64
long bitmask = 1L << bit;
bits[from + wordNum] |= bitmask;
}
/**
* Sets the bit at the specified index.
* The index should be less than the OpenBitSet size.
*/
public void fastSet(long index) {
int wordNum = (int) (index >> 6);
int bit = (int) index & 0x3f;
long bitmask = 1L << bit;
bits[from + wordNum] |= bitmask;
}
@Override public DocIdSetIterator iterator() throws IOException {
return new SlicedIterator(this);
}
/**
* An iterator over the set bits of a SlicedOpenBitSet.
* This is faster than a nextSetBit() loop for iterating over the complete set of bits,
* especially when the density of set bits is high.
*/
public static class SlicedIterator extends DocIdSetIterator {
// The General Idea: instead of having an array per byte that has
// the offsets of the next set bit, that array could be
// packed inside a 32 bit integer (8 4 bit numbers). That
// should be faster than accessing an array for each index, and
// the total array size is kept smaller (256*sizeof(int))=1K
protected final static int[] bitlist = {
0x0, 0x1, 0x2, 0x21, 0x3, 0x31, 0x32, 0x321, 0x4, 0x41, 0x42, 0x421, 0x43,
0x431, 0x432, 0x4321, 0x5, 0x51, 0x52, 0x521, 0x53, 0x531, 0x532, 0x5321,
0x54, 0x541, 0x542, 0x5421, 0x543, 0x5431, 0x5432, 0x54321, 0x6, 0x61, 0x62,
0x621, 0x63, 0x631, 0x632, 0x6321, 0x64, 0x641, 0x642, 0x6421, 0x643,
0x6431, 0x6432, 0x64321, 0x65, 0x651, 0x652, 0x6521, 0x653, 0x6531, 0x6532,
0x65321, 0x654, 0x6541, 0x6542, 0x65421, 0x6543, 0x65431, 0x65432, 0x654321,
0x7, 0x71, 0x72, 0x721, 0x73, 0x731, 0x732, 0x7321, 0x74, 0x741, 0x742,
0x7421, 0x743, 0x7431, 0x7432, 0x74321, 0x75, 0x751, 0x752, 0x7521, 0x753,
0x7531, 0x7532, 0x75321, 0x754, 0x7541, 0x7542, 0x75421, 0x7543, 0x75431,
0x75432, 0x754321, 0x76, 0x761, 0x762, 0x7621, 0x763, 0x7631, 0x7632,
0x76321, 0x764, 0x7641, 0x7642, 0x76421, 0x7643, 0x76431, 0x76432, 0x764321,
0x765, 0x7651, 0x7652, 0x76521, 0x7653, 0x76531, 0x76532, 0x765321, 0x7654,
0x76541, 0x76542, 0x765421, 0x76543, 0x765431, 0x765432, 0x7654321, 0x8,
0x81, 0x82, 0x821, 0x83, 0x831, 0x832, 0x8321, 0x84, 0x841, 0x842, 0x8421,
0x843, 0x8431, 0x8432, 0x84321, 0x85, 0x851, 0x852, 0x8521, 0x853, 0x8531,
0x8532, 0x85321, 0x854, 0x8541, 0x8542, 0x85421, 0x8543, 0x85431, 0x85432,
0x854321, 0x86, 0x861, 0x862, 0x8621, 0x863, 0x8631, 0x8632, 0x86321, 0x864,
0x8641, 0x8642, 0x86421, 0x8643, 0x86431, 0x86432, 0x864321, 0x865, 0x8651,
0x8652, 0x86521, 0x8653, 0x86531, 0x86532, 0x865321, 0x8654, 0x86541,
0x86542, 0x865421, 0x86543, 0x865431, 0x865432, 0x8654321, 0x87, 0x871,
0x872, 0x8721, 0x873, 0x8731, 0x8732, 0x87321, 0x874, 0x8741, 0x8742,
0x87421, 0x8743, 0x87431, 0x87432, 0x874321, 0x875, 0x8751, 0x8752, 0x87521,
0x8753, 0x87531, 0x87532, 0x875321, 0x8754, 0x87541, 0x87542, 0x875421,
0x87543, 0x875431, 0x875432, 0x8754321, 0x876, 0x8761, 0x8762, 0x87621,
0x8763, 0x87631, 0x87632, 0x876321, 0x8764, 0x87641, 0x87642, 0x876421,
0x87643, 0x876431, 0x876432, 0x8764321, 0x8765, 0x87651, 0x87652, 0x876521,
0x87653, 0x876531, 0x876532, 0x8765321, 0x87654, 0x876541, 0x876542,
0x8765421, 0x876543, 0x8765431, 0x8765432, 0x87654321
};
/**
* ** the python code that generated bitlist
* def bits2int(val):
* arr=0
* for shift in range(8,0,-1):
* if val & 0x80:
* arr = (arr << 4) | shift
* val = val << 1
* return arr
*
* def int_table():
* tbl = [ hex(bits2int(val)).strip('L') for val in range(256) ]
* return ','.join(tbl)
* ****
*/
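// Worked example (derived from bits2int above, not part of the original source):
// for the byte 0x92 = 10010010 binary, bits 1, 4 and 7 (0-based) are set, so
// bitlist[0x92] == 0x852 -- the 1-based offsets 2, 5 and 8 packed lowest nibble first.
// nextDoc() peels one nibble per call and adds wordShift, which is kept one low
// ("compensate for 1 based arrIndex" in advance()) to turn the offsets back to 0-based.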
// hmmm, what about an iterator that finds zeros though,
// or a reverse iterator... should they be separate classes
// for efficiency, or have a common root interface? (or
// maybe both? could ask for a SetBitsIterator, etc...
private final long[] arr;
private final int words;
private final int from;
private int i = -1;
private long word;
private int wordShift;
private int indexArray;
private int curDocId = -1;
public SlicedIterator(SlicedOpenBitSet obs) {
this.arr = obs.bits;
this.words = obs.wlen;
this.from = obs.from;
}
// 64 bit shifts
private void shift() {
if ((int) word == 0) {
wordShift += 32;
word = word >>> 32;
}
if ((word & 0x0000FFFF) == 0) {
wordShift += 16;
word >>>= 16;
}
if ((word & 0x000000FF) == 0) {
wordShift += 8;
word >>>= 8;
}
indexArray = bitlist[(int) word & 0xff];
}
/**
* ** alternate shift implementations
* // 32 bit shifts, but a long shift needed at the end
* private void shift2() {
* int y = (int)word;
* if (y==0) {wordShift +=32; y = (int)(word >>>32); }
* if ((y & 0x0000FFFF) == 0) { wordShift +=16; y>>>=16; }
* if ((y & 0x000000FF) == 0) { wordShift +=8; y>>>=8; }
* indexArray = bitlist[y & 0xff];
* word >>>= (wordShift +1);
* }
*
* private void shift3() {
* int lower = (int)word;
* int lowByte = lower & 0xff;
* if (lowByte != 0) {
* indexArray=bitlist[lowByte];
* return;
* }
* shift();
* }
* ****
*/
@Override
public int nextDoc() {
if (indexArray == 0) {
if (word != 0) {
word >>>= 8;
wordShift += 8;
}
while (word == 0) {
if (++i >= words) {
return curDocId = NO_MORE_DOCS;
}
word = arr[from + i];
wordShift = -1; // loop invariant code motion should move this
}
// after the first time, should I go with a linear search, or
// stick with the binary search in shift?
shift();
}
int bitIndex = (indexArray & 0x0f) + wordShift;
indexArray >>>= 4;
// should i<<6 be cached as a separate variable?
// it would only save one cycle in the best circumstances.
return curDocId = (i << 6) + bitIndex;
}
@Override
public int advance(int target) {
indexArray = 0;
i = target >> 6;
if (i >= words) {
word = 0; // set up so nextDoc() will also return NO_MORE_DOCS
return curDocId = NO_MORE_DOCS;
}
wordShift = target & 0x3f;
word = arr[from + i] >>> wordShift;
if (word != 0) {
wordShift--; // compensate for 1 based arrIndex
} else {
while (word == 0) {
if (++i >= words) {
return curDocId = NO_MORE_DOCS;
}
word = arr[from + i];
}
wordShift = -1;
}
shift();
int bitIndex = (indexArray & 0x0f) + wordShift;
indexArray >>>= 4;
// should i<<6 be cached as a separate variable?
// it would only save one cycle in the best circumstances.
return curDocId = (i << 6) + bitIndex;
}
@Override
public int docID() {
return curDocId;
}
}
}
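A quick usage sketch of the slice addressing (the numbers mirror the new test at the end of this commit; the assertions are illustrative): the set owns words [from, from + wlen) of a shared array, and bit indexes are relative to the start of the slice.

long[] shared = new long[100 + 8];           // 100 words of other data, then 8 words (512 bits) for this slice
SlicedOpenBitSet set = new SlicedOpenBitSet(shared, 8, 100);
set.fastSet(70);                             // word 70 >> 6 = 1, bit 70 & 0x3f = 6
assert (shared[100 + 1] & (1L << 6)) != 0;   // the bit lands in shared[from + 1]
assert set.cardinality() == 1;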


@@ -33,8 +33,6 @@ public interface FilterCache extends IndexComponent, CloseableComponent {
Filter cache(Filter filterToCache);
Filter weakCache(Filter filterToCache);
boolean isCached(Filter filter);
void clear(IndexReader reader);


@@ -50,10 +50,6 @@ public class NoneFilterCache extends AbstractIndexComponent implements FilterCac
return filterToCache;
}
@Override public Filter weakCache(Filter filterToCache) {
return filterToCache;
}
@Override public boolean isCached(Filter filter) {
return false;
}


@@ -27,7 +27,7 @@ import org.elasticsearch.common.lucene.docset.DocSet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.support.AbstractDoubleConcurrentMapFilterCache;
import org.elasticsearch.index.cache.filter.support.AbstractConcurrentMapFilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import java.util.concurrent.ConcurrentMap;
@@ -39,7 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
*
* @author kimchy (shay.banon)
*/
public class ResidentFilterCache extends AbstractDoubleConcurrentMapFilterCache implements MapEvictionListener<Filter, DocSet> {
public class ResidentFilterCache extends AbstractConcurrentMapFilterCache implements MapEvictionListener<Filter, DocSet> {
private final int maxSize;
@@ -53,7 +53,8 @@ public class ResidentFilterCache extends AbstractDoubleConcurrentMapFilterCache
this.expire = componentSettings.getAsTime("expire", null);
}
@Override protected ConcurrentMap<Filter, DocSet> buildCacheMap() {
@Override protected ConcurrentMap<Filter, DocSet> buildFilterMap() {
MapMaker mapMaker = new MapMaker();
if (maxSize != -1) {
mapMaker.maximumSize(maxSize);
@@ -64,20 +65,6 @@ public class ResidentFilterCache extends AbstractDoubleConcurrentMapFilterCache
return mapMaker.makeMap();
}
@Override protected ConcurrentMap<Filter, DocSet> buildWeakCacheMap() {
// DocSet are not really stored with strong reference only when searching on them...
// Filter might be stored in query cache
MapMaker mapMaker = new MapMaker().weakValues();
if (maxSize != -1) {
mapMaker.maximumSize(maxSize);
}
if (expire != null) {
mapMaker.expireAfterAccess(expire.nanos(), TimeUnit.NANOSECONDS);
}
mapMaker.evictionListener(this);
return mapMaker.makeMap();
}
@Override public String type() {
return "soft";
}


@@ -27,7 +27,7 @@ import org.elasticsearch.common.lucene.docset.DocSet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.support.AbstractDoubleConcurrentMapFilterCache;
import org.elasticsearch.index.cache.filter.support.AbstractConcurrentMapFilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import java.util.concurrent.ConcurrentMap;
@@ -39,7 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
*
* @author kimchy (shay.banon)
*/
public class SoftFilterCache extends AbstractDoubleConcurrentMapFilterCache implements MapEvictionListener<Filter, DocSet> {
public class SoftFilterCache extends AbstractConcurrentMapFilterCache implements MapEvictionListener<Filter, DocSet> {
private final int maxSize;
@@ -53,7 +53,7 @@ public class SoftFilterCache extends AbstractDoubleConcurrentMapFilterCache impl
this.expire = componentSettings.getAsTime("expire", null);
}
@Override protected ConcurrentMap<Filter, DocSet> buildCacheMap() {
@Override protected ConcurrentMap<Filter, DocSet> buildFilterMap() {
// DocSets are not stored via strong references here; they are only strongly referenced while a search is using them...
// the Filter itself might still be stored in the query cache
MapMaker mapMaker = new MapMaker().softValues();
@@ -66,20 +66,6 @@ public class SoftFilterCache extends AbstractDoubleConcurrentMapFilterCache impl
return mapMaker.makeMap();
}
@Override protected ConcurrentMap<Filter, DocSet> buildWeakCacheMap() {
// DocSet are not really stored with strong reference only when searching on them...
// Filter might be stored in query cache
MapMaker mapMaker = new MapMaker().weakValues();
if (maxSize != -1) {
mapMaker.maximumSize(maxSize);
}
if (expire != null) {
mapMaker.expireAfterAccess(expire.nanos(), TimeUnit.NANOSECONDS);
}
mapMaker.evictionListener(this);
return mapMaker.makeMap();
}
@Override public String type() {
return "soft";
}
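Both cache implementations are selected and tuned through index settings. A hedged sketch of wiring them up (the exact index.cache.filter.* keys and type names are assumptions; only the max-size and "expire" reads are visible in this diff):

Settings settings = ImmutableSettings.settingsBuilder()
    .put("index.cache.filter.type", "resident")  // or "soft" (assumed key and type names)
    .put("index.cache.filter.max_size", 1000)    // feeds maxSize above; -1 leaves the cache unbounded
    .put("index.cache.filter.expire", "5m")      // feeds componentSettings.getAsTime("expire", null)
    .build();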


@@ -113,10 +113,6 @@ public abstract class AbstractConcurrentMapFilterCache extends AbstractIndexComp
return new FilterCacheFilterWrapper(filterToCache, this);
}
@Override public Filter weakCache(Filter filterToCache) {
return cache(filterToCache);
}
@Override public boolean isCached(Filter filter) {
return filter instanceof FilterCacheFilterWrapper;
}


@@ -1,252 +0,0 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.filter.support;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.elasticsearch.common.collect.MapMaker;
import org.elasticsearch.common.lucene.docset.DocSet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.settings.IndexSettings;
import java.io.IOException;
import java.util.concurrent.ConcurrentMap;
import static org.elasticsearch.common.lucene.docset.DocSets.*;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.*;
/**
* A base concurrent filter cache that accepts the actual cache to use.
*
* @author kimchy (shay.banon)
*/
public abstract class AbstractDoubleConcurrentMapFilterCache extends AbstractIndexComponent implements FilterCache, IndexReader.ReaderFinishedListener {
final ConcurrentMap<Object, ConcurrentMap<Filter, DocSet>> cache;
final ConcurrentMap<Object, ConcurrentMap<Filter, DocSet>> weakCache;
protected AbstractDoubleConcurrentMapFilterCache(Index index, @IndexSettings Settings indexSettings) {
super(index, indexSettings);
// weak keys is fine, it will only be cleared once IndexReader references will be removed
// (assuming clear(...) will not be called)
this.cache = new MapMaker().weakKeys().makeMap();
this.weakCache = new MapMaker().weakKeys().makeMap();
}
@Override public void close() {
clear();
}
@Override public void clear() {
cache.clear();
weakCache.clear();
}
@Override public void finished(IndexReader reader) {
clear(reader);
}
@Override public void clear(IndexReader reader) {
ConcurrentMap<Filter, DocSet> map = cache.remove(reader.getCoreCacheKey());
// help soft/weak handling GC
if (map != null) {
map.clear();
}
map = weakCache.remove(reader.getCoreCacheKey());
// help soft/weak handling GC
if (map != null) {
map.clear();
}
}
@Override public void clearUnreferenced() {
}
@Override public long count() {
long entries = 0;
for (ConcurrentMap<Filter, DocSet> map : cache.values()) {
entries += map.size();
}
return entries;
}
@Override public long sizeInBytes() {
long sizeInBytes = 0;
for (ConcurrentMap<Filter, DocSet> map : cache.values()) {
for (DocSet docSet : map.values()) {
sizeInBytes += docSet.sizeInBytes();
}
}
for (ConcurrentMap<Filter, DocSet> map : weakCache.values()) {
for (DocSet docSet : map.values()) {
sizeInBytes += docSet.sizeInBytes();
}
}
return sizeInBytes;
}
@Override public Filter cache(Filter filterToCache) {
if (isCached(filterToCache)) {
return filterToCache;
}
return new FilterCacheFilterWrapper(filterToCache, this);
}
@Override public Filter weakCache(Filter filterToCache) {
if (isCached(filterToCache)) {
return filterToCache;
}
return new FilterWeakCacheFilterWrapper(filterToCache, this);
}
@Override public boolean isCached(Filter filter) {
return filter instanceof CacheMarker;
}
protected ConcurrentMap<Filter, DocSet> buildCacheMap() {
return newConcurrentMap();
}
protected ConcurrentMap<Filter, DocSet> buildWeakCacheMap() {
return newConcurrentMap();
}
static abstract class CacheMarker extends Filter {
}
// LUCENE MONITOR: Check next version Lucene for CachingWrapperFilter, consider using that logic
// and not use the DeletableConstantScoreQuery, instead pass the DeletesMode enum to the cache method
// see: https://issues.apache.org/jira/browse/LUCENE-2468
static class FilterCacheFilterWrapper extends CacheMarker {
private final Filter filter;
private final AbstractDoubleConcurrentMapFilterCache cache;
FilterCacheFilterWrapper(Filter filter, AbstractDoubleConcurrentMapFilterCache cache) {
this.filter = filter;
this.cache = cache;
}
@Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
ConcurrentMap<Filter, DocSet> cachedFilters = cache.cache.get(reader.getCoreCacheKey());
if (cachedFilters == null) {
cachedFilters = cache.buildCacheMap();
reader.addReaderFinishedListener(cache);
ConcurrentMap<Filter, DocSet> prev = cache.cache.putIfAbsent(reader.getCoreCacheKey(), cachedFilters);
if (prev != null) {
cachedFilters = prev;
}
}
DocSet docSet = cachedFilters.get(filter);
if (docSet != null) {
return docSet;
}
DocIdSet docIdSet = filter.getDocIdSet(reader);
docSet = cacheable(reader, docIdSet);
DocSet prev = cachedFilters.putIfAbsent(filter, docSet);
if (prev != null) {
docSet = prev;
}
return docSet;
}
public String toString() {
return "FilterCacheFilterWrapper(" + filter + ")";
}
public boolean equals(Object o) {
if (!(o instanceof FilterCacheFilterWrapper)) return false;
return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
}
public int hashCode() {
return filter.hashCode() ^ 0x1117BF25;
}
}
static class FilterWeakCacheFilterWrapper extends CacheMarker {
private final Filter filter;
private final AbstractDoubleConcurrentMapFilterCache cache;
FilterWeakCacheFilterWrapper(Filter filter, AbstractDoubleConcurrentMapFilterCache cache) {
this.filter = filter;
this.cache = cache;
}
@Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
DocSet docSet;
// first check if its in the actual cache
ConcurrentMap<Filter, DocSet> cachedFilters = cache.cache.get(reader.getCoreCacheKey());
if (cachedFilters != null) {
docSet = cachedFilters.get(filter);
if (docSet != null) {
return docSet;
}
}
// now, handle it in the weak cache
ConcurrentMap<Filter, DocSet> weakCacheFilters = cache.weakCache.get(reader.getCoreCacheKey());
if (weakCacheFilters == null) {
weakCacheFilters = cache.buildWeakCacheMap();
reader.addReaderFinishedListener(cache);
ConcurrentMap<Filter, DocSet> prev = cache.weakCache.putIfAbsent(reader.getCoreCacheKey(), weakCacheFilters);
if (prev != null) {
weakCacheFilters = prev;
}
}
docSet = weakCacheFilters.get(filter);
if (docSet != null) {
return docSet;
}
DocIdSet docIdSet = filter.getDocIdSet(reader);
docSet = cacheable(reader, docIdSet);
DocSet prev = weakCacheFilters.putIfAbsent(filter, docSet);
if (prev != null) {
docSet = prev;
}
return docSet;
}
public String toString() {
return "FilterCacheFilterWrapper(" + filter + ")";
}
public boolean equals(Object o) {
if (!(o instanceof FilterCacheFilterWrapper)) return false;
return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
}
public int hashCode() {
return filter.hashCode() ^ 0x1117BF25;
}
}
}


@@ -89,12 +89,8 @@ public class PrefixFilterParser extends AbstractIndexComponent implements XConte
Filter filter = new PrefixFilter(new Term(fieldName, value));
// we weak cache the filter if not cached, since in any case it builds an OpenBitSet
// we might as well weak cache it...
if (cache) {
filter = parseContext.cacheFilter(filter);
} else {
filter = parseContext.cacheWeakFilter(filter);
}
filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);


@@ -112,10 +112,6 @@ public class QueryParseContext {
return indexQueryParser.indexCache.filter().cache(filter);
}
public Filter cacheWeakFilter(Filter filter) {
return indexQueryParser.indexCache.filter().weakCache(filter);
}
public void addNamedFilter(String name, Filter filter) {
namedFilters.put(name, filter);
}


@@ -118,12 +118,8 @@ public class RangeFilterParser extends AbstractIndexComponent implements XConten
filter = new TermRangeFilter(fieldName, from, to, includeLower, includeUpper);
}
// we weak cache the filter if not cached, since in any case it builds an OpenBitSet
// we might as well weak cache it...
if (cache) {
filter = parseContext.cacheFilter(filter);
} else {
filter = parseContext.cacheWeakFilter(filter);
}
filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);


@@ -94,12 +94,8 @@ public class TermFilterParser extends AbstractIndexComponent implements XContent
filter = new TermFilter(new Term(fieldName, value));
}
// we weak cache the filter if not cached, since in any case it builds an OpenBitSet
// we might as well weak cache it...
if (cache) {
filter = parseContext.cacheFilter(filter);
} else {
filter = parseContext.cacheWeakFilter(filter);
}
filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);


@@ -94,12 +94,8 @@ public class TermsFilterParser extends AbstractIndexComponent implements XConten
}
Filter filter = termsFilter;
// we weak cache the filter if not cached, since in any case it builds an OpenBitSet
// we might as well weak cache it...
if (cache) {
filter = parseContext.cacheFilter(filter);
} else {
filter = parseContext.cacheWeakFilter(filter);
}
filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);


@@ -47,7 +47,7 @@ public class QueryFacetCollector extends AbstractFacetCollector {
if (possibleFilter != null) {
this.filter = possibleFilter;
} else {
this.filter = filterCache.weakCache(new QueryWrapperFilter(query));
this.filter = new QueryWrapperFilter(query);
}
}


@@ -0,0 +1,45 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.docset;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.OpenBitSet;
import org.testng.annotations.Test;
import java.io.IOException;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
@Test
public class SlicedOpenBitSetTests {
@Test public void simpleTests() throws IOException {
int numberOfBits = 500;
SlicedOpenBitSet bitSet = new SlicedOpenBitSet(new long[OpenBitSet.bits2words(numberOfBits) + 100], OpenBitSet.bits2words(numberOfBits), 100);
bitSet.fastSet(100);
assertThat(bitSet.fastGet(100), equalTo(true));
DocIdSetIterator iterator = bitSet.iterator();
assertThat(iterator.nextDoc(), equalTo(100));
assertThat(iterator.nextDoc(), equalTo(DocIdSetIterator.NO_MORE_DOCS));
}
}
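The test above only exercises nextDoc(); a complementary sketch of advance() on the same kind of slice (assumed to follow the usual DocIdSetIterator contract of returning the first doc at or after the target; the assertions are illustrative):

SlicedOpenBitSet bitSet = new SlicedOpenBitSet(new long[8 + 100], 8, 100);
bitSet.fastSet(100);
bitSet.fastSet(300);
DocIdSetIterator iterator = bitSet.iterator();
assert iterator.advance(99) == 100;                            // first set bit >= 99
assert iterator.advance(101) == 300;                           // skips the bit at 100
assert iterator.advance(301) == DocIdSetIterator.NO_MORE_DOCS; // nothing set past 300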


@@ -0,0 +1,70 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.stress.gcbehavior;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.index.query.xcontent.FilterBuilders.*;
import static org.elasticsearch.index.query.xcontent.QueryBuilders.*;
public class FilterCacheGcStress {
public static void main(String[] args) {
Settings settings = ImmutableSettings.settingsBuilder()
.put("gateway.type", "none")
.build();
Node node = NodeBuilder.nodeBuilder().settings(settings).node();
final Client client = node.client();
client.admin().indices().prepareCreate("test").execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
final AtomicBoolean stop = new AtomicBoolean();
Thread indexingThread = new Thread() {
@Override public void run() {
while (!stop.get()) {
client.prepareIndex("test", "type1").setSource("field", System.currentTimeMillis()).execute().actionGet();
}
}
};
indexingThread.start();
Thread searchThread = new Thread() {
@Override public void run() {
while (!stop.get()) {
client.prepareSearch()
.setQuery(filteredQuery(matchAllQuery(), rangeFilter("field").from(System.currentTimeMillis())))
.execute().actionGet();
}
}
};
searchThread.start();
}
}