mirror of https://github.com/apache/lucene.git
fix DV/FC thread safety
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4547@1436340 13f79535-47bb-0310-9956-ffa450edef68
parent 9502b8e6cc
commit 4b71fcbf72
@@ -21,6 +21,12 @@ import org.apache.lucene.util.BytesRef;

public abstract class BinaryDocValues {

  /** Lookup the value for document.
   *
   * <p><b>NOTE</b>: you should not share the provided
   * {@link BytesRef} result with other doc values sources
   * (other BinaryDocValues or SortedDocValues): a single
   * "private" instance should be used for each source. */
  public abstract void get(int docID, BytesRef result);

  public static final byte[] MISSING = new byte[0];
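The NOTE added above tightens the usage contract for get(): each doc values
source needs its own scratch BytesRef. A minimal sketch of the intended
pattern (field names are hypothetical, not from this commit):

    BinaryDocValues bdv = reader.getBinaryDocValues("bin");
    SortedDocValues sdv = reader.getSortedDocValues("sorted");

    BytesRef bdvScratch = new BytesRef();  // private to bdv
    BytesRef sdvScratch = new BytesRef();  // private to sdv

    bdv.get(docID, bdvScratch);
    sdv.get(docID, sdvScratch);  // do NOT reuse bdvScratch here

The thread test changed at the bottom of this commit exercises exactly this
rule ("Cannot share a single scratch against two sources").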
@@ -370,8 +370,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Bytes getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Bytes() {
        @Override
        public byte get(int docID) {
          return (byte) valuesIn.get(docID);
        }
      };
    } else {
      return (Bytes) caches.get(Byte.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class BytesFromArray extends Bytes {
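This getBytes change is the template for every numeric getter below: when the
field has NumericDocValues, FieldCacheImpl returns a fresh stateless wrapper
and caches nothing itself, because SegmentReader already caches the doc
values instance per thread; only the uninverting fallback is cached here.
From the caller's side (a sketch; "number" is the field name the thread test
below uses):

    FieldCache.Bytes bytes = FieldCache.DEFAULT.getBytes(ar, "number", false);
    byte b = bytes.get(docID);
    // DV path: 'bytes' is a throwaway wrapper over the per-thread DV instance,
    // so it is safe to obtain on any thread.
    // Fallback path: the field is uninverted once and cached by FieldCacheImpl.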
@@ -396,16 +408,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Bytes() {
          @Override
          public byte get(int docID) {
            return (byte) valuesIn.get(docID);
          }
        };
      } else {

        int maxDoc = reader.maxDoc();
        final byte[] values;
        final ByteParser parser = (ByteParser) key.custom;
@@ -441,7 +443,6 @@ class FieldCacheImpl implements FieldCache {

        return new BytesFromArray(values);
      }
    }
  }

  // inherit javadocs
  public Shorts getShorts(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
@@ -451,8 +452,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Shorts getShorts(AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Shorts() {
        @Override
        public short get(int docID) {
          return (short) valuesIn.get(docID);
        }
      };
    } else {
      return (Shorts) caches.get(Short.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class ShortsFromArray extends Shorts {
@@ -477,15 +490,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Shorts() {
          @Override
          public short get(int docID) {
            return (short) valuesIn.get(docID);
          }
        };
      } else {
        int maxDoc = reader.maxDoc();
        final short[] values;
        final ShortParser parser = (ShortParser) key.custom;
@@ -519,7 +523,6 @@ class FieldCacheImpl implements FieldCache {

        return new ShortsFromArray(values);
      }
    }
  }

  // inherit javadocs
  public Ints getInts(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
@@ -529,8 +532,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Ints() {
        @Override
        public int get(int docID) {
          return (int) valuesIn.get(docID);
        }
      };
    } else {
      return (Ints) caches.get(Integer.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class IntsFromArray extends Ints {
@@ -555,15 +570,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Ints() {
          @Override
          public int get(int docID) {
            return (int) valuesIn.get(docID);
          }
        };
      } else {
        final int[] values;
        final IntParser parser = (IntParser) key.custom;
        if (parser == null) {
@@ -605,7 +611,6 @@ class FieldCacheImpl implements FieldCache {

        return new IntsFromArray(values);
      }
    }
  }

  // nocommit must advertise that this does NOT work if you
  // index only doc values for the field ... it will say no
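The nocommit above refers to the docsWithField bits, which are built by
uninverting the postings: a field indexed only as doc values has no terms, so
it will (incorrectly) look as if no document has a value. A sketch of the
trap (field name hypothetical, not from this commit):

    // Assume "dvOnly" exists on documents only as a numeric doc values
    // field, never as an indexed (inverted) field:
    Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(ar, "dvOnly");
    boolean hasValue = docsWithField.get(docID);  // false despite the value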
@@ -679,8 +684,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Floats() {
        @Override
        public float get(int docID) {
          return Float.intBitsToFloat((int) valuesIn.get(docID));
        }
      };
    } else {
      return (Floats) caches.get(Float.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class FloatsFromArray extends Floats {
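The Floats wrapper above assumes the numeric doc values hold the raw IEEE
bits of each float, so get() simply reverses the conversion done at index
time (the Doubles getter below does the same via Double.longBitsToDouble). A
round-trip sketch:

    long stored = Float.floatToRawIntBits(3.25f);    // what indexing puts in the DV
    float back = Float.intBitsToFloat((int) stored); // what the wrapper recovers
    assert back == 3.25f;

The thread test below checks the same identity with
Float.intBitsToFloat((int) numbers.get(docID).longValue()).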
@@ -705,15 +722,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Floats() {
          @Override
          public float get(int docID) {
            return Float.intBitsToFloat((int) valuesIn.get(docID));
          }
        };
      } else {
        final float[] values;
        final FloatParser parser = (FloatParser) key.custom;
        if (parser == null) {
@@ -756,7 +764,6 @@ class FieldCacheImpl implements FieldCache {

        return new FloatsFromArray(values);
      }
    }
  }

  // inherit javadocs
  public Longs getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
@@ -766,8 +773,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Longs getLongs(AtomicReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Longs() {
        @Override
        public long get(int docID) {
          return valuesIn.get(docID);
        }
      };
    } else {
      return (Longs) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class LongsFromArray extends Longs {
@@ -792,15 +811,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Longs() {
          @Override
          public long get(int docID) {
            return valuesIn.get(docID);
          }
        };
      } else {
        final long[] values;
        final LongParser parser = (LongParser) key.custom;
        if (parser == null) {
@@ -842,7 +852,6 @@ class FieldCacheImpl implements FieldCache {

        return new LongsFromArray(values);
      }
    }
  }

  // inherit javadocs
  public Doubles getDoubles(AtomicReader reader, String field, boolean setDocsWithField)
@@ -853,8 +862,20 @@ class FieldCacheImpl implements FieldCache {

  // inherit javadocs
  public Doubles getDoubles(AtomicReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
      throws IOException {
    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return new Doubles() {
        @Override
        public double get(int docID) {
          return Double.longBitsToDouble(valuesIn.get(docID));
        }
      };
    } else {
      return (Doubles) caches.get(Double.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
    }
  }

  // nocommit move up?
  static class DoublesFromArray extends Doubles {
@@ -879,15 +900,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {

      final NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        return new Doubles() {
          @Override
          public double get(int docID) {
            return Double.longBitsToDouble(valuesIn.get(docID));
          }
        };
      } else {
        final double[] values;
        final DoubleParser parser = (DoubleParser) key.custom;
        if (parser == null) {
@@ -929,7 +941,6 @@ class FieldCacheImpl implements FieldCache {

        return new DoublesFromArray(values);
      }
    }
  }

  public static class SortedDocValuesImpl extends SortedDocValues {
    private final PagedBytes.Reader bytes;
@@ -1118,8 +1129,15 @@ class FieldCacheImpl implements FieldCache {

  }

  public SortedDocValues getTermsIndex(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException {
    SortedDocValues valuesIn = reader.getSortedDocValues(field);
    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return valuesIn;
    } else {
      return (SortedDocValues) caches.get(SortedDocValues.class).get(reader, new CacheKey(field, acceptableOverheadRatio), false);
    }
  }

  static class SortedDocValuesCache extends Cache {
    SortedDocValuesCache(FieldCacheImpl wrapper) {
@@ -1131,13 +1149,6 @@ class FieldCacheImpl implements FieldCache {

        throws IOException {

      final int maxDoc = reader.maxDoc();
      SortedDocValues valuesIn = reader.getSortedDocValues(key.field);
      if (valuesIn != null) {
        // nocommit we need thread DV test that would
        // uncover this bug!!
        // nocommit we should not cache in this case?
        return valuesIn;
      } else {

        Terms terms = reader.terms(key.field);
@@ -1233,7 +1244,6 @@ class FieldCacheImpl implements FieldCache {

        return new SortedDocValuesImpl(bytes.freeze(true), termOrdToBytesOffset.getMutable(), docToTermOrd.getMutable(), termOrd);
      }
    }
  }

  private static class BinaryDocValuesImpl extends BinaryDocValues {
    private final PagedBytes.Reader bytes;
@@ -1264,6 +1274,18 @@ class FieldCacheImpl implements FieldCache {

  }

  public BinaryDocValues getTerms(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException {
    BinaryDocValues valuesIn = reader.getBinaryDocValues(field);
    if (valuesIn == null) {
      // nocommit is this auto-fallback ... OK?
      valuesIn = reader.getSortedDocValues(field);
    }

    if (valuesIn != null) {
      // Not cached here by FieldCacheImpl (cached instead
      // per-thread by SegmentReader):
      return valuesIn;
    }

    return (BinaryDocValues) caches.get(BinaryDocValues.class).get(reader, new CacheKey(field, acceptableOverheadRatio), false);
  }
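Note the auto-fallback flagged by the nocommit: a field that only has
SortedDocValues can still be read through getTerms, since the assignment
above relies on SortedDocValues being usable as a BinaryDocValues in this
branch. Caller-visible effect (a sketch; field name hypothetical):

    BinaryDocValues terms = FieldCache.DEFAULT.getTerms(ar, "sortedField");
    // Succeeds even though "sortedField" has only SortedDocValues; as with
    // the numeric getters, the returned instance is not cached by
    // FieldCacheImpl because SegmentReader caches doc values per thread.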
@@ -1276,15 +1298,6 @@ class FieldCacheImpl implements FieldCache {

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
        throws IOException {

      BinaryDocValues valuesIn = reader.getBinaryDocValues(key.field);
      if (valuesIn == null) {
        // nocommit is this auto-fallback ... OK?
        valuesIn = reader.getSortedDocValues(key.field);
      }

      if (valuesIn != null) {
        return valuesIn;
      } else {
        final int maxDoc = reader.maxDoc();
        Terms terms = reader.terms(key.field);
@@ -1351,7 +1364,6 @@ class FieldCacheImpl implements FieldCache {

        return new BinaryDocValuesImpl(bytes.freeze(true), docToOffset.getMutable());
      }
    }
  }

  public DocTermOrds getDocTermOrds(AtomicReader reader, String field) throws IOException {
    return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new CacheKey(field, null), false);
@@ -82,13 +82,34 @@ public class TestDocValuesWithThreads extends LuceneTestCase {

            startingGun.await();
            int iters = atLeast(1000);
            BytesRef scratch = new BytesRef();
            BytesRef scratch2 = new BytesRef();
            for(int iter=0;iter<iters;iter++) {
              int docID = threadRandom.nextInt(numDocs);
              assertEquals(numbers.get(docID).longValue(), ndv.get(docID));
              switch(threadRandom.nextInt(6)) {
              case 0:
                assertEquals((byte) numbers.get(docID).longValue(), FieldCache.DEFAULT.getBytes(ar, "number", false).get(docID));
                break;
              case 1:
                assertEquals((short) numbers.get(docID).longValue(), FieldCache.DEFAULT.getShorts(ar, "number", false).get(docID));
                break;
              case 2:
                assertEquals((int) numbers.get(docID).longValue(), FieldCache.DEFAULT.getInts(ar, "number", false).get(docID));
                break;
              case 3:
                assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getLongs(ar, "number", false).get(docID));
                break;
              case 4:
                assertEquals(Float.intBitsToFloat((int) numbers.get(docID).longValue()), FieldCache.DEFAULT.getFloats(ar, "number", false).get(docID), 0.0f);
                break;
              case 5:
                assertEquals(Double.longBitsToDouble(numbers.get(docID).longValue()), FieldCache.DEFAULT.getDoubles(ar, "number", false).get(docID), 0.0);
                break;
              }
              bdv.get(docID, scratch);
              assertEquals(binary.get(docID), scratch);
              sdv.get(docID, scratch);
              assertEquals(sorted.get(docID), scratch);
              // Cannot share a single scratch against two "sources":
              sdv.get(docID, scratch2);
              assertEquals(sorted.get(docID), scratch2);
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
@@ -41,7 +41,7 @@ import org.apache.lucene.util._TestUtil;

/**
 * Tests IndexSearcher's searchAfter() method
 */

// nocommit fail w/ OOME?: ant test -Dtestcase=TestSearchAfter -Dtests.method=testQueries -Dtests.seed=98B4DA915983D1AE -Dtests.slow=true -Dtests.locale=sr -Dtests.timezone=Etc/GMT+2 -Dtests.file.encoding=UTF-8
public class TestSearchAfter extends LuceneTestCase {
  private Directory dir;
  private IndexReader reader;