mirror of https://github.com/apache/lucene.git
pull from DV under FC.getXXX if possible
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4547@1410878 13f79535-47bb-0310-9956-ffa450edef68
parent 6735e95b1f
commit f9bfb920c6
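
Note on the patch below: it removes the boolean "direct" flag from the AtomicReader.get*DocValues methods, makes SegmentCoreReaders return null (rather than throw) on a doc-values type mismatch, and teaches FieldCacheImpl to read doc values before falling back to uninverting the field; FieldCacheImpl.Entry is renamed to CacheKey along the way. A minimal caller-side sketch of the API after this commit, grounded in the TestDemoDocValue changes further down (the directory setup is assumed, not part of the patch):

    // Sketch only: branch-era (lucene4547) API as changed by this commit.
    IndexReader ireader = DirectoryReader.open(directory);
    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
    long value = dv.get(0);   // doc values are read per leaf, addressed by docID
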
@@ -414,7 +414,7 @@ public class SimpleTextSimpleDocValuesFormat extends SimpleDocValuesFormat {
     }
 
     @Override
-    public NumericDocValues getDirectNumeric(FieldInfo fieldInfo) throws IOException {
+    public NumericDocValues getNumeric(FieldInfo fieldInfo) throws IOException {
       final OneField field = fields.get(fieldInfo.name);
 
       // SegmentCoreReaders already verifies this field is
@@ -454,7 +454,7 @@ public class SimpleTextSimpleDocValuesFormat extends SimpleDocValuesFormat {
     }
 
     @Override
-    public BinaryDocValues getDirectBinary(FieldInfo fieldInfo) throws IOException {
+    public BinaryDocValues getBinary(FieldInfo fieldInfo) throws IOException {
       final OneField field = fields.get(fieldInfo.name);
 
       // SegmentCoreReaders already verifies this field is
@@ -497,7 +497,7 @@ public class SimpleTextSimpleDocValuesFormat extends SimpleDocValuesFormat {
     }
 
     @Override
-    public SortedDocValues getDirectSorted(FieldInfo fieldInfo) throws IOException {
+    public SortedDocValues getSorted(FieldInfo fieldInfo) throws IOException {
       final OneField field = fields.get(fieldInfo.name);
 
       // SegmentCoreReaders already verifies this field is

@@ -20,6 +20,7 @@ package org.apache.lucene.codecs;
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.MergeState;
@@ -36,10 +37,14 @@ public abstract class BinaryDocValuesConsumer {
     for (AtomicReader reader : mergeState.readers) {
       final int maxDoc = reader.maxDoc();
       final Bits liveDocs = reader.getLiveDocs();
-      final Source source = reader.docValues(mergeState.fieldInfo.name).getDirectSource();
+
+      // nocommit what if this is null...? need default source?
+      final BinaryDocValues source = reader.getBinaryDocValues(mergeState.fieldInfo.name);
+
       for (int i = 0; i < maxDoc; i++) {
         if (liveDocs == null || liveDocs.get(i)) {
-          add(source.getBytes(i, bytes));
+          source.get(i, bytes);
+          add(bytes);
         }
         docCount++;
         mergeState.checkAbort.work(300);

@@ -20,10 +20,11 @@ package org.apache.lucene.codecs;
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.MergeState;
-import org.apache.lucene.index.DocValues.Source;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.util.Bits;
 
 public abstract class NumericDocValuesConsumer {
@@ -35,10 +36,11 @@ public abstract class NumericDocValuesConsumer {
     for (AtomicReader reader : mergeState.readers) {
       final int maxDoc = reader.maxDoc();
       final Bits liveDocs = reader.getLiveDocs();
-      final Source source = reader.docValues(mergeState.fieldInfo.name).getDirectSource();
+      // nocommit what if this is null...? need default source?
+      final NumericDocValues source = reader.getNumericDocValues(mergeState.fieldInfo.name);
      for (int i = 0; i < maxDoc; i++) {
        if (liveDocs == null || liveDocs.get(i)) {
-          add(source.getInt(i));
+          add(source.get(i));
        }
        docCount++;
        mergeState.checkAbort.work(300);

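The merge loop above now reads from a plain NumericDocValues, whose entire contract is a single long get(int docID) method. A minimal sketch of a source backed by an array; this exact anonymous-class shape appears in the code deleted from SimpleDVProducer below (the array is illustrative):

    final long[] values = new long[maxDoc];   // one value per docID, illustrative
    NumericDocValues source = new NumericDocValues() {
      @Override
      public long get(int docID) {
        return values[docID];
      }
    };
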
@@ -35,86 +35,9 @@ public abstract class SimpleDVProducer implements Closeable {
     this.maxDoc = maxDoc;
   }
 
-  public abstract NumericDocValues getDirectNumeric(FieldInfo field) throws IOException;
+  public abstract NumericDocValues getNumeric(FieldInfo field) throws IOException;
 
-  /** Loads all values into RAM. */
-  public NumericDocValues getNumeric(FieldInfo field) throws IOException {
-    NumericDocValues source = getDirectNumeric(field);
-    // nocommit more ram efficient?
-    final long[] values = new long[maxDoc];
-    for(int docID=0;docID<maxDoc;docID++) {
-      values[docID] = source.get(docID);
-    }
-    return new NumericDocValues() {
-      @Override
-      public long get(int docID) {
-        return values[docID];
-      }
-    };
-  }
+  public abstract BinaryDocValues getBinary(FieldInfo field) throws IOException;
 
-  public abstract BinaryDocValues getDirectBinary(FieldInfo field) throws IOException;
-
-  /** Loads all values into RAM. */
-  public BinaryDocValues getBinary(FieldInfo field) throws IOException {
-
-    BinaryDocValues source = getDirectBinary(field);
-
-    // nocommit more ram efficient
-    final byte[][] values = new byte[maxDoc][];
-    BytesRef scratch = new BytesRef();
-    for(int docID=0;docID<maxDoc;docID++) {
-      source.get(docID, scratch);
-      values[docID] = new byte[scratch.length];
-      System.arraycopy(scratch.bytes, scratch.offset, values[docID], 0, scratch.length);
-    }
-
-    return new BinaryDocValues() {
-      @Override
-      public void get(int docID, BytesRef result) {
-        result.bytes = values[docID];
-        result.offset = 0;
-        result.length = result.bytes.length;
-      }
-    };
-  }
-
-  public abstract SortedDocValues getDirectSorted(FieldInfo field) throws IOException;
-
-  /** Loads all values into RAM. */
-  public SortedDocValues getSorted(FieldInfo field) throws IOException {
-    SortedDocValues source = getDirectSorted(field);
-    final int valueCount = source.getValueCount();
-    final byte[][] values = new byte[valueCount][];
-    BytesRef scratch = new BytesRef();
-    for(int ord=0;ord<valueCount;ord++) {
-      source.lookupOrd(ord, scratch);
-      values[ord] = new byte[scratch.length];
-      System.arraycopy(scratch.bytes, scratch.offset, values[ord], 0, scratch.length);
-    }
-
-    final int[] ords = new int[maxDoc];
-    for(int docID=0;docID<maxDoc;docID++) {
-      ords[docID] = source.getOrd(docID);
-    }
-
-    return new SortedDocValues() {
-      @Override
-      public int getOrd(int docID) {
-        return ords[docID];
-      }
-
-      @Override
-      public void lookupOrd(int ord, BytesRef result) {
-        result.bytes = values[ord];
-        result.offset = 0;
-        result.length = result.bytes.length;
-      }
-
-      @Override
-      public int getValueCount() {
-        return valueCount;
-      }
-    };
-  }
+  public abstract SortedDocValues getSorted(FieldInfo field) throws IOException;
 }

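SimpleDVProducer loses its RAM-loading default implementations; getNumeric, getBinary, and getSorted are now plain abstract methods. A caller that still wants an eagerly loaded numeric view can reproduce the deleted default outside the producer, e.g. (sketch assembled from the removed code above; the helper name is hypothetical):

    // Hypothetical helper, equivalent to the deleted getNumeric() default:
    static NumericDocValues loadIntoRam(NumericDocValues source, int maxDoc) {
      final long[] values = new long[maxDoc];
      for (int docID = 0; docID < maxDoc; docID++) {
        values[docID] = source.get(docID);   // copy every value up front
      }
      return new NumericDocValues() {
        @Override
        public long get(int docID) {
          return values[docID];              // served from the RAM copy
        }
      };
    }
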
@@ -25,6 +25,7 @@ import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocValues.SortedSource;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.MergeState;
+import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
@@ -57,7 +58,7 @@ public abstract class SortedDocValuesConsumer {
     AtomicReader reader;
     FixedBitSet liveTerms;
     int ord = -1;
-    SortedSource source;
+    SortedDocValues values;
     BytesRef scratch = new BytesRef();
 
     // nocommit can we factor out the compressed fields
@@ -67,10 +68,10 @@ public abstract class SortedDocValuesConsumer {
     int[] segOrdToMergedOrd;
 
     public BytesRef nextTerm() {
-      while (ord < source.getValueCount()-1) {
+      while (ord < values.getValueCount()-1) {
         ord++;
         if (liveTerms == null || liveTerms.get(ord)) {
-          source.getByOrd(ord, scratch);
+          values.lookupOrd(ord, scratch);
           return scratch;
         } else {
           // Skip "deleted" terms (ie, terms that were not
@@ -98,26 +99,20 @@ public abstract class SortedDocValuesConsumer {
 
     // First pass: mark "live" terms
     for (AtomicReader reader : mergeState.readers) {
-      DocValues docvalues = reader.docValues(mergeState.fieldInfo.name);
-      final SortedSource source;
       // nocommit what if this is null...? need default source?
       int maxDoc = reader.maxDoc();
-      if (docvalues == null) {
-        source = DocValues.getDefaultSortedSource(mergeState.fieldInfo.getDocValuesType(), maxDoc);
-      } else {
-        source = (SortedSource) docvalues.getDirectSource();
-      }
 
       SegmentState state = new SegmentState();
       state.reader = reader;
-      state.source = source;
+      state.values = reader.getSortedDocValues(mergeState.fieldInfo.name);
       segStates.add(state);
-      assert source.getValueCount() < Integer.MAX_VALUE;
+      assert state.values.getValueCount() < Integer.MAX_VALUE;
       if (reader.hasDeletions()) {
-        state.liveTerms = new FixedBitSet(source.getValueCount());
+        state.liveTerms = new FixedBitSet(state.values.getValueCount());
         Bits liveDocs = reader.getLiveDocs();
         for(int docID=0;docID<maxDoc;docID++) {
           if (liveDocs.get(docID)) {
-            state.liveTerms.set(source.ord(docID));
+            state.liveTerms.set(state.values.getOrd(docID));
           }
         }
       }
@@ -135,7 +130,7 @@ public abstract class SortedDocValuesConsumer {
       // nocommit we could defer this to 3rd pass (and
       // reduce transient RAM spike) but then
       // we'd spend more effort computing the mapping...:
-      segState.segOrdToMergedOrd = new int[segState.source.getValueCount()];
+      segState.segOrdToMergedOrd = new int[segState.values.getValueCount()];
       q.add(segState);
     }
   }
@@ -184,7 +179,7 @@ public abstract class SortedDocValuesConsumer {
     int maxDoc = segState.reader.maxDoc();
     for(int docID=0;docID<maxDoc;docID++) {
       if (liveDocs == null || liveDocs.get(docID)) {
-        int segOrd = segState.source.ord(docID);
+        int segOrd = segState.values.getOrd(docID);
         int mergedOrd = segState.segOrdToMergedOrd[segOrd];
         consumer.addDoc(mergedOrd);
       }

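The sorted merge above builds, per segment, a segOrdToMergedOrd table so that each document's segment-local ord can be rewritten into the merged ord space in the final pass. A toy illustration of what that table holds (terms and values invented for the example):

    // segment 1 terms: [apple, pear]         -> seg ords 0,1
    // segment 2 terms: [banana, pear]        -> seg ords 0,1
    // merged terms:    [apple, banana, pear] -> merged ords 0,1,2
    int[] seg1OrdToMergedOrd = {0, 2};   // apple -> 0, pear -> 2
    int[] seg2OrdToMergedOrd = {1, 2};   // banana -> 1, pear -> 2
    // per document: consumer.addDoc(segOrdToMergedOrd[values.getOrd(docID)])
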
@@ -42,7 +42,9 @@ public class DoubleDocValuesField extends StoredField {
   */
  public static final FieldType TYPE = new FieldType();
  static {
-    TYPE.setDocValueType(DocValues.Type.FLOAT_64);
+    // nocommit kinda messy ... if user calls .numericValue
+    // they get back strange int ... hmmm
+    TYPE.setDocValueType(DocValues.Type.FIXED_INTS_64);
    TYPE.freeze();
  }
 
@@ -54,6 +56,8 @@ public class DoubleDocValuesField extends StoredField {
   */
  public DoubleDocValuesField(String name, double value) {
    super(name, TYPE);
-    fieldsData = Double.valueOf(value);
+    // nocommit kinda messy ... if user calls .numericValue
+    // they get back strange int ... hmmm
+    fieldsData = Double.doubleToRawLongBits(value);
  }
 }

@@ -41,7 +41,9 @@ public class FloatDocValuesField extends StoredField {
   */
  public static final FieldType TYPE = new FieldType();
  static {
-    TYPE.setDocValueType(DocValues.Type.FLOAT_32);
+    // nocommit kinda messy ... if user calls .numericValue
+    // they get back strange int ... hmmm
+    TYPE.setDocValueType(DocValues.Type.FIXED_INTS_32);
    TYPE.freeze();
  }
 
@@ -53,6 +55,8 @@ public class FloatDocValuesField extends StoredField {
   */
  public FloatDocValuesField(String name, float value) {
    super(name, TYPE);
-    fieldsData = Float.valueOf(value);
+    // nocommit kinda messy ... if user calls .numericValue
+    // they get back strange int ... hmmm
+    fieldsData = Float.floatToRawIntBits(value);
  }
 }

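Both field classes now store floating-point values as raw IEEE 754 bit patterns in fixed-width integer doc values, which is why the nocommit warns that numericValue() returns a "strange int". The encoding is a lossless round trip; the decode side appears in the FieldCacheImpl changes further down:

    double d = 3.14;
    long bits = Double.doubleToRawLongBits(d);   // stored as FIXED_INTS_64
    assert Double.longBitsToDouble(bits) == d;   // decoded in FieldCacheImpl.getDoubles

    float f = 2.5f;
    int fbits = Float.floatToRawIntBits(f);      // stored as FIXED_INTS_32
    assert Float.intBitsToFloat(fbits) == f;     // decoded in FieldCacheImpl.getFloats
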
@@ -164,13 +164,13 @@ public abstract class AtomicReader extends IndexReader {
   public abstract DocValues docValues(String field) throws IOException;
 
   // nocommit javadocs
-  public abstract NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException;
+  public abstract NumericDocValues getNumericDocValues(String field) throws IOException;
 
   // nocommit javadocs
-  public abstract BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException;
+  public abstract BinaryDocValues getBinaryDocValues(String field) throws IOException;
 
   // nocommit javadocs
-  public abstract SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException;
+  public abstract SortedDocValues getSortedDocValues(String field) throws IOException;
 
   /**
    * Returns {@link DocValues} for this field's normalization values.

@@ -412,21 +412,21 @@ public class FilterAtomicReader extends AtomicReader {
   }
 
   @Override
-  public NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException {
+  public NumericDocValues getNumericDocValues(String field) throws IOException {
     ensureOpen();
-    return in.getNumericDocValues(field, direct);
+    return in.getNumericDocValues(field);
   }
 
   @Override
-  public BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException {
+  public BinaryDocValues getBinaryDocValues(String field) throws IOException {
     ensureOpen();
-    return in.getBinaryDocValues(field, direct);
+    return in.getBinaryDocValues(field);
   }
 
   @Override
-  public SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException {
+  public SortedDocValues getSortedDocValues(String field) throws IOException {
     ensureOpen();
-    return in.getSortedDocValues(field, direct);
+    return in.getSortedDocValues(field);
   }
 
   @Override

@@ -271,24 +271,24 @@ public final class ParallelAtomicReader extends AtomicReader {
   }
 
   @Override
-  public NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException {
+  public NumericDocValues getNumericDocValues(String field) throws IOException {
     ensureOpen();
     AtomicReader reader = fieldToReader.get(field);
-    return reader == null ? null : reader.getNumericDocValues(field, direct);
+    return reader == null ? null : reader.getNumericDocValues(field);
   }
 
   @Override
-  public BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException {
+  public BinaryDocValues getBinaryDocValues(String field) throws IOException {
     ensureOpen();
     AtomicReader reader = fieldToReader.get(field);
-    return reader == null ? null : reader.getBinaryDocValues(field, direct);
+    return reader == null ? null : reader.getBinaryDocValues(field);
   }
 
   @Override
-  public SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException {
+  public SortedDocValues getSortedDocValues(String field) throws IOException {
     ensureOpen();
     AtomicReader reader = fieldToReader.get(field);
-    return reader == null ? null : reader.getSortedDocValues(field, direct);
+    return reader == null ? null : reader.getSortedDocValues(field);
   }
 
   @Override

@@ -66,8 +66,6 @@ final class SegmentCoreReaders {
   final TermVectorsReader termVectorsReaderOrig;
   final CompoundFileDirectory cfsReader;
 
-  private final Map<FieldInfo,Object> docValuesCache = new HashMap<FieldInfo,Object>();
-
   final CloseableThreadLocal<StoredFieldsReader> fieldsReaderLocal = new CloseableThreadLocal<StoredFieldsReader>() {
     @Override
     protected StoredFieldsReader initialValue() {
@@ -155,72 +153,60 @@ final class SegmentCoreReaders {
   }
 
   // nocommit shrink the sync'd part to a cache miss
-  synchronized NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException {
+  synchronized NumericDocValues getNumericDocValues(String field) throws IOException {
     FieldInfo fi = fieldInfos.fieldInfo(field);
     if (fi == null) {
       // Field does not exist
       return null;
     }
     if (fi.getDocValuesType() == null) {
       // Field was not indexed with doc values
       return null;
     }
     if (!DocValues.isNumber(fi.getDocValuesType())) {
-      throw new IllegalArgumentException("field \"" + field + "\" was not indexed as a numeric doc values field");
+      // DocValues were not numeric
+      return null;
     }
 
-    if (direct) {
-      return simpleDVProducer.getDirectNumeric(fi);
-    } else {
-      if (!docValuesCache.containsKey(fi)) {
-        NumericDocValues dv = simpleDVProducer.getNumeric(fi);
-        if (dv != null) {
-          docValuesCache.put(fi, dv);
-        }
-      }
-      return (NumericDocValues) docValuesCache.get(fi);
-    }
+    return simpleDVProducer.getNumeric(fi);
   }
 
   // nocommit shrink the sync'd part to a cache miss
-  synchronized BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException {
+  synchronized BinaryDocValues getBinaryDocValues(String field) throws IOException {
     FieldInfo fi = fieldInfos.fieldInfo(field);
     if (fi == null) {
       // Field does not exist
       return null;
     }
     if (fi.getDocValuesType() == null) {
       // Field was not indexed with doc values
       return null;
     }
     if (!DocValues.isBytes(fi.getDocValuesType())) {
-      throw new IllegalArgumentException("field \"" + field + "\" was not indexed as a binary doc values field");
+      // DocValues were not binary
+      return null;
     }
 
-    if (direct) {
-      return simpleDVProducer.getDirectBinary(fi);
-    } else {
-      if (!docValuesCache.containsKey(fi)) {
-        BinaryDocValues dv = simpleDVProducer.getBinary(fi);
-        if (dv != null) {
-          docValuesCache.put(fi, dv);
-        }
-      }
-      return (BinaryDocValues) docValuesCache.get(fi);
-    }
+    return simpleDVProducer.getBinary(fi);
   }
 
   // nocommit shrink the sync'd part to a cache miss
-  synchronized SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException {
+  synchronized SortedDocValues getSortedDocValues(String field) throws IOException {
     FieldInfo fi = fieldInfos.fieldInfo(field);
     if (fi == null) {
       // Field does not exist
       return null;
     }
     if (fi.getDocValuesType() == null) {
       // Field was not indexed with doc values
       return null;
     }
     if (!DocValues.isSortedBytes(fi.getDocValuesType())) {
-      throw new IllegalArgumentException("field \"" + field + "\" was not indexed as a sorted doc values field");
+      // DocValues were not sorted
+      return null;
     }
 
-    if (direct) {
-      return simpleDVProducer.getDirectSorted(fi);
-    } else {
-      if (!docValuesCache.containsKey(fi)) {
-        SortedDocValues dv = simpleDVProducer.getSorted(fi);
-        if (dv != null) {
-          docValuesCache.put(fi, dv);
-        }
-      }
-      return (SortedDocValues) docValuesCache.get(fi);
-    }
+    return simpleDVProducer.getSorted(fi);
   }
 
   // nocommit binary, sorted too

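Behavior change worth noting: SegmentCoreReaders used to throw IllegalArgumentException when a field's doc values had the wrong type; after this patch all three failure cases collapse into null, so callers must null-check rather than catch (sketch; the field name is illustrative):

    NumericDocValues dv = reader.getNumericDocValues("price");
    if (dv == null) {
      // field missing, field indexed without doc values,
      // or doc values present but not numeric: indistinguishable now
    }
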
@@ -226,18 +226,18 @@ public final class SegmentReader extends AtomicReader {
   }
 
   @Override
-  public NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException {
-    return core.getNumericDocValues(field, direct);
+  public NumericDocValues getNumericDocValues(String field) throws IOException {
+    return core.getNumericDocValues(field);
   }
 
   @Override
-  public BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException {
-    return core.getBinaryDocValues(field, direct);
+  public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+    return core.getBinaryDocValues(field);
   }
 
   @Override
-  public SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException {
-    return core.getSortedDocValues(field, direct);
+  public SortedDocValues getSortedDocValues(String field) throws IOException {
+    return core.getSortedDocValues(field);
   }
 
   @Override

@@ -89,19 +89,19 @@ public final class SlowCompositeReaderWrapper extends AtomicReader {
   }
 
   @Override
-  public NumericDocValues getNumericDocValues(String field, boolean direct) throws IOException {
+  public NumericDocValues getNumericDocValues(String field) throws IOException {
     // nocommit todo
     return null;
   }
 
   @Override
-  public BinaryDocValues getBinaryDocValues(String field, boolean direct) throws IOException {
+  public BinaryDocValues getBinaryDocValues(String field) throws IOException {
     // nocommit todo
     return null;
   }
 
   @Override
-  public SortedDocValues getSortedDocValues(String field, boolean direct) throws IOException {
+  public SortedDocValues getSortedDocValues(String field) throws IOException {
     // nocommit todo
     return null;
   }

@@ -26,13 +26,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.WeakHashMap;
 
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocTermOrds;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -93,12 +96,12 @@ class FieldCacheImpl implements FieldCache {
       final Cache cache = cacheEntry.getValue();
       final Class<?> cacheType = cacheEntry.getKey();
       synchronized(cache.readerCache) {
-        for (final Map.Entry<Object,Map<Entry, Object>> readerCacheEntry : cache.readerCache.entrySet()) {
+        for (final Map.Entry<Object,Map<CacheKey, Object>> readerCacheEntry : cache.readerCache.entrySet()) {
           final Object readerKey = readerCacheEntry.getKey();
           if (readerKey == null) continue;
-          final Map<Entry, Object> innerCache = readerCacheEntry.getValue();
-          for (final Map.Entry<Entry, Object> mapEntry : innerCache.entrySet()) {
-            Entry entry = mapEntry.getKey();
+          final Map<CacheKey, Object> innerCache = readerCacheEntry.getValue();
+          for (final Map.Entry<CacheKey, Object> mapEntry : innerCache.entrySet()) {
+            CacheKey entry = mapEntry.getKey();
             result.add(new CacheEntry(readerKey, entry.field,
                 cacheType, entry.custom,
                 mapEntry.getValue()));
@@ -152,9 +155,9 @@ class FieldCacheImpl implements FieldCache {
 
     final FieldCacheImpl wrapper;
 
-    final Map<Object,Map<Entry,Object>> readerCache = new WeakHashMap<Object,Map<Entry,Object>>();
+    final Map<Object,Map<CacheKey,Object>> readerCache = new WeakHashMap<Object,Map<CacheKey,Object>>();
 
-    protected abstract Object createValue(AtomicReader reader, Entry key, boolean setDocsWithField)
+    protected abstract Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException;
 
     /** Remove this reader from the cache, if present. */
@@ -167,13 +170,13 @@ class FieldCacheImpl implements FieldCache {
 
     /** Sets the key to the value for the provided reader;
      *  if the key is already set then this doesn't change it. */
-    public void put(AtomicReader reader, Entry key, Object value) {
+    public void put(AtomicReader reader, CacheKey key, Object value) {
       final Object readerKey = reader.getCoreCacheKey();
       synchronized (readerCache) {
-        Map<Entry,Object> innerCache = readerCache.get(readerKey);
+        Map<CacheKey,Object> innerCache = readerCache.get(readerKey);
         if (innerCache == null) {
           // First time this reader is using FieldCache
-          innerCache = new HashMap<Entry,Object>();
+          innerCache = new HashMap<CacheKey,Object>();
           readerCache.put(readerKey, innerCache);
           wrapper.initReader(reader);
         }
@@ -186,15 +189,15 @@ class FieldCacheImpl implements FieldCache {
       }
     }
 
-    public Object get(AtomicReader reader, Entry key, boolean setDocsWithField) throws IOException {
-      Map<Entry,Object> innerCache;
+    public Object get(AtomicReader reader, CacheKey key, boolean setDocsWithField) throws IOException {
+      Map<CacheKey,Object> innerCache;
       Object value;
       final Object readerKey = reader.getCoreCacheKey();
       synchronized (readerCache) {
         innerCache = readerCache.get(readerKey);
         if (innerCache == null) {
           // First time this reader is using FieldCache
-          innerCache = new HashMap<Entry,Object>();
+          innerCache = new HashMap<CacheKey,Object>();
           readerCache.put(readerKey, innerCache);
           wrapper.initReader(reader);
           value = null;
@@ -250,12 +253,12 @@ class FieldCacheImpl implements FieldCache {
   }
 
   /** Expert: Every composite-key in the internal cache is of this type. */
-  static class Entry {
+  static class CacheKey {
     final String field;        // which Field
     final Object custom;       // which custom comparator or parser
 
     /** Creates one of these objects for a custom comparator/parser. */
-    Entry (String field, Object custom) {
+    CacheKey(String field, Object custom) {
       this.field = field;
       this.custom = custom;
     }
@@ -263,8 +266,8 @@ class FieldCacheImpl implements FieldCache {
     /** Two of these are equal iff they reference the same field and type. */
     @Override
     public boolean equals (Object o) {
-      if (o instanceof Entry) {
-        Entry other = (Entry) o;
+      if (o instanceof CacheKey) {
+        CacheKey other = (CacheKey) o;
         if (other.field.equals(field)) {
           if (other.custom == null) {
             if (custom == null) return true;
@@ -283,17 +286,6 @@ class FieldCacheImpl implements FieldCache {
     }
   }
 
-  // inherit javadocs
-  public Bytes getBytes (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
-    return getBytes(reader, field, null, setDocsWithField);
-  }
-
-  // inherit javadocs
-  public Bytes getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
-    throws IOException {
-    return (Bytes) caches.get(Byte.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
-  }
-
   private static abstract class Uninvert {
 
     public Bits docsWithField;
@@ -352,6 +344,38 @@ class FieldCacheImpl implements FieldCache {
     protected abstract void visitDoc(int docID);
   }
 
+  // null Bits means no docs matched
+  void setDocsWithField(AtomicReader reader, String field, Bits docsWithField) {
+    final int maxDoc = reader.maxDoc();
+    final Bits bits;
+    if (docsWithField == null) {
+      bits = new Bits.MatchNoBits(maxDoc);
+    } else if (docsWithField instanceof FixedBitSet) {
+      final int numSet = ((FixedBitSet) docsWithField).cardinality();
+      if (numSet >= maxDoc) {
+        // The cardinality of the BitSet is maxDoc if all documents have a value.
+        assert numSet == maxDoc;
+        bits = new Bits.MatchAllBits(maxDoc);
+      } else {
+        bits = docsWithField;
+      }
+    } else {
+      bits = docsWithField;
+    }
+    caches.get(DocsWithFieldCache.class).put(reader, new CacheKey(field, null), bits);
+  }
+
+  // inherit javadocs
+  public Bytes getBytes (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
+    return getBytes(reader, field, null, setDocsWithField);
+  }
+
+  // inherit javadocs
+  public Bytes getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
+    throws IOException {
+    return (Bytes) caches.get(Byte.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
+  }
+
   // nocommit move up?
   static class BytesFromArray extends Bytes {
     private final byte[] values;
@@ -372,26 +396,38 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
 
-      ByteParser parser = (ByteParser) entryKey.custom;
+      int maxDoc = reader.maxDoc();
+      final byte[] values;
+
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new byte[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          values[docID] = (byte) valuesIn.get(docID);
+        }
+      } else {
+
+      final ByteParser parser = (ByteParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
         // setting parser = DEFAULT_SHORT_PARSER) so cache
        // key includes DEFAULT_SHORT_PARSER:
-        return wrapper.getBytes(reader, entryKey.field, DEFAULT_BYTE_PARSER, setDocsWithField);
+        return wrapper.getBytes(reader, key.field, DEFAULT_BYTE_PARSER, setDocsWithField);
       }
 
-      final ByteParser finalParser = parser;
-
-      final byte[] values = new byte[reader.maxDoc()];
+      values = new byte[maxDoc];
+
       Uninvert u = new Uninvert() {
         private byte currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseByte(term);
+          currentValue = parser.parseByte(term);
         }
 
         @Override
@@ -400,10 +436,11 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
+      }
 
       return new BytesFromArray(values);
@@ -418,7 +455,7 @@ class FieldCacheImpl implements FieldCache {
   // inherit javadocs
   public Shorts getShorts(AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField)
     throws IOException {
-    return (Shorts) caches.get(Short.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
+    return (Shorts) caches.get(Short.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
   }
 
   // nocommit move up?
@@ -441,26 +478,36 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
 
-      ShortParser parser = (ShortParser) entryKey.custom;
+      int maxDoc = reader.maxDoc();
+      final short[] values;
+
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new short[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          values[docID] = (short) valuesIn.get(docID);
+        }
+      } else {
+      final ShortParser parser = (ShortParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
         // setting parser = DEFAULT_SHORT_PARSER) so cache
         // key includes DEFAULT_SHORT_PARSER:
-        return wrapper.getShorts(reader, entryKey.field, DEFAULT_SHORT_PARSER, setDocsWithField);
+        return wrapper.getShorts(reader, key.field, DEFAULT_SHORT_PARSER, setDocsWithField);
       }
 
-      final ShortParser finalParser = parser;
-      final short[] values = new short[reader.maxDoc()];
+      values = new short[maxDoc];
       Uninvert u = new Uninvert() {
         private short currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseShort(term);
+          currentValue = parser.parseShort(term);
         }
 
         @Override
@@ -469,37 +516,16 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
+      }
 
       return new ShortsFromArray(values);
     }
   }
 
-  // null Bits means no docs matched
-  void setDocsWithField(AtomicReader reader, String field, Bits docsWithField) {
-    final int maxDoc = reader.maxDoc();
-    final Bits bits;
-    if (docsWithField == null) {
-      bits = new Bits.MatchNoBits(maxDoc);
-    } else if (docsWithField instanceof FixedBitSet) {
-      final int numSet = ((FixedBitSet) docsWithField).cardinality();
-      if (numSet >= maxDoc) {
-        // The cardinality of the BitSet is maxDoc if all documents have a value.
-        assert numSet == maxDoc;
-        bits = new Bits.MatchAllBits(maxDoc);
-      } else {
-        bits = docsWithField;
-      }
-    } else {
-      bits = docsWithField;
-    }
-    caches.get(DocsWithFieldCache.class).put(reader, new Entry(field, null), bits);
-  }
-
   // inherit javadocs
   public Ints getInts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
     return getInts(reader, field, null, setDocsWithField);
@@ -508,7 +534,7 @@ class FieldCacheImpl implements FieldCache {
   // inherit javadocs
   public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
     throws IOException {
-    return (Ints) caches.get(Integer.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
+    return (Ints) caches.get(Integer.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
   }
 
   // nocommit move up?
@@ -531,9 +557,22 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(final AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
-      IntParser parser = (IntParser) entryKey.custom;
+
+      int maxDoc = reader.maxDoc();
+      final int[] values;
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new int[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          values[docID] = (int) valuesIn.get(docID);
+        }
+      } else {
+
+      final IntParser parser = (IntParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
         // setting parser =
@@ -541,23 +580,22 @@ class FieldCacheImpl implements FieldCache {
         // cache key includes
         // DEFAULT_INT_PARSER/NUMERIC_UTILS_INT_PARSER:
         try {
-          return wrapper.getInts(reader, entryKey.field, DEFAULT_INT_PARSER, setDocsWithField);
+          return wrapper.getInts(reader, key.field, DEFAULT_INT_PARSER, setDocsWithField);
         } catch (NumberFormatException ne) {
-          return wrapper.getInts(reader, entryKey.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
+          return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
         }
       }
 
-      final IntParser finalParser = parser;
-      // nocommit how to avoid double alloc in numeric field
-      // case ...
-      final int[] values = new int[reader.maxDoc()];
+      values = new int[reader.maxDoc()];
 
       Uninvert u = new Uninvert() {
         private int currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseInt(term);
+          currentValue = parser.parseInt(term);
         }
 
         @Override
@@ -566,31 +604,33 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
+      }
 
       return new IntsFromArray(values);
     }
   }
 
+  // nocommit must advertise that this does NOT work if you
+  // index only doc values for the field ... it will say no
+  // doc exists...
   public Bits getDocsWithField(AtomicReader reader, String field)
     throws IOException {
-    return (Bits) caches.get(DocsWithFieldCache.class).get(reader, new Entry(field, null), false);
+    return (Bits) caches.get(DocsWithFieldCache.class).get(reader, new CacheKey(field, null), false);
   }
 
   // nocommit move up?
   static final class DocsWithFieldCache extends Cache {
     DocsWithFieldCache(FieldCacheImpl wrapper) {
       super(wrapper);
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
       throws IOException {
-      final String field = entryKey.field;
+      final String field = key.field;
       FixedBitSet res = null;
       Terms terms = reader.terms(field);
       final int maxDoc = reader.maxDoc();
@@ -646,7 +686,7 @@ class FieldCacheImpl implements FieldCache {
   // inherit javadocs
   public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField)
     throws IOException {
-    return (Floats) caches.get(Float.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
+    return (Floats) caches.get(Float.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
   }
 
   // nocommit move up?
@@ -669,9 +709,25 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
-      FloatParser parser = (FloatParser) entryKey.custom;
+
+      int maxDoc = reader.maxDoc();
+      final float[] values;
+
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new float[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          // nocommit somewhat dangerous ... eg if user had
+          // indexed as DV.BYTE ...
+          values[docID] = Float.intBitsToFloat((int) valuesIn.get(docID));
+        }
+      } else {
+
+      final FloatParser parser = (FloatParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
         // setting parser =
@@ -679,23 +735,22 @@ class FieldCacheImpl implements FieldCache {
         // cache key includes
         // DEFAULT_FLOAT_PARSER/NUMERIC_UTILS_FLOAT_PARSER:
         try {
-          return wrapper.getFloats(reader, entryKey.field, DEFAULT_FLOAT_PARSER, setDocsWithField);
+          return wrapper.getFloats(reader, key.field, DEFAULT_FLOAT_PARSER, setDocsWithField);
         } catch (NumberFormatException ne) {
-          return wrapper.getFloats(reader, entryKey.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
+          return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
         }
       }
 
-      final FloatParser finalParser = parser;
-      // nocommit how to avoid double alloc in numeric field
-      // case ...
-      final float[] values = new float[reader.maxDoc()];
+      values = new float[reader.maxDoc()];
 
       Uninvert u = new Uninvert() {
         private float currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseFloat(term);
+          currentValue = parser.parseFloat(term);
         }
 
         @Override
@@ -704,10 +759,11 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
+      }
 
       return new FloatsFromArray(values);
@@ -722,7 +778,7 @@ class FieldCacheImpl implements FieldCache {
   // inherit javadocs
   public Longs getLongs(AtomicReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
     throws IOException {
-    return (Longs) caches.get(Long.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
+    return (Longs) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
   }
 
   // nocommit move up?
@@ -745,9 +801,21 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
         throws IOException {
-      LongParser parser = (LongParser) entryKey.custom;
+
+      int maxDoc = reader.maxDoc();
+      final long[] values;
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new long[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          values[docID] = valuesIn.get(docID);
+        }
+      } else {
+      final LongParser parser = (LongParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
         // setting parser =
@@ -755,23 +823,22 @@ class FieldCacheImpl implements FieldCache {
         // cache key includes
         // DEFAULT_LONG_PARSER/NUMERIC_UTILS_LONG_PARSER:
         try {
-          return wrapper.getLongs(reader, entryKey.field, DEFAULT_LONG_PARSER, setDocsWithField);
+          return wrapper.getLongs(reader, key.field, DEFAULT_LONG_PARSER, setDocsWithField);
         } catch (NumberFormatException ne) {
-          return wrapper.getLongs(reader, entryKey.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
+          return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
         }
       }
 
-      final LongParser finalParser = parser;
-      // nocommit how to avoid double alloc in numeric field
-      // case ...
-      final long[] values = new long[reader.maxDoc()];
+      values = new long[reader.maxDoc()];
 
       Uninvert u = new Uninvert() {
         private long currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseLong(term);
+          currentValue = parser.parseLong(term);
         }
 
         @Override
@@ -780,12 +847,12 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
+      }
 
       return new LongsFromArray(values);
     }
   }
@@ -799,7 +866,7 @@ class FieldCacheImpl implements FieldCache {
   // inherit javadocs
   public Doubles getDoubles(AtomicReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
     throws IOException {
-    return (Doubles) caches.get(Double.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
+    return (Doubles) caches.get(Double.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
   }
 
   // nocommit move up?
@@ -822,9 +889,23 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {
-      DoubleParser parser = (DoubleParser) entryKey.custom;
+      int maxDoc = reader.maxDoc();
+      final double[] values;
+
+      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit should we throw exc if parser isn't
+        // null?  if setDocsWithField is true?
+        values = new double[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          // nocommit somewhat dangerous ... eg if user had
+          // indexed as DV.BYTE ...
+          values[docID] = Double.longBitsToDouble(valuesIn.get(docID));
+        }
+      } else {
+      final DoubleParser parser = (DoubleParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
        // setting parser =
@@ -832,23 +913,22 @@ class FieldCacheImpl implements FieldCache {
         // cache key includes
         // DEFAULT_DOUBLE_PARSER/NUMERIC_UTILS_DOUBLE_PARSER:
         try {
-          return wrapper.getDoubles(reader, entryKey.field, DEFAULT_DOUBLE_PARSER, setDocsWithField);
+          return wrapper.getDoubles(reader, key.field, DEFAULT_DOUBLE_PARSER, setDocsWithField);
         } catch (NumberFormatException ne) {
-          return wrapper.getDoubles(reader, entryKey.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
+          return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
         }
       }
 
-      final DoubleParser finalParser = parser;
-      // nocommit how to avoid double alloc in numeric field
-      // case ...
-      final double[] values = new double[reader.maxDoc()];
+      values = new double[reader.maxDoc()];
 
       Uninvert u = new Uninvert() {
         private double currentValue;
 
         @Override
         public void visitTerm(BytesRef term) {
-          currentValue = finalParser.parseDouble(term);
+          currentValue = parser.parseDouble(term);
         }
 
         @Override
@@ -857,12 +937,12 @@ class FieldCacheImpl implements FieldCache {
         }
       };
 
-      u.uninvert(reader, entryKey.field, setDocsWithField);
+      u.uninvert(reader, key.field, setDocsWithField);
 
       if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, entryKey.field, u.docsWithField);
+        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
      }
+      }
 
       return new DoublesFromArray(values);
     }
   }
@@ -1046,7 +1126,7 @@ class FieldCacheImpl implements FieldCache {
   }
 
   public DocTermsIndex getTermsIndex(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException {
-    return (DocTermsIndex) caches.get(DocTermsIndex.class).get(reader, new Entry(field, acceptableOverheadRatio), false);
+    return (DocTermsIndex) caches.get(DocTermsIndex.class).get(reader, new CacheKey(field, acceptableOverheadRatio), false);
   }
 
   static class DocTermsIndexCache extends Cache {
@@ -1055,12 +1135,69 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
         throws IOException {
 
-      Terms terms = reader.terms(entryKey.field);
+      final int maxDoc = reader.maxDoc();
+      SortedDocValues valuesIn = reader.getSortedDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit used packed ints like below!
+        final byte[][] values = new byte[valuesIn.getValueCount()][];
+        BytesRef scratch = new BytesRef();
+        for(int ord=0;ord<values.length;ord++) {
+          valuesIn.lookupOrd(ord, scratch);
+          values[ord] = new byte[scratch.length];
+          System.arraycopy(scratch.bytes, scratch.offset, values[ord], 0, scratch.length);
+        }
 
-      final float acceptableOverheadRatio = ((Float) entryKey.custom).floatValue();
+        final int[] docToOrd = new int[maxDoc];
+        for(int docID=0;docID<maxDoc;docID++) {
+          docToOrd[docID] = valuesIn.getOrd(docID);
+        }
+
+        return new DocTermsIndex() {
+
+          @Override
+          public PackedInts.Reader getDocToOrd() {
+            // nocommit
+            return null;
+          }
+
+          @Override
+          public int numOrd() {
+            return values.length;
+          }
+
+          @Override
+          public int getOrd(int docID) {
+            return docToOrd[docID];
+          }
+
+          @Override
+          public int size() {
+            return docToOrd.length;
+          }
+
+          @Override
+          public BytesRef lookup(int ord, BytesRef ret) {
+            ret.bytes = values[ord];
+            ret.length = ret.bytes.length;
+            ret.offset = 0;
+            return ret;
+          }
+
+          @Override
+          public TermsEnum getTermsEnum() {
+            // nocommit
+            return null;
+          }
+        };
+
+      } else {
+
+        Terms terms = reader.terms(key.field);
+
+        final float acceptableOverheadRatio = ((Float) key.custom).floatValue();
 
       final PagedBytes bytes = new PagedBytes(15);
 
@@ -1068,7 +1205,6 @@ class FieldCacheImpl implements FieldCache {
       int startTermsBPV;
       int startNumUniqueTerms;
 
-      int maxDoc = reader.maxDoc();
       final int termCountHardLimit;
       if (maxDoc == Integer.MAX_VALUE) {
         termCountHardLimit = Integer.MAX_VALUE;
@@ -1151,6 +1287,7 @@ class FieldCacheImpl implements FieldCache {
       return new DocTermsIndexImpl(bytes.freeze(true), termOrdToBytesOffset.getMutable(), docToTermOrd.getMutable(), termOrd);
     }
+    }
   }
 
   private static class DocTermsImpl extends DocTerms {
     private final PagedBytes.Reader bytes;
@@ -1185,7 +1322,7 @@ class FieldCacheImpl implements FieldCache {
   }
 
   public DocTerms getTerms(AtomicReader reader, String field, float acceptableOverheadRatio) throws IOException {
-    return (DocTerms) caches.get(DocTerms.class).get(reader, new Entry(field, acceptableOverheadRatio), false);
+    return (DocTerms) caches.get(DocTerms.class).get(reader, new CacheKey(field, acceptableOverheadRatio), false);
   }
 
   static final class DocTermsCache extends Cache {
@@ -1194,14 +1331,48 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
        throws IOException {
 
-      Terms terms = reader.terms(entryKey.field);
+      final int maxDoc = reader.maxDoc();
+      BinaryDocValues valuesIn = reader.getBinaryDocValues(key.field);
+      if (valuesIn != null) {
+        // nocommit used packed ints like below!
+        final byte[][] values = new byte[maxDoc][];
+        BytesRef scratch = new BytesRef();
+        for(int docID=0;docID<maxDoc;docID++) {
+          valuesIn.get(docID, scratch);
+          values[docID] = new byte[scratch.length];
+          System.arraycopy(scratch.bytes, scratch.offset, values[docID], 0, scratch.length);
+        }
 
-      final float acceptableOverheadRatio = ((Float) entryKey.custom).floatValue();
+        return new DocTerms() {
+          @Override
+          public int size() {
+            return maxDoc;
+          }
 
-      final int termCountHardLimit = reader.maxDoc();
+          @Override
+          public boolean exists(int docID) {
+            // nocommit lying ...?
+            return true;
+          }
+
+          @Override
+          public BytesRef getTerm(int docID, BytesRef ret) {
+            ret.bytes = values[docID];
+            ret.length = ret.bytes.length;
+            ret.offset = 0;
+            return ret;
+          }
+        };
+      } else {
+
+        Terms terms = reader.terms(key.field);
+
+        final float acceptableOverheadRatio = ((Float) key.custom).floatValue();
+
+        final int termCountHardLimit = maxDoc;
 
       // Holds the actual term data, expanded.
       final PagedBytes bytes = new PagedBytes(15);
@@ -1225,7 +1396,7 @@ class FieldCacheImpl implements FieldCache {
        startBPV = 1;
      }
 
-      final GrowableWriter docToOffset = new GrowableWriter(startBPV, reader.maxDoc(), acceptableOverheadRatio);
+        final GrowableWriter docToOffset = new GrowableWriter(startBPV, maxDoc, acceptableOverheadRatio);
 
      // pointer==0 means not set
      bytes.copyUsingLengthPrefix(new BytesRef());
@@ -1262,9 +1433,10 @@ class FieldCacheImpl implements FieldCache {
       return new DocTermsImpl(bytes.freeze(true), docToOffset.getMutable());
     }
+    }
   }
 
   public DocTermOrds getDocTermOrds(AtomicReader reader, String field) throws IOException {
-    return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new Entry(field, null), false);
+    return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new CacheKey(field, null), false);
   }
 
   static final class DocTermOrdsCache extends Cache {
@@ -1273,9 +1445,10 @@ class FieldCacheImpl implements FieldCache {
     }
 
     @Override
-    protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField /* ignored */)
        throws IOException {
-      return new DocTermOrds(reader, entryKey.field);
+      // No DocValues impl yet (DocValues are single valued...):
+      return new DocTermOrds(reader, key.field);
     }
   }
 

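Every numeric createValue above follows the same shape, which is the point of the commit message ("pull from DV under FC.getXXX if possible"): consult NumericDocValues first and only uninvert the postings when no doc values exist. Distilled to its skeleton (sketch; uninvertLongs is a hypothetical stand-in for the Uninvert plumbing):

    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
        throws IOException {
      final int maxDoc = reader.maxDoc();
      final long[] values;
      NumericDocValues valuesIn = reader.getNumericDocValues(key.field);
      if (valuesIn != null) {
        values = new long[maxDoc];               // fast path: copy straight out of DV
        for (int docID = 0; docID < maxDoc; docID++) {
          values[docID] = valuesIn.get(docID);
        }
      } else {
        // slow path: uninvert the indexed terms, as before (hypothetical helper)
        values = uninvertLongs(reader, key, setDocsWithField);
      }
      return new LongsFromArray(values);
    }
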
@@ -87,6 +87,7 @@ import org.apache.lucene.util.packed.PackedInts;
  * @lucene.experimental
  */
 public abstract class FieldComparator<T> {
+  // nocommit remove the doc values comparators
 
   /**
   * Compare hit at slot1 with hit at slot2.

@@ -49,7 +49,7 @@ import org.apache.lucene.util.LuceneTestCase;
 * to this class.
 */
 // nocommit don't suppress any:
-@SuppressCodecs({"Direct", "Memory", "Lucene41", "MockRandom", "Lucene40", "Compressing"})
+@SuppressCodecs({"Asserting", "Direct", "Memory", "Lucene41", "MockRandom", "Lucene40", "Compressing"})
 public class TestDemoDocValue extends LuceneTestCase {
 
   public void testDemoNumber() throws IOException {
@@ -82,7 +82,7 @@ public class TestDemoDocValue extends LuceneTestCase {
       StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
       assertEquals(text, hitDoc.get("fieldname"));
       assert ireader.leaves().size() == 1;
-      NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv", random().nextBoolean());
+      NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
       assertEquals(5, dv.get(hits.scoreDocs[i].doc));
     }
 
@@ -113,7 +113,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv", random().nextBoolean());
+    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
     assertEquals(1, dv.get(0));
     assertEquals(2, dv.get(1));
 
@@ -147,7 +147,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv", random().nextBoolean());
+    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
     for(int i=0;i<2;i++) {
       StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);
       long expected;
@@ -186,7 +186,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv", random().nextBoolean());
+    NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv");
     assertEquals(Long.MIN_VALUE, dv.get(0));
     assertEquals(Long.MAX_VALUE, dv.get(1));
 
@@ -225,7 +225,7 @@ public class TestDemoDocValue extends LuceneTestCase {
       StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
       assertEquals(text, hitDoc.get("fieldname"));
       assert ireader.leaves().size() == 1;
-      BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv", random().nextBoolean());
+      BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
       dv.get(hits.scoreDocs[i].doc, scratch);
       assertEquals(new BytesRef("hello world"), scratch);
     }
@@ -262,7 +262,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv", random().nextBoolean());
+    BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
     BytesRef scratch = new BytesRef();
     for(int i=0;i<2;i++) {
       StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);
@@ -311,7 +311,7 @@ public class TestDemoDocValue extends LuceneTestCase {
       StoredDocument hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
       assertEquals(text, hitDoc.get("fieldname"));
       assert ireader.leaves().size() == 1;
-      SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv", random().nextBoolean());
+      SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
       dv.lookupOrd(dv.getOrd(hits.scoreDocs[i].doc), scratch);
       assertEquals(new BytesRef("hello world"), scratch);
     }
@@ -343,7 +343,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv", random().nextBoolean());
+    SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
     BytesRef scratch = new BytesRef();
     dv.lookupOrd(dv.getOrd(0), scratch);
     assertEquals("hello world 1", scratch.utf8ToString());
@@ -380,7 +380,7 @@ public class TestDemoDocValue extends LuceneTestCase {
     // Now search the index:
     IndexReader ireader = DirectoryReader.open(directory); // read-only=true
     assert ireader.leaves().size() == 1;
-    SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv", random().nextBoolean());
+    SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
     BytesRef scratch = new BytesRef();
     for(int i=0;i<2;i++) {
       StoredDocument doc2 = ireader.leaves().get(0).reader().document(i);

@@ -740,17 +740,17 @@ public class MemoryIndex {
     }
 
     // nocommit todo
-    public NumericDocValues getNumericDocValues(String field, boolean direct) {
+    public NumericDocValues getNumericDocValues(String field) {
       return null;
     }
 
     // nocommit todo
-    public BinaryDocValues getBinaryDocValues(String field, boolean direct) {
+    public BinaryDocValues getBinaryDocValues(String field) {
       return null;
     }
 
     // nocommit todo
-    public SortedDocValues getSortedDocValues(String field, boolean direct) {
+    public SortedDocValues getSortedDocValues(String field) {
       return null;
     }
 

@@ -399,17 +399,17 @@ public class TestDocSet extends LuceneTestCase {
       }
 
       @Override
-      public NumericDocValues getNumericDocValues(String field, boolean direct) {
+      public NumericDocValues getNumericDocValues(String field) {
        return null;
      }
 
      @Override
-      public BinaryDocValues getBinaryDocValues(String field, boolean direct) {
+      public BinaryDocValues getBinaryDocValues(String field) {
        return null;
      }
 
      @Override
-      public SortedDocValues getSortedDocValues(String field, boolean direct) {
+      public SortedDocValues getSortedDocValues(String field) {
        return null;
      }