LUCENE-6013: remove IndexableFieldType.indexed and FieldInfo.indexed (it's redundant with IndexOptions != null)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1633296 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2014-10-21 07:32:38 +00:00
parent 58fe66dba7
commit 8f9f8a3252
69 changed files with 302 additions and 328 deletions
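For reference, here is a minimal sketch of what this change means for user code; the field type below is illustrative and not taken from any one file in this commit:

// Before (4.x style): an explicit indexed flag on FieldType
FieldType oldStyle = new FieldType();
oldStyle.setIndexed(true);
oldStyle.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);

// After this change: indexing is implied by a non-null IndexOptions
// (IndexOptions is org.apache.lucene.index.FieldInfo.IndexOptions)
FieldType newStyle = new FieldType();
newStyle.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); // indexed
// The default is now no index options (null), which means "not indexed";
// setIndexOptions(null) likewise marks a field type as not indexed.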

View File

@ -164,6 +164,10 @@ API Changes
will be DocValuesType.NUMERIC if the field is indexed and does not omit
norms, else null. (Robert Muir, Mike McCandless)
* LUCENE-6013: Removed indexed boolean from IndexableFieldType and
FieldInfo, since it's redundant with IndexOptions != null. (Robert
Muir, Mike McCandless)
Bug Fixes
* LUCENE-5650: Enforce read-only access to any path outside the temporary

View File

@ -106,7 +106,7 @@ public class Lucene40FieldInfosFormat extends FieldInfosFormat {
// Undead norms! Lucene40NormsReader will check this and bring norms back from the dead:
UndeadNormsProducer.setUndead(attributes);
}
infos[i] = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector,
infos[i] = new FieldInfo(name, fieldNumber, storeTermVector,
omitNorms, storePayloads, indexOptions, oldValuesType.mapping, -1, Collections.unmodifiableMap(attributes));
}

View File

@ -93,7 +93,7 @@ public class Lucene42FieldInfosFormat extends FieldInfosFormat {
UndeadNormsProducer.setUndead(attributes);
}
infos[i] = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector,
infos[i] = new FieldInfo(name, fieldNumber, storeTermVector,
omitNorms, storePayloads, indexOptions, docValuesType, -1, Collections.unmodifiableMap(attributes));
}

View File

@ -95,7 +95,7 @@ public final class Lucene46FieldInfosFormat extends FieldInfosFormat {
UndeadNormsProducer.setUndead(attributes);
}
infos[i] = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector,
infos[i] = new FieldInfo(name, fieldNumber, storeTermVector,
omitNorms, storePayloads, indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(attributes));
}

View File

@ -231,7 +231,6 @@ public class DocMaker implements Closeable {
// Set ID_FIELD
FieldType ft = new FieldType(valType);
ft.setIndexed(true);
ft.setStored(true);
Field idField = ds.getField(ID_FIELD, ft);

View File

@ -72,7 +72,7 @@ public class ReadTokensTask extends PerfTask {
List<Field> fields = doc.getFields();
Analyzer analyzer = getRunData().getAnalyzer();
int tokenCount = 0;
for(final IndexableField field : fields) {
for(final Field field : fields) {
if (!field.fieldType().tokenized() ||
field instanceof IntField ||
field instanceof LongField ||

View File

@ -53,7 +53,6 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
static final BytesRef NUMFIELDS = new BytesRef("number of fields ");
static final BytesRef NAME = new BytesRef(" name ");
static final BytesRef NUMBER = new BytesRef(" number ");
static final BytesRef ISINDEXED = new BytesRef(" indexed ");
static final BytesRef STORETV = new BytesRef(" term vectors ");
static final BytesRef STORETVPOS = new BytesRef(" term vector positions ");
static final BytesRef STORETVOFF = new BytesRef(" term vector offsets ");
@ -89,19 +88,16 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
assert StringHelper.startsWith(scratch.get(), NUMBER);
int fieldNumber = Integer.parseInt(readString(NUMBER.length, scratch));
SimpleTextUtil.readLine(input, scratch);
assert StringHelper.startsWith(scratch.get(), ISINDEXED);
boolean isIndexed = Boolean.parseBoolean(readString(ISINDEXED.length, scratch));
final IndexOptions indexOptions;
if (isIndexed) {
SimpleTextUtil.readLine(input, scratch);
assert StringHelper.startsWith(scratch.get(), INDEXOPTIONS);
indexOptions = IndexOptions.valueOf(readString(INDEXOPTIONS.length, scratch));
} else {
SimpleTextUtil.readLine(input, scratch);
assert StringHelper.startsWith(scratch.get(), INDEXOPTIONS);
String s = readString(INDEXOPTIONS.length, scratch);
if ("null".equals(s)) {
indexOptions = null;
} else {
indexOptions = IndexOptions.valueOf(s);
}
SimpleTextUtil.readLine(input, scratch);
assert StringHelper.startsWith(scratch.get(), STORETV);
boolean storeTermVector = Boolean.parseBoolean(readString(STORETV.length, scratch));
@ -139,7 +135,7 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
atts.put(key, value);
}
infos[i] = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector,
infos[i] = new FieldInfo(name, fieldNumber, storeTermVector,
omitNorms, storePayloads, indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(atts));
}
@ -189,16 +185,15 @@ public class SimpleTextFieldInfosFormat extends FieldInfosFormat {
SimpleTextUtil.write(out, Integer.toString(fi.number), scratch);
SimpleTextUtil.writeNewline(out);
SimpleTextUtil.write(out, ISINDEXED);
SimpleTextUtil.write(out, Boolean.toString(fi.isIndexed()), scratch);
SimpleTextUtil.writeNewline(out);
if (fi.isIndexed()) {
SimpleTextUtil.write(out, INDEXOPTIONS);
IndexOptions indexOptions = fi.getIndexOptions();
if (indexOptions != null) {
assert fi.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0 || !fi.hasPayloads();
SimpleTextUtil.write(out, INDEXOPTIONS);
SimpleTextUtil.write(out, fi.getIndexOptions().toString(), scratch);
SimpleTextUtil.writeNewline(out);
} else {
SimpleTextUtil.write(out, "null", scratch);
}
SimpleTextUtil.writeNewline(out);
SimpleTextUtil.write(out, STORETV);
SimpleTextUtil.write(out, Boolean.toString(fi.hasVectors()), scratch);

View File

@ -316,6 +316,12 @@ public final class NumericTokenStream extends TokenStream {
public int getPrecisionStep() {
return precisionStep;
}
@Override
public String toString() {
// We override default because it can throw cryptic "illegal shift value":
return getClass().getSimpleName() + "(precisionStep=" + precisionStep + " valueSize=" + numericAtt.getValueSize() + " shift=" + numericAtt.getShift() + ")";
}
// members
private final NumericTermAttribute numericAtt = addAttribute(NumericTermAttribute.class);

View File

@ -49,7 +49,7 @@ import org.apache.lucene.store.IndexOutput;
* <li>Header --&gt; {@link CodecUtil#checkSegmentHeader SegmentHeader}</li>
* <li>FieldsCount --&gt; {@link DataOutput#writeVInt VInt}</li>
* <li>FieldName --&gt; {@link DataOutput#writeString String}</li>
* <li>FieldBits, DocValuesBits --&gt; {@link DataOutput#writeByte Byte}</li>
* <li>FieldBits, IndexOptions, DocValuesBits --&gt; {@link DataOutput#writeByte Byte}</li>
* <li>FieldNumber --&gt; {@link DataOutput#writeInt VInt}</li>
* <li>Attributes --&gt; {@link DataOutput#writeStringStringMap Map&lt;String,String&gt;}</li>
* <li>DocValuesGen --&gt; {@link DataOutput#writeLong(long) Int64}</li>
@ -64,39 +64,39 @@ import org.apache.lucene.store.IndexOutput;
* Lucene, the fields are not numbered implicitly by their order in the
* file, instead explicitly.</li>
* <li>FieldBits: a byte containing field options.
* <ul>
* <li>The low-order bit is one for indexed fields, and zero for non-indexed
* fields.</li>
* <li>The second lowest-order bit is one for fields that have term vectors
* stored, and zero for fields without term vectors.</li>
* <li>If the third lowest order-bit is set (0x4), offsets are stored into
* the postings list in addition to positions.</li>
* <li>Fourth bit is unused.</li>
* <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the
* indexed field.</li>
* <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the
* indexed field.</li>
* <li>If the seventh lowest-order bit is set (0x40), term frequencies and
* positions omitted for the indexed field.</li>
* <li>If the eighth lowest-order bit is set (0x80), positions are omitted for the
* indexed field.</li>
* </ul>
* </li>
* <li>DocValuesBits: a byte containing per-document value types. The type
* recorded as two four-bit integers, with the high-order bits representing
* <code>norms</code> options, and the low-order bits representing
* {@code DocValues} options. Each four-bit integer can be decoded as such:
* <ul>
* <li>0: no DocValues for this field.</li>
* <li>1: NumericDocValues. ({@link DocValuesType#NUMERIC})</li>
* <li>2: BinaryDocValues. ({@code DocValuesType#BINARY})</li>
* <li>3: SortedDocValues. ({@code DocValuesType#SORTED})</li>
* </ul>
* </li>
* <li>DocValuesGen is the generation count of the field's DocValues. If this is -1,
* there are no DocValues updates to that field. Anything above zero means there
* are updates stored by {@link DocValuesFormat}.</li>
* <li>Attributes: a key-value map of codec-private attributes.</li>
* <ul>
* <li>The low order bit (0x1) is one for fields that have term vectors
* stored, and zero for fields without term vectors.</li>
* <li>If the second lowest order-bit is set (0x2), norms are omitted for the
* indexed field.</li>
* <li>If the third lowest-order bit is set (0x4), payloads are stored for the
* indexed field.</li>
* </ul>
* </li>
* <li>IndexOptions: a byte containing index options.
* <ul>
* <li>0: not indexed</li>
* <li>1: indexed as DOCS_ONLY</li>
* <li>2: indexed as DOCS_AND_FREQS</li>
* <li>3: indexed as DOCS_AND_FREQS_AND_POSITIONS</li>
* <li>4: indexed as DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS</li>
* </ul>
* </li>
* <li>DocValuesBits: a byte containing per-document value types. The type
* recorded as two four-bit integers, with the high-order bits representing
* <code>norms</code> options, and the low-order bits representing
* {@code DocValues} options. Each four-bit integer can be decoded as such:
* <ul>
* <li>0: no DocValues for this field.</li>
* <li>1: NumericDocValues. ({@link DocValuesType#NUMERIC})</li>
* <li>2: BinaryDocValues. ({@code DocValuesType#BINARY})</li>
* <li>3: SortedDocValues. ({@code DocValuesType#SORTED})</li>
* </ul>
* </li>
* <li>DocValuesGen is the generation count of the field's DocValues. If this is -1,
* there are no DocValues updates to that field. Anything above zero means there
* are updates stored by {@link DocValuesFormat}.</li>
* <li>Attributes: a key-value map of codec-private attributes.</li>
* </ul>
*
* @lucene.experimental
@ -109,14 +109,14 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
@Override
public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext context) throws IOException {
final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, Lucene50FieldInfosFormat.EXTENSION);
final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
try (ChecksumIndexInput input = directory.openChecksumInput(fileName, context)) {
Throwable priorE = null;
FieldInfo infos[] = null;
try {
CodecUtil.checkSegmentHeader(input, Lucene50FieldInfosFormat.CODEC_NAME,
Lucene50FieldInfosFormat.FORMAT_START,
Lucene50FieldInfosFormat.FORMAT_CURRENT,
CodecUtil.checkSegmentHeader(input, CODEC_NAME,
FORMAT_START,
FORMAT_CURRENT,
segmentInfo.getId(), segmentSuffix);
final int size = input.readVInt(); //read in the size
@ -129,30 +129,18 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
throw new CorruptIndexException("invalid field number for field: " + name + ", fieldNumber=" + fieldNumber, input);
}
byte bits = input.readByte();
boolean isIndexed = (bits & Lucene50FieldInfosFormat.IS_INDEXED) != 0;
boolean storeTermVector = (bits & Lucene50FieldInfosFormat.STORE_TERMVECTOR) != 0;
boolean omitNorms = (bits & Lucene50FieldInfosFormat.OMIT_NORMS) != 0;
boolean storePayloads = (bits & Lucene50FieldInfosFormat.STORE_PAYLOADS) != 0;
final IndexOptions indexOptions;
if (!isIndexed) {
indexOptions = null;
} else if ((bits & Lucene50FieldInfosFormat.OMIT_TERM_FREQ_AND_POSITIONS) != 0) {
indexOptions = IndexOptions.DOCS_ONLY;
} else if ((bits & Lucene50FieldInfosFormat.OMIT_POSITIONS) != 0) {
indexOptions = IndexOptions.DOCS_AND_FREQS;
} else if ((bits & Lucene50FieldInfosFormat.STORE_OFFSETS_IN_POSTINGS) != 0) {
indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
} else {
indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
}
boolean storeTermVector = (bits & STORE_TERMVECTOR) != 0;
boolean omitNorms = (bits & OMIT_NORMS) != 0;
boolean storePayloads = (bits & STORE_PAYLOADS) != 0;
final IndexOptions indexOptions = getIndexOptions(input, input.readByte());
// DV Types are packed in one byte
byte val = input.readByte();
final DocValuesType docValuesType = getDocValuesType(input, (byte) (val & 0x0F));
final DocValuesType docValuesType = getDocValuesType(input, input.readByte());
final long dvGen = input.readLong();
final Map<String,String> attributes = input.readStringStringMap();
try {
infos[i] = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector, omitNorms, storePayloads,
infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads,
indexOptions, docValuesType, dvGen, Collections.unmodifiableMap(attributes));
infos[i].checkConsistency();
} catch (IllegalStateException e) {
@ -168,56 +156,119 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
}
}
private static DocValuesType getDocValuesType(IndexInput input, byte b) throws IOException {
if (b == 0) {
return null;
} else if (b == 1) {
return DocValuesType.NUMERIC;
} else if (b == 2) {
return DocValuesType.BINARY;
} else if (b == 3) {
return DocValuesType.SORTED;
} else if (b == 4) {
return DocValuesType.SORTED_SET;
} else if (b == 5) {
return DocValuesType.SORTED_NUMERIC;
static {
// We "mirror" DocValues enum values with the constants below; let's try to ensure if we add a new DocValuesType while this format is
// still used for writing, we remember to fix this encoding:
assert DocValuesType.values().length == 5;
}
private static byte docValuesByte(DocValuesType type) {
if (type == null) {
return 0;
} else {
switch(type) {
case NUMERIC:
return 1;
case BINARY:
return 2;
case SORTED:
return 3;
case SORTED_SET:
return 4;
case SORTED_NUMERIC:
return 5;
default:
// BUG
throw new AssertionError("unhandled DocValuesType: " + type);
}
}
}
private static DocValuesType getDocValuesType(IndexInput input, byte b) throws IOException {
switch(b) {
case 0:
return null;
case 1:
return DocValuesType.NUMERIC;
case 2:
return DocValuesType.BINARY;
case 3:
return DocValuesType.SORTED;
case 4:
return DocValuesType.SORTED_SET;
case 5:
return DocValuesType.SORTED_NUMERIC;
default:
throw new CorruptIndexException("invalid docvalues byte: " + b, input);
}
}
static {
// We "mirror" IndexOptions enum values with the constants below; let's try to ensure if we add a new IndexOption while this format is
// still used for writing, we remember to fix this encoding:
assert IndexOptions.values().length == 4;
}
private static byte indexOptionsByte(IndexOptions indexOptions) {
if (indexOptions == null) {
return 0;
} else {
switch (indexOptions) {
case DOCS_ONLY:
return 1;
case DOCS_AND_FREQS:
return 2;
case DOCS_AND_FREQS_AND_POSITIONS:
return 3;
case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
return 4;
default:
// BUG:
throw new AssertionError("unhandled IndexOptions: " + indexOptions);
}
}
}
private static IndexOptions getIndexOptions(IndexInput input, byte b) throws IOException {
switch (b) {
case 0:
return null;
case 1:
return IndexOptions.DOCS_ONLY;
case 2:
return IndexOptions.DOCS_AND_FREQS;
case 3:
return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
case 4:
return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
default:
// BUG
throw new CorruptIndexException("invalid IndexOptions byte: " + b, input);
}
}
@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException {
final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, Lucene50FieldInfosFormat.EXTENSION);
final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
try (IndexOutput output = directory.createOutput(fileName, context)) {
CodecUtil.writeSegmentHeader(output, Lucene50FieldInfosFormat.CODEC_NAME, Lucene50FieldInfosFormat.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
CodecUtil.writeSegmentHeader(output, CODEC_NAME, FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
output.writeVInt(infos.size());
for (FieldInfo fi : infos) {
fi.checkConsistency();
IndexOptions indexOptions = fi.getIndexOptions();
byte bits = 0x0;
if (fi.hasVectors()) bits |= Lucene50FieldInfosFormat.STORE_TERMVECTOR;
if (fi.omitsNorms()) bits |= Lucene50FieldInfosFormat.OMIT_NORMS;
if (fi.hasPayloads()) bits |= Lucene50FieldInfosFormat.STORE_PAYLOADS;
if (fi.isIndexed()) {
bits |= Lucene50FieldInfosFormat.IS_INDEXED;
assert indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0 || !fi.hasPayloads();
if (indexOptions == IndexOptions.DOCS_ONLY) {
bits |= Lucene50FieldInfosFormat.OMIT_TERM_FREQ_AND_POSITIONS;
} else if (indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
bits |= Lucene50FieldInfosFormat.STORE_OFFSETS_IN_POSTINGS;
} else if (indexOptions == IndexOptions.DOCS_AND_FREQS) {
bits |= Lucene50FieldInfosFormat.OMIT_POSITIONS;
}
}
output.writeString(fi.name);
output.writeVInt(fi.number);
byte bits = 0x0;
if (fi.hasVectors()) bits |= STORE_TERMVECTOR;
if (fi.omitsNorms()) bits |= OMIT_NORMS;
if (fi.hasPayloads()) bits |= STORE_PAYLOADS;
output.writeByte(bits);
output.writeByte(indexOptionsByte(fi.getIndexOptions()));
// pack the DV type and hasNorms in one byte
final byte dv = docValuesByte(fi.getDocValuesType());
assert (dv & (~0xF)) == 0;
output.writeByte(dv);
output.writeByte(docValuesByte(fi.getDocValuesType()));
output.writeLong(fi.getDocValuesGen());
output.writeStringStringMap(fi.attributes());
}
@ -225,24 +276,6 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
}
}
private static byte docValuesByte(DocValuesType type) {
if (type == null) {
return 0;
} else if (type == DocValuesType.NUMERIC) {
return 1;
} else if (type == DocValuesType.BINARY) {
return 2;
} else if (type == DocValuesType.SORTED) {
return 3;
} else if (type == DocValuesType.SORTED_SET) {
return 4;
} else if (type == DocValuesType.SORTED_NUMERIC) {
return 5;
} else {
throw new AssertionError();
}
}
/** Extension of field infos */
static final String EXTENSION = "fnm";
@ -252,11 +285,7 @@ public final class Lucene50FieldInfosFormat extends FieldInfosFormat {
static final int FORMAT_CURRENT = FORMAT_START;
// Field flags
static final byte IS_INDEXED = 0x1;
static final byte STORE_TERMVECTOR = 0x2;
static final byte STORE_OFFSETS_IN_POSTINGS = 0x4;
static final byte OMIT_NORMS = 0x10;
static final byte STORE_PAYLOADS = 0x20;
static final byte OMIT_TERM_FREQ_AND_POSITIONS = 0x40;
static final byte OMIT_POSITIONS = -128;
static final byte STORE_TERMVECTOR = 0x1;
static final byte OMIT_NORMS = 0x2;
static final byte STORE_PAYLOADS = 0x4;
}
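A hedged worked example of the per-field encoding described in the format javadoc above, using the flag constants and byte mappings defined in this class; the field itself is hypothetical:

// Hypothetical field: term vectors and payloads on, norms kept,
// indexed with positions, no doc values, no doc-values updates.
byte bits = (byte) (STORE_TERMVECTOR | STORE_PAYLOADS); // 0x1 | 0x4 = 0x5 (OMIT_NORMS not set)
byte indexOptionsCode = 3; // DOCS_AND_FREQS_AND_POSITIONS, per indexOptionsByte(...)
byte docValuesCode = 0;    // no DocValues, per docValuesByte(null)
long dvGen = -1;           // no DocValues updates
// Written per field as: FieldName, FieldNumber, bits, index options byte,
// doc values byte, DocValuesGen, Attributes.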

View File

@ -319,7 +319,7 @@ public final class Document implements IndexDocument {
return new FilterIterator<IndexableField, Field>(fields.iterator()) {
@Override
protected boolean predicateFunction(Field field) {
return field.type.indexed();
return field.type.indexOptions() != null;
}
};
}

View File

@ -69,7 +69,6 @@ public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
final FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setStoreTermVectors(fieldInfo.hasVectors());
ft.setIndexed(fieldInfo.isIndexed());
ft.setOmitNorms(fieldInfo.omitsNorms());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new StoredField(fieldInfo.name, value, ft));

View File

@ -119,7 +119,6 @@ public final class DoubleField extends Field {
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
@ -133,7 +132,6 @@ public final class DoubleField extends Field {
*/
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_STORED.setIndexed(true);
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);

View File

@ -122,7 +122,7 @@ public class Field implements IndexableField, StorableField {
if (type.stored()) {
throw new IllegalArgumentException("fields with a Reader value cannot be stored");
}
if (type.indexed() && !type.tokenized()) {
if (type.indexOptions() != null && !type.tokenized()) {
throw new IllegalArgumentException("non-tokenized fields must use String values");
}
@ -148,7 +148,7 @@ public class Field implements IndexableField, StorableField {
if (tokenStream == null) {
throw new NullPointerException("tokenStream cannot be null");
}
if (!type.indexed() || !type.tokenized()) {
if (type.indexOptions() == null || !type.tokenized()) {
throw new IllegalArgumentException("TokenStream fields must be indexed and tokenized");
}
if (type.stored()) {
@ -214,7 +214,7 @@ public class Field implements IndexableField, StorableField {
if (bytes == null) {
throw new IllegalArgumentException("bytes cannot be null");
}
if (type.indexed()) {
if (type.indexOptions() != null) {
throw new IllegalArgumentException("Fields with BytesRef values cannot be indexed");
}
this.fieldsData = bytes;
@ -241,7 +241,7 @@ public class Field implements IndexableField, StorableField {
if (value == null) {
throw new IllegalArgumentException("value cannot be null");
}
if (!type.stored() && !type.indexed()) {
if (!type.stored() && type.indexOptions() == null) {
throw new IllegalArgumentException("it doesn't make sense to have a field that "
+ "is neither indexed nor stored");
}
@ -338,7 +338,7 @@ public class Field implements IndexableField, StorableField {
if (!(fieldsData instanceof BytesRef)) {
throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to BytesRef");
}
if (type.indexed()) {
if (type.indexOptions() != null) {
throw new IllegalArgumentException("cannot set a BytesRef value on an indexed field");
}
if (value == null) {
@ -419,7 +419,7 @@ public class Field implements IndexableField, StorableField {
* values from stringValue() or getBinaryValue()
*/
public void setTokenStream(TokenStream tokenStream) {
if (!type.indexed() || !type.tokenized()) {
if (type.indexOptions() == null || !type.tokenized()) {
throw new IllegalArgumentException("TokenStream fields must be indexed and tokenized");
}
if (type.numericType() != null) {
@ -452,7 +452,7 @@ public class Field implements IndexableField, StorableField {
*/
public void setBoost(float boost) {
if (boost != 1.0f) {
if (type.indexed() == false || type.omitNorms()) {
if (type.indexOptions() == null || type.omitNorms()) {
throw new IllegalArgumentException("You cannot set an index-time boost on an unindexed field, or one that omits norms");
}
}
@ -502,7 +502,8 @@ public class Field implements IndexableField, StorableField {
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
if (!fieldType().indexed()) {
if (fieldType().indexOptions() == null) {
// Not indexed
return null;
}

View File

@ -43,7 +43,6 @@ public class FieldType implements IndexableFieldType {
DOUBLE
}
private boolean indexed;
private boolean stored;
private boolean tokenized = true;
private boolean storeTermVectors;
@ -51,7 +50,7 @@ public class FieldType implements IndexableFieldType {
private boolean storeTermVectorPositions;
private boolean storeTermVectorPayloads;
private boolean omitNorms;
private IndexOptions indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
private IndexOptions indexOptions;
private NumericType numericType;
private boolean frozen;
private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
@ -61,7 +60,6 @@ public class FieldType implements IndexableFieldType {
* Create a new mutable FieldType with all of the properties from <code>ref</code>
*/
public FieldType(FieldType ref) {
this.indexed = ref.indexed();
this.stored = ref.stored();
this.tokenized = ref.tokenized();
this.storeTermVectors = ref.storeTermVectors();
@ -96,29 +94,6 @@ public class FieldType implements IndexableFieldType {
this.frozen = true;
}
/**
* {@inheritDoc}
* <p>
* The default is <code>false</code>.
* @see #setIndexed(boolean)
*/
@Override
public boolean indexed() {
return this.indexed;
}
/**
* Set to <code>true</code> to index (invert) this field.
* @param value true if this field should be indexed.
* @throws IllegalStateException if this FieldType is frozen against
* future modifications.
* @see #indexed()
*/
public void setIndexed(boolean value) {
checkIfFrozen();
this.indexed = value;
}
/**
* {@inheritDoc}
* <p>
@ -148,7 +123,6 @@ public class FieldType implements IndexableFieldType {
* The default is <code>true</code>.
* @see #setTokenized(boolean)
*/
@Override
public boolean tokenized() {
return this.tokenized;
}
@ -367,7 +341,7 @@ public class FieldType implements IndexableFieldType {
if (stored()) {
result.append("stored");
}
if (indexed()) {
if (indexOptions != null) {
if (result.length() > 0)
result.append(",");
result.append("indexed");
@ -441,7 +415,6 @@ public class FieldType implements IndexableFieldType {
int result = 1;
result = prime * result + ((docValueType == null) ? 0 : docValueType.hashCode());
result = prime * result + ((indexOptions == null) ? 0 : indexOptions.hashCode());
result = prime * result + (indexed ? 1231 : 1237);
result = prime * result + numericPrecisionStep;
result = prime * result + ((numericType == null) ? 0 : numericType.hashCode());
result = prime * result + (omitNorms ? 1231 : 1237);
@ -462,7 +435,6 @@ public class FieldType implements IndexableFieldType {
FieldType other = (FieldType) obj;
if (docValueType != other.docValueType) return false;
if (indexOptions != other.indexOptions) return false;
if (indexed != other.indexed) return false;
if (numericPrecisionStep != other.numericPrecisionStep) return false;
if (numericType != other.numericType) return false;
if (omitNorms != other.omitNorms) return false;

View File

@ -119,7 +119,6 @@ public final class FloatField extends Field {
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
@ -134,7 +133,6 @@ public final class FloatField extends Field {
*/
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_STORED.setIndexed(true);
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);

View File

@ -119,7 +119,6 @@ public final class IntField extends Field {
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
@ -134,7 +133,6 @@ public final class IntField extends Field {
*/
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_STORED.setIndexed(true);
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);

View File

@ -129,7 +129,6 @@ public final class LongField extends Field {
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
@ -143,7 +142,6 @@ public final class LongField extends Field {
*/
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_STORED.setIndexed(true);
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);

View File

@ -36,13 +36,11 @@ public final class StringField extends Field {
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
TYPE_NOT_STORED.setTokenized(false);
TYPE_NOT_STORED.freeze();
TYPE_STORED.setIndexed(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY);
TYPE_STORED.setStored(true);

View File

@ -20,6 +20,7 @@ package org.apache.lucene.document;
import java.io.Reader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.FieldInfo.IndexOptions;
/** A field that is indexed and tokenized, without term
* vectors. For example this would be used on a 'body'
@ -34,11 +35,11 @@ public final class TextField extends Field {
public static final FieldType TYPE_STORED = new FieldType();
static {
TYPE_NOT_STORED.setIndexed(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.freeze();
TYPE_STORED.setIndexed(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE_STORED.setTokenized(true);
TYPE_STORED.setStored(true);
TYPE_STORED.freeze();

View File

@ -361,7 +361,7 @@ final class DefaultIndexingChain extends DocConsumer {
}
private static void verifyFieldType(String name, IndexableFieldType ft) {
if (ft.indexed() == false) {
if (ft.indexOptions() == null) {
if (ft.storeTermVectors()) {
throw new IllegalArgumentException("cannot store term vectors "
+ "for a field that is not indexed (field=\"" + name + "\")");
@ -580,7 +580,6 @@ final class DefaultIndexingChain extends DocConsumer {
// reset the TokenStream to the first token
stream.reset();
invertState.setAttributeSource(stream);
termsHashPerField.start(field, first);
while (stream.incrementToken()) {

View File

@ -34,7 +34,6 @@ public final class FieldInfo {
/** Internal field number */
public final int number;
private boolean indexed;
private DocValuesType docValueType;
// True if any document indexed term vectors
@ -124,14 +123,13 @@ public final class FieldInfo {
*
* @lucene.experimental
*/
public FieldInfo(String name, boolean indexed, int number, boolean storeTermVector, boolean omitNorms,
public FieldInfo(String name, int number, boolean storeTermVector, boolean omitNorms,
boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues,
long dvGen, Map<String,String> attributes) {
this.name = name;
this.indexed = indexed;
this.number = number;
this.docValueType = docValues;
if (indexed) {
if (indexOptions != null) {
this.storeTermVector = storeTermVector;
this.storePayloads = storePayloads;
this.omitNorms = omitNorms;
@ -152,10 +150,7 @@ public final class FieldInfo {
* Always returns true (or throws IllegalStateException)
*/
public boolean checkConsistency() {
if (indexed) {
if (indexOptions == null) {
throw new IllegalStateException("indexed field '" + name + "' must have index options");
}
if (indexOptions != null) {
// Cannot store payloads unless positions are indexed:
if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0 && storePayloads) {
throw new IllegalStateException("indexed field '" + name + "' cannot have payloads without positions");
@ -183,31 +178,33 @@ public final class FieldInfo {
}
void update(IndexableFieldType ft) {
update(ft.indexed(), false, ft.omitNorms(), false, ft.indexOptions());
update(false, ft.omitNorms(), false, ft.indexOptions());
}
// should only be called by FieldInfos#addOrUpdate
void update(boolean indexed, boolean storeTermVector, boolean omitNorms, boolean storePayloads, IndexOptions indexOptions) {
void update(boolean storeTermVector, boolean omitNorms, boolean storePayloads, IndexOptions indexOptions) {
//System.out.println("FI.update field=" + name + " indexed=" + indexed + " omitNorms=" + omitNorms + " this.omitNorms=" + this.omitNorms);
this.indexed |= indexed; // once indexed, always indexed
if (indexed) { // if updated field data is not for indexing, leave the updates out
if (this.indexOptions != indexOptions) {
if (this.indexOptions == null) {
this.indexOptions = indexOptions;
} else if (indexOptions != null) {
// downgrade
this.indexOptions = this.indexOptions.compareTo(indexOptions) < 0 ? this.indexOptions : indexOptions;
}
}
if (this.indexOptions != null) { // if updated field data is not for indexing, leave the updates out
this.storeTermVector |= storeTermVector; // once vector, always vector
this.storePayloads |= storePayloads;
if (this.omitNorms != omitNorms) {
// Awkward: only drop norms if incoming update is indexed:
if (indexOptions != null && this.omitNorms != omitNorms) {
this.omitNorms = true; // if one require omitNorms at least once, it remains off for life
}
if (this.indexOptions != indexOptions) {
if (this.indexOptions == null) {
this.indexOptions = indexOptions;
} else {
// downgrade
this.indexOptions = this.indexOptions.compareTo(indexOptions) < 0 ? this.indexOptions : indexOptions;
}
if (this.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
// cannot store payloads if we don't store positions:
this.storePayloads = false;
}
}
}
if (this.indexOptions == null || this.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
// cannot store payloads if we don't store positions:
this.storePayloads = false;
}
assert checkConsistency();
}
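A brief illustration (not part of this commit) of how the merge logic above behaves now that the indexed flag is gone:

// Hypothetical: field "body" is first updated with DOCS_AND_FREQS_AND_POSITIONS and payloads,
// then updated again with DOCS_ONLY. The index options are downgraded to the weaker of the
// two (DOCS_ONLY), and since positions are no longer indexed, storePayloads is reset to false.
// If a later occurrence is not indexed (indexOptions == null), the existing index options are
// kept, and norms are only dropped when the incoming occurrence is itself indexed.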
@ -259,7 +256,7 @@ public final class FieldInfo {
}
void setStorePayloads() {
if (indexed && indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
if (indexOptions != null && indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
storePayloads = true;
}
assert checkConsistency();
@ -276,14 +273,14 @@ public final class FieldInfo {
* Returns true if this field actually has any norms.
*/
public boolean hasNorms() {
return indexed && omitNorms == false;
return isIndexed() && omitNorms == false;
}
/**
* Returns true if this field is indexed.
* Returns true if this field is indexed (has non-null {@link #getIndexOptions}).
*/
public boolean isIndexed() {
return indexed;
return indexOptions != null;
}
/**

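A hedged sketch of constructing FieldInfo with the new signature (no indexed boolean; a null IndexOptions means "not indexed"); the field names, numbers and flags are made up for illustration:

FieldInfo indexedField = new FieldInfo("body", 0, false, false, false,
IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, -1, null);
FieldInfo storedOnlyField = new FieldInfo("blob", 1, false, false, false,
null, null, -1, null);
// indexedField.isIndexed() == true, storedOnlyField.isIndexed() == false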
View File

@ -284,12 +284,12 @@ public class FieldInfos implements Iterable<FieldInfo> {
// rather, each component in the chain should update
// what it "owns". EG fieldType.indexOptions() should
// be updated by maybe FreqProxTermsWriterPerField:
return addOrUpdateInternal(name, -1, fieldType.indexed(), false,
return addOrUpdateInternal(name, -1, false,
fieldType.omitNorms(), false,
fieldType.indexOptions(), fieldType.docValueType());
}
private FieldInfo addOrUpdateInternal(String name, int preferredFieldNumber, boolean isIndexed,
private FieldInfo addOrUpdateInternal(String name, int preferredFieldNumber,
boolean storeTermVector,
boolean omitNorms, boolean storePayloads, IndexOptions indexOptions, DocValuesType docValues) {
FieldInfo fi = fieldInfo(name);
@ -300,12 +300,12 @@ public class FieldInfos implements Iterable<FieldInfo> {
// before then we'll get the same name and number,
// else we'll allocate a new one:
final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, docValues);
fi = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, -1, null);
fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, -1, null);
assert !byName.containsKey(fi.name);
assert globalFieldNumbers.containsConsistent(Integer.valueOf(fi.number), fi.name, fi.getDocValuesType());
byName.put(fi.name, fi);
} else {
fi.update(isIndexed, storeTermVector, omitNorms, storePayloads, indexOptions);
fi.update(storeTermVector, omitNorms, storePayloads, indexOptions);
if (docValues != null) {
// only pay the synchronization cost if fi does not already have a DVType
@ -323,7 +323,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
public FieldInfo add(FieldInfo fi) {
// IMPORTANT - reuse the field number if possible for consistent field numbers across segments
return addOrUpdateInternal(fi.name, fi.number, fi.isIndexed(), fi.hasVectors(),
return addOrUpdateInternal(fi.name, fi.number, fi.hasVectors(),
fi.omitsNorms(), fi.hasPayloads(),
fi.getIndexOptions(), fi.getDocValuesType());
}

View File

@ -18,18 +18,14 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.similarities.DefaultSimilarity; // javadocs
import org.apache.lucene.search.similarities.Similarity; // javadocs
import org.apache.lucene.util.BytesRef;
// TODO: how to handle versioning here...?
// TODO: we need to break out separate StoredField...
/** Represents a single field for indexing. IndexWriter
* consumes Iterable&lt;IndexableField&gt; as a document.
*
@ -68,7 +64,7 @@ public interface IndexableField extends GeneralField {
* the range of that encoding.
* <p>
* It is illegal to return a boost other than 1.0f for a field that is not
* indexed ({@link IndexableFieldType#indexed()} is false) or omits normalization values
* indexed ({@link IndexableFieldType#indexOptions()} is null) or omits normalization values
* ({@link IndexableFieldType#omitNorms()} returns true).
*
* @see Similarity#computeNorm(FieldInvertState)

View File

@ -27,9 +27,6 @@ import org.apache.lucene.index.FieldInfo.IndexOptions;
*/
public interface IndexableFieldType {
/** True if this field should be indexed (inverted) */
public boolean indexed();
/** True if the field's value should be stored */
public boolean stored();
@ -37,7 +34,7 @@ public interface IndexableFieldType {
* True if this field's value should be analyzed by the
* {@link Analyzer}.
* <p>
* This has no effect if {@link #indexed()} returns false.
* This has no effect if {@link #indexOptions()} returns null.
*/
// TODO: shouldn't we remove this? Whether/how a field is
// tokenized is an impl detail under Field?
@ -51,7 +48,7 @@ public interface IndexableFieldType {
* can be accessed in a document-oriented way from
* {@link IndexReader#getTermVector(int,String)}.
* <p>
* This option is illegal if {@link #indexed()} returns false.
* This option is illegal if {@link #indexOptions()} returns null.
*/
public boolean storeTermVectors();
@ -91,7 +88,8 @@ public interface IndexableFieldType {
public boolean omitNorms();
/** {@link IndexOptions}, describing what should be
* recorded into the inverted index */
* recorded into the inverted index, or null if this field
* is not indexed */
public IndexOptions indexOptions();
/**

View File

@ -113,7 +113,7 @@ final class TermVectorsConsumerPerField extends TermsHashPerField {
@Override
boolean start(IndexableField field, boolean first) {
assert field.fieldType().indexed();
assert field.fieldType().indexOptions() != null;
if (first) {

View File

@ -143,8 +143,10 @@ public final class NumericUtils {
* @param bytes will contain the encoded value
*/
public static void longToPrefixCodedBytes(final long val, final int shift, final BytesRefBuilder bytes) {
if ((shift & ~0x3f) != 0) // ensure shift is 0..63
throw new IllegalArgumentException("Illegal shift value, must be 0..63");
// ensure shift is 0..63
if ((shift & ~0x3f) != 0) {
throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
}
int nChars = (((63-shift)*37)>>8) + 1; // i/7 is the same as (i*37)>>8 for i in 0..63
bytes.setLength(nChars+1); // one extra for the byte that contains the shift info
bytes.grow(BUF_SIZE_LONG);
@ -169,8 +171,10 @@ public final class NumericUtils {
* @param bytes will contain the encoded value
*/
public static void intToPrefixCodedBytes(final int val, final int shift, final BytesRefBuilder bytes) {
if ((shift & ~0x1f) != 0) // ensure shift is 0..31
throw new IllegalArgumentException("Illegal shift value, must be 0..31");
// ensure shift is 0..31
if ((shift & ~0x1f) != 0) {
throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
}
int nChars = (((31-shift)*37)>>8) + 1; // i/7 is the same as (i*37)>>8 for i in 0..63
bytes.setLength(nChars+1); // one extra for the byte that contains the shift info
bytes.grow(NumericUtils.BUF_SIZE_LONG); // use the max

View File

@ -305,7 +305,6 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory());
final Document doc = new Document();
final FieldType ft = new FieldType();
ft.setIndexed(true);
ft.setIndexOptions(IndexOptions.DOCS_ONLY);
ft.setTokenized(true);
ft.setStoreTermVectors(true);

View File

@ -25,6 +25,7 @@ import java.util.List;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.StorableField;
@ -64,7 +65,7 @@ public class TestDocument extends LuceneTestCase {
assertTrue(binaryFld.binaryValue() != null);
assertTrue(binaryFld.fieldType().stored());
assertFalse(binaryFld.fieldType().indexed());
assertNull(binaryFld.fieldType().indexOptions());
String binaryTest = doc.getBinaryValue("binary").utf8ToString();
assertTrue(binaryTest.equals(binaryVal));
@ -263,7 +264,7 @@ public class TestDocument extends LuceneTestCase {
FieldType stored = new FieldType();
stored.setStored(true);
FieldType indexedNotTokenized = new FieldType();
indexedNotTokenized.setIndexed(true);
indexedNotTokenized.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
indexedNotTokenized.setTokenized(false);
doc.add(new StringField("keyword", "test1", Field.Store.YES));
doc.add(new StringField("keyword", "test2", Field.Store.YES));

View File

@ -43,7 +43,7 @@ public class TestFieldType extends LuceneTestCase {
assertFalse(ft4.equals(ft));
FieldType ft5 = new FieldType();
ft5.setIndexed(true);
ft5.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
assertFalse(ft5.equals(ft));
FieldType ft6 = new FieldType();

View File

@ -59,7 +59,6 @@ public class Test4GBStoredFields extends LuceneTestCase {
final Document doc = new Document();
final FieldType ft = new FieldType();
ft.setIndexed(false);
ft.setStored(true);
ft.freeze();
final int valueLength = RandomInts.randomIntBetween(random(), 1 << 13, 1 << 20);

View File

@ -21,7 +21,6 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
@ -35,6 +34,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.PhraseQuery;
@ -1143,7 +1143,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter w = new IndexWriter(toAdd, conf);
Document doc = new Document();
FieldType customType = new FieldType();
customType.setIndexed(true);
customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
doc.add(newField("foo", "bar", customType));
w.addDocument(doc);
w.close();

View File

@ -92,9 +92,6 @@ public class TestCodecs extends LuceneTestCase {
// TODO: change this test to use all three
fieldInfo = fieldInfos.addOrUpdate(name, new IndexableFieldType() {
@Override
public boolean indexed() { return true; }
@Override
public boolean stored() { return false; }

View File

@ -276,7 +276,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
for (FieldInfo fi : fis) {
Field expected = getField(Integer.parseInt(fi.name));
assertEquals(expected.fieldType().indexed(), fi.isIndexed());
assertEquals(expected.fieldType().indexOptions(), fi.getIndexOptions());
assertEquals(expected.fieldType().storeTermVectors(), fi.hasVectors());
}
}

View File

@ -17,17 +17,18 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import java.io.IOException;
import org.junit.Before;
import org.junit.After;
import org.junit.Before;
/**
* Tests that a useful exception is thrown when attempting to index a term that is
@ -58,7 +59,7 @@ public class TestExceedMaxTermLength extends LuceneTestCase {
(dir, newIndexWriterConfig(random(), new MockAnalyzer(random())));
try {
final FieldType ft = new FieldType();
ft.setIndexed(true);
ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
ft.setStored(random().nextBoolean());
ft.freeze();

View File

@ -1247,7 +1247,8 @@ public class TestIndexWriter extends LuceneTestCase {
customType.setTokenized(true);
Field f = new Field("binary", b, 10, 17, customType);
customType.setIndexed(true);
// TODO: this is evil, changing the type after creating the field:
customType.setIndexOptions(IndexOptions.DOCS_ONLY);
final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
doc1field1.setReader(new StringReader("doc1field1"));
f.setTokenStream(doc1field1);

View File

@ -89,7 +89,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
custom1.setStoreTermVectorOffsets(true);
custom2.setStored(true);
custom2.setIndexed(true);
custom2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
custom3.setStored(true);

View File

@ -46,11 +46,6 @@ public class TestIndexableField extends LuceneTestCase {
private final int counter;
private final IndexableFieldType fieldType = new IndexableFieldType() {
@Override
public boolean indexed() {
return (counter % 10) != 3;
}
@Override
public boolean stored() {
return (counter & 1) == 0 || (counter % 10) == 3;
@ -63,7 +58,7 @@ public class TestIndexableField extends LuceneTestCase {
@Override
public boolean storeTermVectors() {
return indexed() && counter % 2 == 1 && counter % 10 != 9;
return indexOptions() != null && counter % 2 == 1 && counter % 10 != 9;
}
@Override
@ -88,7 +83,7 @@ public class TestIndexableField extends LuceneTestCase {
@Override
public FieldInfo.IndexOptions indexOptions() {
return FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
return counter%10 == 3 ? null : FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
}
@Override
@ -208,7 +203,7 @@ public class TestIndexableField extends LuceneTestCase {
next = new MyField(finalBaseCount + (fieldUpto++-1));
}
if (next != null && next.fieldType().indexed()) return true;
if (next != null && next.fieldType().indexOptions() != null) return true;
else return this.hasNext();
}

View File

@ -362,7 +362,6 @@ public class TestPostingsOffsets extends LuceneTestCase {
if (i < 99 && random().nextInt(2) == 0) {
// stored only
FieldType ft = new FieldType();
ft.setIndexed(false);
ft.setStored(true);
doc.add(new Field("foo", "boo!", ft));
} else {

View File

@ -176,7 +176,7 @@ public class TestSegmentReader extends LuceneTestCase {
// test omit norms
for (int i=0; i<DocHelper.fields.length; i++) {
IndexableField f = DocHelper.fields[i];
if (f.fieldType().indexed()) {
if (f.fieldType().indexOptions() != null) {
assertEquals(reader.getNormValues(f.name()) != null, !f.fieldType().omitNorms());
assertEquals(reader.getNormValues(f.name()) != null, !DocHelper.noNorms.containsKey(f.name()));
if (reader.getNormValues(f.name()) == null) {

View File

@ -31,6 +31,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermQuery;
@ -815,12 +816,12 @@ public class TestStressIndexing2 extends LuceneTestCase {
case 0:
customType.setStored(true);
customType.setOmitNorms(true);
customType.setIndexed(true);
customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
customType.freeze();
fields.add(newField(fieldName, getString(1), customType));
break;
case 1:
customType.setIndexed(true);
customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
customType.setTokenized(true);
customType.freeze();
fields.add(newField(fieldName, getString(0), customType));
@ -835,7 +836,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
case 3:
customType.setStored(true);
customType.setIndexed(true);
customType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
customType.setTokenized(true);
customType.freeze();
fields.add(newField(fieldName, getString(bigFieldSize), customType));

View File

@ -111,7 +111,7 @@ public class TestSimilarityBase extends LuceneTestCase {
for (int i = 0; i < docs.length; i++) {
Document d = new Document();
FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setIndexed(false);
ft.setIndexOptions(null);
d.add(newField(FIELD_ID, Integer.toString(i), ft));
d.add(newTextField(FIELD_BODY, docs[i], Field.Store.YES));
writer.addDocument(d);

View File

@ -22,6 +22,7 @@ import java.util.Arrays;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
/**
* Add an instance of this to your {@link Document} for every facet label.
@ -33,7 +34,7 @@ import org.apache.lucene.document.FieldType;
public class FacetField extends Field {
static final FieldType TYPE = new FieldType();
static {
TYPE.setIndexed(true);
TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE.freeze();
}

View File

@ -20,6 +20,7 @@ package org.apache.lucene.facet.sortedset;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.facet.FacetField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
/** Add an instance of this to your Document for every facet
* label to be indexed via SortedSetDocValues. */
@ -28,7 +29,7 @@ public class SortedSetDocValuesFacetField extends Field {
/** Indexed {@link FieldType}. */
public static final FieldType TYPE = new FieldType();
static {
TYPE.setIndexed(true);
TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE.freeze();
}

View File

@ -24,6 +24,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.facet.FacetField;
import org.apache.lucene.facet.Facets;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.util.BytesRef;
/** Add an instance of this to your {@link Document} to add
@ -40,7 +41,7 @@ public class AssociationFacetField extends Field {
/** Indexed {@link FieldType}. */
public static final FieldType TYPE = new FieldType();
static {
TYPE.setIndexed(true);
TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE.freeze();
}

View File

@ -445,7 +445,7 @@ public class MemoryIndex {
if (!fieldInfos.containsKey(fieldName)) {
fieldInfos.put(fieldName,
new FieldInfo(fieldName, true, fieldInfos.size(), false, false, false, this.storeOffsets ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS , null, -1, null));
new FieldInfo(fieldName, fieldInfos.size(), false, false, false, this.storeOffsets ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, -1, null));
}
TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
PositionIncrementAttribute posIncrAttribute = stream.addAttribute(PositionIncrementAttribute.class);

View File

@ -432,7 +432,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
Document nextDoc = lineFileDocs.nextDoc();
Document doc = new Document();
for (Field field : nextDoc.getFields()) {
if (field.fieldType().indexed()) {
if (field.fieldType().indexOptions() != null) {
doc.add(field);
if (random().nextInt(3) == 0) {
doc.add(field); // randomly add the same field twice
@ -443,7 +443,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
for (IndexableField field : doc.indexableFields()) {
memory.addField(field.name(), ((Field)field).stringValue(), mockAnalyzer);
memory.addField(field.name(), ((Field)field).stringValue(), mockAnalyzer);
}
DirectoryReader competitor = DirectoryReader.open(dir);
LeafReader memIndexReader= (LeafReader) memory.createSearcher().getIndexReader();

View File

@ -212,7 +212,7 @@ public class UninvertingReader extends FilterLeafReader {
}
}
}
filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
filteredInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
fi.hasPayloads(), fi.getIndexOptions(), type, -1, null));
}
fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));

View File

@ -212,7 +212,6 @@ public class TestLazyDocument extends LuceneTestCase {
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
final FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setStoreTermVectors(fieldInfo.hasVectors());
ft.setIndexed(fieldInfo.isIndexed());
ft.setOmitNorms(fieldInfo.omitsNorms());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new Field(fieldInfo.name, value, ft));

View File

@ -34,6 +34,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
@ -272,7 +273,7 @@ public class TestAnalyzingQueryParser extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random(), ramDir, analyzer);
Document doc = new Document();
FieldType fieldType = new FieldType();
fieldType.setIndexed(true);
fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
fieldType.setTokenized(true);
fieldType.setStored(true);
Field field = new Field(FIELD, content, fieldType);
@ -292,4 +293,4 @@ public class TestAnalyzingQueryParser extends LuceneTestCase {
}
}
}
}

View File

@ -36,7 +36,6 @@ class StringAndPayloadField extends Field {
public static final FieldType TYPE = new FieldType();
static {
TYPE.setIndexed(true);
TYPE.setOmitNorms(true);
TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
TYPE.setTokenized(true);

View File

@ -130,7 +130,7 @@ public class BBoxStrategy extends SpatialStrategy {
//for xdlFieldType, copy some similar options. Don't do docValues since it isn't needed here.
xdlFieldType = new FieldType(StringField.TYPE_NOT_STORED);
xdlFieldType.setStored(fieldType.stored());
xdlFieldType.setIndexed(fieldType.indexed());
xdlFieldType.setIndexOptions(fieldType.indexOptions());
xdlFieldType.freeze();
}

View File

@ -142,7 +142,6 @@ public abstract class PrefixTreeStrategy extends SpatialStrategy {
public static final FieldType FIELD_TYPE = new FieldType();
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(true);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

View File

@ -96,7 +96,7 @@ public class DistanceStrategyTest extends StrategyTestCase {
if (strategy instanceof BBoxStrategy && random().nextBoolean()) {//disable indexing sometimes
BBoxStrategy bboxStrategy = (BBoxStrategy)strategy;
final FieldType fieldType = new FieldType(bboxStrategy.getFieldType());
fieldType.setIndexed(false);
fieldType.setIndexOptions(null);
bboxStrategy.setFieldType(fieldType);
}
}

View File

@ -292,7 +292,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
BBoxStrategy bboxStrategy = (BBoxStrategy) strategy;
if (random().nextBoolean()) {
FieldType fieldType = new FieldType(bboxStrategy.getFieldType());
fieldType.setIndexed(false);
fieldType.setIndexOptions(null);
bboxStrategy.setFieldType(fieldType);
}

View File

@ -84,7 +84,7 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
for (String field : fieldNames) {
IndexableFieldType fieldType = randomFieldType(random());
FieldInfo fi = builder.addOrUpdate(field, fieldType);
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
if (fieldType.indexOptions() != null && fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
if (random().nextBoolean()) {
fi.setStorePayloads();
}
@ -101,8 +101,7 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
private final IndexableFieldType randomFieldType(Random r) {
FieldType type = new FieldType();
type.setIndexed(r.nextBoolean());
if (type.indexed()) {
if (r.nextBoolean()) {
IndexOptions values[] = IndexOptions.values();
type.setIndexOptions(values[r.nextInt(values.length)]);
type.setOmitNorms(r.nextBoolean());

View File

@ -370,7 +370,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
continue;
}
fieldInfoArray[fieldUpto] = new FieldInfo(field, true, fieldUpto, false, false, true,
fieldInfoArray[fieldUpto] = new FieldInfo(field, fieldUpto, false, false, true,
IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS,
null, -1, null);
fieldUpto++;
@ -694,7 +694,6 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
boolean doPayloads = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0 && allowPayloads;
newFieldInfoArray[fieldUpto] = new FieldInfo(oldFieldInfo.name,
true,
fieldUpto,
false,
false,
@ -1739,7 +1738,6 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
for (IndexOptions opts : IndexOptions.values()) {
FieldType ft = new FieldType();
ft.setIndexOptions(opts);
ft.setIndexed(true);
ft.freeze();
final int numFields = random().nextInt(5);
for (int j = 0; j < numFields; ++j) {
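
FieldInfo loses its isIndexed boolean in the same way: whether a field is indexed falls out of the IndexOptions argument, with null meaning not indexed. A hedged sketch of the new constructor call, mirroring the argument order used in the hunks for this file (the field name and number are placeholders):

    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.FieldInfo.IndexOptions;

    // Hypothetical example, not part of the commit: the old boolean isIndexed
    // argument is gone; a non-null IndexOptions marks the field as indexed.
    class FieldInfoExample {
      static FieldInfo indexedFieldInfo(String name, int number) {
        return new FieldInfo(name, number, false, false, true,
            IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS,
            null, -1, null);
      }
    }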

View File

@ -321,8 +321,8 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
w.addDocument(doc);
IndexReader r = w.getReader();
w.close();
assertFalse(r.document(0).getField("field").fieldType().indexed());
assertTrue(r.document(0).getField("field2").fieldType().indexed());
assertNull(r.document(0).getField("field").fieldType().indexOptions());
assertNotNull(r.document(0).getField("field2").fieldType().indexOptions());
r.close();
dir.close();
}
@ -516,7 +516,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
}
final FieldType type = new FieldType(StringField.TYPE_STORED);
type.setIndexed(false);
type.setIndexOptions(null);
type.freeze();
IntField id = new IntField("id", 0, Store.YES);
for (int i = 0; i < data.length; ++i) {
@ -606,7 +606,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
bigDoc2.add(idField);
final FieldType onlyStored = new FieldType(StringField.TYPE_STORED);
onlyStored.setIndexed(false);
onlyStored.setIndexOptions(null);
final Field smallField = new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored);
final int numFields = RandomInts.randomIntBetween(random(), 500000, 1000000);

View File

@ -204,10 +204,10 @@ class DocHelper {
for (int i=0; i<fields.length; i++) {
IndexableField f = fields[i];
add(all,f);
if (f.fieldType().indexed()) add(indexed,f);
if (f.fieldType().indexOptions() != null) add(indexed,f);
else add(unindexed,f);
if (f.fieldType().storeTermVectors()) add(termvector,f);
if (f.fieldType().indexed() && !f.fieldType().storeTermVectors()) add(notermvector,f);
if (f.fieldType().indexOptions() != null && !f.fieldType().storeTermVectors()) add(notermvector,f);
if (f.fieldType().stored()) add(stored,f);
else add(unstored,f);
if (f.fieldType().indexOptions() == IndexOptions.DOCS_ONLY) add(noTf,f);
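
Call sites that used to ask fieldType.indexed() now test indexOptions() for null, as the DocHelper hunk above shows. A one-line predicate capturing the idiom (hypothetical helper, same API assumptions):

    import org.apache.lucene.index.IndexableField;

    // Hypothetical predicate, not part of the commit.
    class IndexedCheckExample {
      static boolean isIndexed(IndexableField f) {
        // Replaces the removed f.fieldType().indexed() accessor.
        return f.fieldType().indexOptions() != null;
      }
    }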

View File

@ -41,6 +41,7 @@ import org.apache.lucene.document.IntField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
/** Minimal port of benchmark's LineDocSource +
* DocMaker, so tests can enum docs from a line file created
@ -170,6 +171,7 @@ public class LineFileDocs implements Closeable {
doc.add(title);
FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorOffsets(true);
ft.setStoreTermVectorPositions(true);

View File

@ -1338,7 +1338,7 @@ public abstract class LuceneTestCase extends Assert {
/** Returns a FieldType derived from newType but whose
* term vector options match the old type */
private static FieldType mergeTermVectorOptions(FieldType newType, FieldType oldType) {
if (newType.indexed() && oldType.storeTermVectors() == true && newType.storeTermVectors() == false) {
if (newType.indexOptions() != null && oldType.storeTermVectors() == true && newType.storeTermVectors() == false) {
newType = new FieldType(newType);
newType.setStoreTermVectors(oldType.storeTermVectors());
newType.setStoreTermVectorPositions(oldType.storeTermVectorPositions());
@ -1363,7 +1363,7 @@ public abstract class LuceneTestCase extends Assert {
FieldType prevType = fieldToType.get(name);
if (usually(random) || !type.indexed() || prevType != null) {
if (usually(random) || type.indexOptions() == null || prevType != null) {
// most of the time, don't modify the params
if (prevType == null) {
fieldToType.put(name, new FieldType(type));

View File

@ -176,7 +176,7 @@ public class LukeRequestHandler extends RequestHandlerBase
StringBuilder flags = new StringBuilder();
flags.append( (f != null && f.fieldType().indexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-' );
flags.append( (f != null && f.fieldType().indexOptions() != null) ? FieldFlag.INDEXED.getAbbreviation() : '-' );
flags.append( (f != null && f.fieldType().tokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
flags.append( (f != null && f.fieldType().stored()) ? FieldFlag.STORED.getAbbreviation() : '-' );
flags.append( (f != null && f.fieldType().docValueType() != null) ? FieldFlag.DOC_VALUES.getAbbreviation() : "-" );

View File

@ -381,11 +381,10 @@ public class EnumField extends PrimitiveFieldType {
String intAsString = intValue.toString();
final FieldType newType = new FieldType();
newType.setIndexed(field.indexed());
newType.setTokenized(field.isTokenized());
newType.setStored(field.stored());
newType.setOmitNorms(field.omitNorms());
newType.setIndexOptions(getIndexOptions(field, intAsString));
newType.setIndexOptions(field.indexed() ? getIndexOptions(field, intAsString) : null);
newType.setStoreTermVectors(field.storeTermVector());
newType.setStoreTermVectorOffsets(field.storeTermOffsets());
newType.setStoreTermVectorPositions(field.storeTermPositions());

View File

@ -265,11 +265,10 @@ public abstract class FieldType extends FieldProperties {
if (val==null) return null;
org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
newType.setIndexed(field.indexed());
newType.setTokenized(field.isTokenized());
newType.setStored(field.stored());
newType.setOmitNorms(field.omitNorms());
newType.setIndexOptions(getIndexOptions(field, val));
newType.setIndexOptions(field.indexed() ? getIndexOptions(field, val) : null);
newType.setStoreTermVectors(field.storeTermVector());
newType.setStoreTermVectorOffsets(field.storeTermOffsets());
newType.setStoreTermVectorPositions(field.storeTermPositions());
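
On the Solr side the schema keeps its own indexed flag, so building the Lucene FieldType means folding that flag into IndexOptions: real options when the SchemaField is indexed, null otherwise. A sketch of that mapping; the options argument stands in for the schema-specific getIndexOptions helper shown above:

    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.FieldInfo.IndexOptions;
    import org.apache.solr.schema.SchemaField;

    // Hypothetical sketch, not part of the commit.
    class SchemaToLuceneTypeExample {
      static FieldType luceneType(SchemaField field, IndexOptions optionsWhenIndexed) {
        FieldType newType = new FieldType();
        newType.setTokenized(field.isTokenized());
        newType.setStored(field.stored());
        newType.setOmitNorms(field.omitNorms());
        // The schema-level indexed flag is expressed through IndexOptions:
        // indexed fields get real options, everything else gets null.
        newType.setIndexOptions(field.indexed() ? optionsWhenIndexed : null);
        return newType;
      }
    }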

View File

@ -163,7 +163,6 @@ public class PreAnalyzedField extends FieldType {
return null;
}
org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
newType.setIndexed(field.indexed());
newType.setTokenized(field.isTokenized());
newType.setStored(field.stored());
newType.setOmitNorms(field.omitNorms());
@ -243,7 +242,6 @@ public class PreAnalyzedField extends FieldType {
if (parse.hasTokenStream()) {
if (field.indexed()) {
type.setIndexed(true);
type.setTokenized(true);
if (f != null) {
f.setTokenStream(parse);
@ -252,7 +250,7 @@ public class PreAnalyzedField extends FieldType {
}
} else {
if (f != null) {
f.fieldType().setIndexed(false);
f.fieldType().setIndexOptions(null);
f.fieldType().setTokenized(false);
}
}

View File

@ -600,9 +600,8 @@ public class TrieField extends PrimitiveFieldType {
FieldType ft = new FieldType();
ft.setStored(stored);
ft.setTokenized(true);
ft.setIndexed(indexed);
ft.setOmitNorms(field.omitNorms());
ft.setIndexOptions(getIndexOptions(field, value.toString()));
ft.setIndexOptions(indexed ? getIndexOptions(field, value.toString()) : null);
switch (type) {
case INTEGER:

View File

@ -64,7 +64,7 @@ public class Insanity {
ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
for (FieldInfo fi : in.getFieldInfos()) {
if (fi.name.equals(insaneField)) {
filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
filteredInfos.add(new FieldInfo(fi.name, fi.number, fi.hasVectors(), fi.omitsNorms(),
fi.hasPayloads(), fi.getIndexOptions(), null, -1, null));
} else {
filteredInfos.add(fi);

View File

@ -608,7 +608,6 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
final FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setStoreTermVectors(fieldInfo.hasVectors());
ft.setIndexed(fieldInfo.isIndexed());
ft.setOmitNorms(fieldInfo.omitsNorms());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new Field(fieldInfo.name, value, ft));
@ -618,7 +617,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
public void intField(FieldInfo fieldInfo, int value) {
FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
ft.setStored(true);
ft.setIndexed(fieldInfo.isIndexed());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new IntField(fieldInfo.name, value, ft));
}
@ -626,7 +625,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
public void longField(FieldInfo fieldInfo, long value) {
FieldType ft = new FieldType(LongField.TYPE_NOT_STORED);
ft.setStored(true);
ft.setIndexed(fieldInfo.isIndexed());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new LongField(fieldInfo.name, value, ft));
}
@ -634,7 +633,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
public void floatField(FieldInfo fieldInfo, float value) {
FieldType ft = new FieldType(FloatField.TYPE_NOT_STORED);
ft.setStored(true);
ft.setIndexed(fieldInfo.isIndexed());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new FloatField(fieldInfo.name, value, ft));
}
@ -642,7 +641,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
public void doubleField(FieldInfo fieldInfo, double value) {
FieldType ft = new FieldType(DoubleField.TYPE_NOT_STORED);
ft.setStored(true);
ft.setIndexed(fieldInfo.isIndexed());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new DoubleField(fieldInfo.name, value, ft));
}
}
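
When SolrIndexSearcher rebuilds a stored document through its StoredFieldVisitor, the reconstructed FieldType now copies its index options straight from the FieldInfo, which also answers whether the field was indexed at all. A sketch of the numeric case, under the same API assumptions:

    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.index.FieldInfo;

    // Hypothetical helper mirroring the intField() visitor method above.
    class VisitorFieldTypeExample {
      static FieldType storedIntType(FieldInfo fieldInfo) {
        FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
        ft.setStored(true);
        // Copying the options covers both "is it indexed?" and "how is it
        // indexed?"; the separate setIndexed(fieldInfo.isIndexed()) copy is gone.
        ft.setIndexOptions(fieldInfo.getIndexOptions());
        return ft;
      }
    }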

View File

@ -83,14 +83,12 @@ public class TestStressLucene extends TestRTGBase {
final FieldType idFt = new FieldType();
idFt.setIndexed(true);
idFt.setStored(true);
idFt.setOmitNorms(true);
idFt.setTokenized(false);
idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
final FieldType ft2 = new FieldType();
ft2.setIndexed(false);
ft2.setStored(true);

View File

@ -34,7 +34,7 @@ public class TestExceedMaxTermLength extends SolrTestCaseJ4 {
public final static String TEST_SCHEMAXML_NAME = "schema11.xml";
private final static int minTestTermLength = IndexWriter.MAX_TERM_LENGTH + 1;
private final static int maxTestTermLegnth = IndexWriter.MAX_TERM_LENGTH * 2;
private final static int maxTestTermLength = IndexWriter.MAX_TERM_LENGTH * 2;
@BeforeClass
public static void beforeTests() throws Exception {
@ -54,12 +54,12 @@ public class TestExceedMaxTermLength extends SolrTestCaseJ4 {
final String longFieldName = "cat";
final String longFieldValue = TestUtil.randomSimpleString(random(),
minTestTermLength,
maxTestTermLegnth);
maxTestTermLength);
final String okayFieldName = TestUtil.randomSimpleString(random(), 1, 50) + "_sS" ; //Dynamic field
final String okayFieldValue = TestUtil.randomSimpleString(random(),
minTestTermLength,
maxTestTermLegnth);
maxTestTermLength);
boolean includeOkayFields = random().nextBoolean();
@ -105,12 +105,12 @@ public class TestExceedMaxTermLength extends SolrTestCaseJ4 {
final String longFieldName = "cat_length";
final String longFieldValue = TestUtil.randomSimpleString(random(),
minTestTermLength,
maxTestTermLegnth);
maxTestTermLength);
final String okayFieldName = TestUtil.randomSimpleString(random(), 1, 50) + "_sS" ; //Dynamic field
final String okayFieldValue = TestUtil.randomSimpleString(random(),
minTestTermLength,
maxTestTermLegnth);
maxTestTermLength);
boolean includeOkayFields = random().nextBoolean();
@ -138,7 +138,6 @@ public class TestExceedMaxTermLength extends SolrTestCaseJ4 {
updateJ(json(jsonStr), null);
}
} catch (Exception e) {
//expected
fail("Should not have failed adding doc " + jsonStr);
String msg= e.getCause().getMessage();
assertTrue(msg.contains("one immense term in field=\"cat\""));