LUCENE-6041: remove FieldInfo.isIndexed/hasDocValues sugar APIs

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1636166 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael McCandless
Date: 2014-11-02 19:03:11 +00:00
parent 6d827b409a
commit 17ab4645d5
22 changed files with 108 additions and 117 deletions
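For callers of the removed sugar methods, the migration is mechanical and is applied the same way in every file below: fi.isIndexed() becomes fi.getIndexOptions() != IndexOptions.NONE, and fi.hasDocValues() becomes fi.getDocValuesType() != DocValuesType.NONE. A minimal sketch of the old and new idioms follows; the helper class and method names are illustrative only, not part of Lucene:

import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;

// Hypothetical helper showing the migration; not part of the Lucene API.
final class FieldInfoChecks {

  // Was: fi.isIndexed()
  static boolean isIndexed(FieldInfo fi) {
    return fi.getIndexOptions() != IndexOptions.NONE;
  }

  // Was: fi.hasDocValues()
  static boolean hasDocValues(FieldInfo fi) {
    return fi.getDocValuesType() != DocValuesType.NONE;
  }
}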

@@ -201,6 +201,9 @@ Bug Fixes
org.apache.lucene.index (Simon Willnauer, Robert Muir, Mike
McCandless)
* LUCENE-6041: Remove sugar methods FieldInfo.isIndexed and
FieldInfo.hasDocValues. (Robert Muir, Mike McCandless)
Documentation
* LUCENE-5392: Add/improve analysis package documentation to reflect

@@ -30,6 +30,7 @@ import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SegmentReadState;
@@ -251,7 +252,7 @@ public abstract class PerFieldDocValuesFormat extends DocValuesFormat {
try {
// Read field name -> format name
for (FieldInfo fi : readState.fieldInfos) {
if (fi.hasDocValues()) {
if (fi.getDocValuesType() != DocValuesType.NONE) {
final String fieldName = fi.name;
final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
if (formatName != null) {

@@ -36,6 +36,7 @@ import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.Terms;
@@ -242,7 +243,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
try {
// Read field name -> format name
for (FieldInfo fi : readState.fieldInfos) {
if (fi.isIndexed()) {
if (fi.getIndexOptions() != IndexOptions.NONE) {
final String fieldName = fi.name;
final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
if (formatName != null) {

@@ -52,7 +52,6 @@ import org.apache.lucene.util.LongBitSet;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.Version;
/**
* Basic tool and API to check the health of an index and
* write a new segments file that removes reference to
@@ -906,7 +905,7 @@ public class CheckIndex implements Closeable {
if (fieldInfo == null) {
throw new RuntimeException("fieldsEnum inconsistent with fieldInfos, no fieldInfos for: " + field);
}
if (!fieldInfo.isIndexed()) {
if (fieldInfo.getIndexOptions() == IndexOptions.NONE) {
throw new RuntimeException("fieldsEnum inconsistent with fieldInfos, isIndexed == false for: " + field);
}
@@ -1535,7 +1534,7 @@
infoStream.print(" test: docvalues...........");
}
for (FieldInfo fieldInfo : reader.getFieldInfos()) {
if (fieldInfo.hasDocValues()) {
if (fieldInfo.getDocValuesType() != DocValuesType.NONE) {
status.totalValueFields++;
checkDocValues(fieldInfo, reader, infoStream, status);
} else {

@@ -128,7 +128,7 @@ final class DefaultIndexingChain extends DocConsumer {
PerField perField = fieldHash[i];
while (perField != null) {
if (perField.docValuesWriter != null) {
if (perField.fieldInfo.hasDocValues() == false) {
if (perField.fieldInfo.getDocValuesType() == DocValuesType.NONE) {
// BUG
throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has no docValues but wrote them");
}
@@ -141,7 +141,7 @@ final class DefaultIndexingChain extends DocConsumer {
perField.docValuesWriter.finish(docCount);
perField.docValuesWriter.flush(state, dvConsumer);
perField.docValuesWriter = null;
} else if (perField.fieldInfo.hasDocValues()) {
} else if (perField.fieldInfo.getDocValuesType() != DocValuesType.NONE) {
// BUG
throw new AssertionError("segment=" + state.segmentInfo + ": field=\"" + perField.fieldInfo.name + "\" has docValues but did not write them");
}
@@ -198,7 +198,7 @@ final class DefaultIndexingChain extends DocConsumer {
// we must check the final value of omitNorms for the fieldinfo: it could have
// changed for this field since the first time we added it.
if (fi.omitsNorms() == false && fi.isIndexed()) {
if (fi.omitsNorms() == false && fi.getIndexOptions() != IndexOptions.NONE) {
assert perField.norms != null: "field=" + fi.name;
perField.norms.finish(state.segmentInfo.getDocCount());
perField.norms.flush(state, normsConsumer);
@@ -406,9 +406,7 @@ final class DefaultIndexingChain extends DocConsumer {
* value */
private void indexDocValue(PerField fp, DocValuesType dvType, StorableField field) throws IOException {
boolean hasDocValues = fp.fieldInfo.hasDocValues();
if (hasDocValues == false) {
if (fp.fieldInfo.getDocValuesType() == DocValuesType.NONE) {
// This will throw an exc if the caller tried to
// change the DV type for the field:
fieldInfos.globalFieldNumbers.setDocValuesType(fp.fieldInfo.number, fp.fieldInfo.name, dvType);

@@ -144,9 +144,6 @@ public final class FieldInfo {
}
void setDocValuesType(DocValuesType type) {
if (type == null) {
throw new NullPointerException("DocValuesType cannot be null (field: \"" + name + "\")");
}
if (docValuesType != DocValuesType.NONE && docValuesType != type) {
throw new IllegalArgumentException("cannot change DocValues type from " + docValuesType + " to " + type + " for field \"" + name + "\"");
}
@@ -159,13 +156,6 @@ public final class FieldInfo {
return indexOptions;
}
/**
* Returns true if this field has any docValues.
*/
public boolean hasDocValues() {
return docValuesType != DocValuesType.NONE;
}
/**
* Returns {@link DocValuesType} of the docValues; this is
* {@code DocValuesType.NONE} if the field has no docvalues.
@@ -211,14 +201,7 @@ public final class FieldInfo {
* Returns true if this field actually has any norms.
*/
public boolean hasNorms() {
return isIndexed() && omitNorms == false;
}
/**
* Returns true if this field is indexed ({@link #getIndexOptions} is not IndexOptions.NONE).
*/
public boolean isIndexed() {
return indexOptions != IndexOptions.NONE;
return indexOptions != IndexOptions.NONE && omitNorms == false;
}
/**

@@ -68,11 +68,11 @@ public class FieldInfos implements Iterable<FieldInfo> {
}
hasVectors |= info.hasVectors();
hasProx |= info.isIndexed() && info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
hasFreq |= info.isIndexed() && info.getIndexOptions() != IndexOptions.DOCS;
hasOffsets |= info.isIndexed() && info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
hasProx |= info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
hasFreq |= info.getIndexOptions() != IndexOptions.DOCS;
hasOffsets |= info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
hasNorms |= info.hasNorms();
hasDocValues |= info.hasDocValues();
hasDocValues |= info.getDocValuesType() != DocValuesType.NONE;
hasPayloads |= info.hasPayloads();
}
@@ -314,7 +314,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
if (docValues != DocValuesType.NONE) {
// Only pay the synchronization cost if fi does not already have a DVType
boolean updateGlobal = !fi.hasDocValues();
boolean updateGlobal = fi.getDocValuesType() == DocValuesType.NONE;
if (updateGlobal) {
// Must also update docValuesType map so it's
// aware of this field's DocValueType. This will throw IllegalArgumentException if

@@ -89,7 +89,7 @@ final class FreqProxTermsWriter extends TermsHash {
final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) f;
if (perField.bytesHash.size() > 0) {
perField.sortPostings();
assert perField.fieldInfo.isIndexed();
assert perField.fieldInfo.getIndexOptions() != IndexOptions.NONE;
allFields.add(perField);
}
}

@@ -19,11 +19,11 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Collection;
import java.util.HashSet;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.util.Bits;
@@ -271,7 +271,7 @@ public final class MultiFields extends Fields {
public static Collection<String> getIndexedFields(IndexReader reader) {
final Collection<String> fields = new HashSet<>();
for(final FieldInfo fieldInfo : getMergedFieldInfos(reader)) {
if (fieldInfo.isIndexed()) {
if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
fields.add(fieldInfo.name);
}
}

@@ -53,7 +53,7 @@ class SegmentDocValuesProducer extends DocValuesProducer {
try {
DocValuesProducer baseProducer = null;
for (FieldInfo fi : fieldInfos) {
if (!fi.hasDocValues()) {
if (fi.getDocValuesType() == DocValuesType.NONE) {
continue;
}
long docValuesGen = fi.getDocValuesGen();

@@ -44,7 +44,7 @@ public class SimpleMergedSegmentWarmer extends IndexReaderWarmer {
int docValuesCount = 0;
int normsCount = 0;
for (FieldInfo info : reader.getFieldInfos()) {
if (info.isIndexed()) {
if (info.getIndexOptions() != IndexOptions.NONE) {
reader.terms(info.name);
indexedCount++;
@@ -54,7 +54,7 @@ public class SimpleMergedSegmentWarmer extends IndexReaderWarmer {
}
}
if (info.hasDocValues()) {
if (info.getDocValuesType() != DocValuesType.NONE) {
switch(info.getDocValuesType()) {
case NUMERIC:
reader.getNumericDocValues(info.name);

@@ -255,7 +255,7 @@ public class TestDirectoryReader extends LuceneTestCase {
for(FieldInfo fieldInfo : fieldInfos) {
final String name = fieldInfo.name;
allFieldNames.add(name);
if (fieldInfo.isIndexed()) {
if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
indexedFieldNames.add(name);
} else {
notIndexedFieldNames.add(name);

@@ -206,7 +206,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
LeafReader slow = SlowCompositeReaderWrapper.wrap(r);
FieldInfos fi = slow.getFieldInfos();
FieldInfo dvInfo = fi.fieldInfo("dv");
assertTrue(dvInfo.hasDocValues());
assertTrue(dvInfo.getDocValuesType() != DocValuesType.NONE);
NumericDocValues dv = slow.getNumericDocValues("dv");
for (int i = 0; i < 50; i++) {
assertEquals(i, dv.get(i));

@@ -93,7 +93,7 @@ public class TestDocumentWriter extends LuceneTestCase {
// test that the norms are not present in the segment if
// omitNorms is true
for (FieldInfo fi : reader.getFieldInfos()) {
if (fi.isIndexed()) {
if (fi.getIndexOptions() != IndexOptions.NONE) {
assertTrue(fi.omitsNorms() == (reader.getNormValues(fi.name) == null));
}
}

@@ -84,14 +84,14 @@ public class TestSegmentReader extends LuceneTestCase {
for(FieldInfo fieldInfo : reader.getFieldInfos()) {
final String name = fieldInfo.name;
allFieldNames.add(name);
if (fieldInfo.isIndexed()) {
if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
indexedFieldNames.add(name);
} else {
notIndexedFieldNames.add(name);
}
if (fieldInfo.hasVectors()) {
tvFieldNames.add(name);
} else if (fieldInfo.isIndexed()) {
} else if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
noTVFieldNames.add(name);
}
}

@@ -24,12 +24,13 @@ import java.util.Collections;
import java.util.List;
import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -282,7 +283,7 @@ public class DocTermOrds implements Accountable {
/** Call this only once (if you subclass!) */
protected void uninvert(final LeafReader reader, Bits liveDocs, final BytesRef termPrefix) throws IOException {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info != null && info.hasDocValues()) {
if (info != null && info.getDocValuesType() != DocValuesType.NONE) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
}
//System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);

@@ -26,11 +26,13 @@ import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.SortedDocValues;
@@ -356,9 +358,9 @@ class FieldCacheImpl implements FieldCache {
if (fieldInfo == null) {
// field does not exist or has no value
return new Bits.MatchNoBits(reader.maxDoc());
} else if (fieldInfo.hasDocValues()) {
} else if (fieldInfo.getDocValuesType() != DocValuesType.NONE) {
return reader.getDocsWithField(field);
} else if (!fieldInfo.isIndexed()) {
} else if (fieldInfo.getIndexOptions() == IndexOptions.NONE) {
return new Bits.MatchNoBits(reader.maxDoc());
}
BitsEntry bitsEntry = (BitsEntry) caches.get(DocsWithFieldCache.class).get(reader, new CacheKey(field, null), false);
@@ -459,9 +461,9 @@ class FieldCacheImpl implements FieldCache {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return DocValues.emptyNumeric();
} else if (info.hasDocValues()) {
} else if (info.getDocValuesType() != DocValuesType.NONE) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
} else if (info.getIndexOptions() == IndexOptions.NONE) {
return DocValues.emptyNumeric();
}
return (NumericDocValues) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
@@ -634,11 +636,11 @@ class FieldCacheImpl implements FieldCache {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return DocValues.emptySorted();
} else if (info.hasDocValues()) {
} else if (info.getDocValuesType() != DocValuesType.NONE) {
// we don't try to build a sorted instance from numeric/binary doc
// values because dedup can be very costly
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
} else if (info.getIndexOptions() == IndexOptions.NONE) {
return DocValues.emptySorted();
}
SortedDocValuesImpl impl = (SortedDocValuesImpl) caches.get(SortedDocValues.class).get(reader, new CacheKey(field, acceptableOverheadRatio), false);
@@ -783,9 +785,9 @@ class FieldCacheImpl implements FieldCache {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return DocValues.emptyBinary();
} else if (info.hasDocValues()) {
} else if (info.getDocValuesType() != DocValuesType.NONE) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
} else if (info.getIndexOptions() == IndexOptions.NONE) {
return DocValues.emptyBinary();
}
@@ -906,9 +908,9 @@ class FieldCacheImpl implements FieldCache {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return DocValues.emptySortedSet();
} else if (info.hasDocValues()) {
} else if (info.getDocValuesType() != DocValuesType.NONE) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
} else if (info.getIndexOptions() == IndexOptions.NONE) {
return DocValues.emptySortedSet();
}

@@ -37,6 +37,7 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
@@ -185,7 +186,7 @@ public class UninvertingReader extends FilterLeafReader {
ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
for (FieldInfo fi : in.getFieldInfos()) {
DocValuesType type = fi.getDocValuesType();
if (fi.isIndexed() && !fi.hasDocValues()) {
if (fi.getIndexOptions() != IndexOptions.NONE && fi.getDocValuesType() == DocValuesType.NONE) {
Type t = mapping.get(fi.name);
if (t != null) {
switch(t) {
@@ -291,7 +292,7 @@
*/
private Type getType(String field) {
FieldInfo info = fieldInfos.fieldInfo(field);
if (info == null || info.hasDocValues() == false) {
if (info == null || info.getDocValuesType() == DocValuesType.NONE) {
return null;
}
return mapping.get(field);

@@ -800,11 +800,11 @@ public class AssertingLeafReader extends FilterLeafReader {
FieldInfo fi = getFieldInfos().fieldInfo(field);
if (docsWithField != null) {
assert fi != null;
assert fi.hasDocValues();
assert fi.getDocValuesType() != DocValuesType.NONE;
assert maxDoc() == docsWithField.length();
docsWithField = new AssertingBits(docsWithField);
} else {
assert fi == null || fi.hasDocValues() == false;
assert fi == null || fi.getDocValuesType() == DocValuesType.NONE;
}
return docsWithField;
}

@@ -56,8 +56,8 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
FieldInfos infos2 = codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT);
assertEquals(1, infos2.size());
assertNotNull(infos2.fieldInfo("field"));
assertTrue(infos2.fieldInfo("field").isIndexed());
assertFalse(infos2.fieldInfo("field").hasDocValues());
assertTrue(infos2.fieldInfo("field").getIndexOptions() != IndexOptions.NONE);
assertFalse(infos2.fieldInfo("field").getDocValuesType() != DocValuesType.NONE);
assertFalse(infos2.fieldInfo("field").omitsNorms());
assertFalse(infos2.fieldInfo("field").hasPayloads());
assertFalse(infos2.fieldInfo("field").hasVectors());
@@ -157,11 +157,9 @@ public abstract class BaseFieldInfoFormatTestCase extends BaseIndexFileFormatTes
assertEquals(expected.name, actual.name);
assertEquals(expected.getDocValuesType(), actual.getDocValuesType());
assertEquals(expected.getIndexOptions(), actual.getIndexOptions());
assertEquals(expected.hasDocValues(), actual.hasDocValues());
assertEquals(expected.hasNorms(), actual.hasNorms());
assertEquals(expected.hasPayloads(), actual.hasPayloads());
assertEquals(expected.hasVectors(), actual.hasVectors());
assertEquals(expected.isIndexed(), actual.isIndexed());
assertEquals(expected.omitsNorms(), actual.omitsNorms());
assertEquals(expected.getDocValuesGen(), actual.getDocValuesGen());
}

@@ -56,8 +56,8 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
@@ -68,6 +68,7 @@ import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldFilterLeafReader;
@@ -75,8 +76,8 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
@@ -103,8 +104,8 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.DocIdSetIterator;
@@ -114,12 +115,12 @@ import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.store.RateLimitedDirectoryWrapper;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
@@ -136,6 +137,7 @@ import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.junit.runner.RunWith;
import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
import com.carrotsearch.randomizedtesting.LifecycleScope;
import com.carrotsearch.randomizedtesting.MixWithSuiteName;
@@ -146,16 +148,16 @@ import com.carrotsearch.randomizedtesting.annotations.Listeners;
import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
@@ -2218,7 +2220,7 @@ public abstract class LuceneTestCase extends Assert {
private static Set<String> getDVFields(IndexReader reader) {
Set<String> fields = new HashSet<>();
for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) {
if (fi.hasDocValues()) {
if (fi.getDocValuesType() != DocValuesType.NONE) {
fields.add(fi.name);
}
}

@@ -17,45 +17,6 @@
package org.apache.solr.schema;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.Version;
import org.apache.solr.cloud.CloudUtil;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.response.SchemaXmlWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.util.DOMUtil;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.Config;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.search.similarities.DefaultSimilarityFactory;
import org.apache.solr.util.plugin.SolrCoreAware;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
@@ -75,6 +36,47 @@ import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Pattern;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.Version;
import org.apache.solr.cloud.CloudUtil;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.Config;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.response.SchemaXmlWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.similarities.DefaultSimilarityFactory;
import org.apache.solr.util.DOMUtil;
import org.apache.solr.util.plugin.SolrCoreAware;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
@@ -369,7 +371,7 @@ public class IndexSchema {
public Map<String,UninvertingReader.Type> getUninversionMap(IndexReader reader) {
Map<String,UninvertingReader.Type> map = new HashMap<>();
for (FieldInfo f : MultiFields.getMergedFieldInfos(reader)) {
if (f.hasDocValues() == false && f.isIndexed()) {
if (f.getDocValuesType() == DocValuesType.NONE && f.getIndexOptions() != IndexOptions.NONE) {
SchemaField sf = getFieldOrNull(f.name);
if (sf != null) {
UninvertingReader.Type type = sf.getType().getUninversionType(sf);