Upgrade to lucene-5.2.0-snapshot-1673124.

Adrien Grand 2015-04-13 10:10:51 +02:00
parent df118214c4
commit 45afa47a6f
46 changed files with 112 additions and 106 deletions

View File

@@ -31,8 +31,8 @@
</parent>
<properties>
-<lucene.version>5.1.0</lucene.version>
-<lucene.maven.version>5.1.0-snapshot-1671894</lucene.maven.version>
+<lucene.version>5.2.0</lucene.version>
+<lucene.maven.version>5.2.0-snapshot-1673124</lucene.maven.version>
<tests.jvms>auto</tests.jvms>
<tests.shuffle>true</tests.shuffle>
<tests.output>onerror</tests.output>
@@ -66,7 +66,7 @@
<repository>
<id>lucene-snapshots</id>
<name>Lucene Snapshots</name>
-<url>https://download.elastic.co/lucenesnapshots/1671894</url>
+<url>https://download.elastic.co/lucenesnapshots/1673124</url>
</repository>
</repositories>

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.UnicodeUtil;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.text.BreakIterator;
import java.util.*;
@@ -426,7 +427,7 @@ public class XPostingsHighlighter {
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
}
if (leaf != lastLeaf) {
-termsEnum = t.iterator(null);
+termsEnum = t.iterator();
postings = new PostingsEnum[terms.length];
}
Passage passages[] = highlightDoc(field, terms, content.length(), bi, doc - subContext.docBase, termsEnum, postings, maxPassages);
@@ -745,7 +746,8 @@ public class XPostingsHighlighter {
}
@Override
-public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+public void stringField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
+String value = new String(bytes, StandardCharsets.UTF_8);
assert currentField >= 0;
StringBuilder builder = builders[currentField];
if (builder.length() > 0 && builder.length() < maxLength) {
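For context on the two hunks above: in Lucene 5.2, StoredFieldVisitor.stringField receives the raw UTF-8 bytes of the stored value instead of a decoded String, so visitors now decode explicitly (hence the new StandardCharsets import). A minimal sketch of the new callback shape, with an illustrative field name rather than anything from this commit:

    import java.nio.charset.StandardCharsets;
    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.StoredFieldVisitor;

    // Hypothetical visitor showing the Lucene 5.2 stringField signature:
    // the value arrives as UTF-8 bytes and the visitor decodes it itself.
    class TitleVisitor extends StoredFieldVisitor {
        String title;

        @Override
        public Status needsField(FieldInfo fieldInfo) {
            return "title".equals(fieldInfo.name) ? Status.YES : Status.NO;
        }

        @Override
        public void stringField(FieldInfo fieldInfo, byte[] bytes) {
            // Pre-5.2 this method was stringField(FieldInfo, String value).
            title = new String(bytes, StandardCharsets.UTF_8);
        }
    }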

View File

@@ -236,7 +236,7 @@ public class Version {
public static final int V_1_6_0_ID = 1060099;
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_ID = 2000099;
-public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_1_0);
+public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_0);
public static final Version CURRENT = V_2_0_0;

View File

@@ -212,7 +212,7 @@ public final class TermVectorsFields extends Fields {
}
@Override
-public TermsEnum iterator(TermsEnum reuse) throws IOException {
+public TermsEnum iterator() throws IOException {
// reset before asking for an iterator
reset();
// convert bytes ref for the terms to actual data
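This hunk shows the change that accounts for most of the files in this commit: Lucene 5.2 drops the reuse parameter from Terms.iterator(TermsEnum), so every caller loses its null argument along with any enum-recycling bookkeeping. A sketch of the call-site migration, assuming a field named "body":

    import java.io.IOException;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    class TermCounter {
        // Counts the distinct terms of one field; "body" is an assumed name.
        static long countTerms(LeafReader reader) throws IOException {
            Terms terms = reader.terms("body");
            if (terms == null) {
                return 0;
            }
            // Lucene 5.1: TermsEnum te = terms.iterator(null);
            TermsEnum te = terms.iterator(); // 5.2: no reuse argument
            long count = 0;
            for (BytesRef term = te.next(); term != null; term = te.next()) {
                count++;
            }
            return count;
        }
    }

With reuse gone, enums that used to be declared outside loops purely so they could be recycled (TermVectorsWriter, DfsOnlyRequest, and MultiPhrasePrefixQuery below) are simply declared at their point of use.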

View File

@@ -201,7 +201,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent {
// write field statistics
buildFieldStatistics(builder, curTerms);
builder.startObject(FieldStrings.TERMS);
-TermsEnum termIter = curTerms.iterator(null);
+TermsEnum termIter = curTerms.iterator();
for (int i = 0; i < curTerms.size(); i++) {
buildTerm(builder, spare, curTerms, termIter);
}

View File

@@ -52,10 +52,8 @@ final class TermVectorsWriter {
void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields, @Nullable AggregatedDfs dfs) throws IOException {
int numFieldsWritten = 0;
-TermsEnum iterator = null;
PostingsEnum docsAndPosEnum = null;
PostingsEnum docsEnum = null;
-TermsEnum topLevelIterator = null;
for (String field : termVectorsByField) {
if ((selectedFields != null) && (!selectedFields.contains(field))) {
continue;
@@ -69,7 +67,7 @@ final class TermVectorsWriter {
topLevelTerms = fieldTermVector;
}
-topLevelIterator = topLevelTerms.iterator(topLevelIterator);
+TermsEnum topLevelIterator = topLevelTerms.iterator();
boolean positions = flags.contains(Flag.Positions) && fieldTermVector.hasPositions();
boolean offsets = flags.contains(Flag.Offsets) && fieldTermVector.hasOffsets();
boolean payloads = flags.contains(Flag.Payloads) && fieldTermVector.hasPayloads();
@@ -81,7 +79,7 @@ final class TermVectorsWriter {
writeFieldStatistics(topLevelTerms);
}
}
-iterator = fieldTermVector.iterator(iterator);
+TermsEnum iterator = fieldTermVector.iterator();
final boolean useDocsAndPos = positions || offsets || payloads;
while (iterator.next() != null) { // iterate all terms of the
// current field

View File

@@ -53,13 +53,12 @@ public class DfsOnlyRequest extends BroadcastOperationRequest<DfsOnlyRequest> {
// build a search request with a query of all the terms
final BoolQueryBuilder boolBuilder = boolQuery();
-TermsEnum iterator = null;
for (String fieldName : termVectorsFields) {
if ((selectedFields != null) && (!selectedFields.contains(fieldName))) {
continue;
}
Terms terms = termVectorsFields.terms(fieldName);
-iterator = terms.iterator(iterator);
+TermsEnum iterator = terms.iterator();
while (iterator.next() != null) {
String text = iterator.term().utf8ToString();
boolBuilder.should(QueryBuilders.termQuery(fieldName, text));

View File

@@ -25,12 +25,12 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.spans.SpanScorer;
import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.search.spans.SpanWeight;
+import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.search.spans.TermSpans;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@@ -51,7 +51,7 @@ public class AllTermQuery extends SpanTermQuery {
}
@Override
-public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
// TODO: needsScores
// we should be able to just return a regular SpanTermWeight, at most here if needsScores == false?
return new AllTermWeight(this, searcher);
@@ -68,8 +68,12 @@ public class AllTermQuery extends SpanTermQuery {
if (this.stats == null) {
return null;
}
+TermSpans spans = (TermSpans) query.getSpans(context, acceptDocs, termContexts);
+if (spans == null) {
+return null;
+}
SimScorer sloppySimScorer = similarity.simScorer(stats, context);
-return new AllTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts), this, sloppySimScorer);
+return new AllTermSpanScorer(spans, this, sloppySimScorer);
}
protected class AllTermSpanScorer extends SpanScorer {
@@ -77,31 +81,43 @@ public class AllTermQuery extends SpanTermQuery {
protected float payloadScore;
protected int payloadsSeen;
-public AllTermSpanScorer(TermSpans spans, Weight weight, Similarity.SimScorer docScorer) throws IOException {
+public AllTermSpanScorer(TermSpans spans, SpanWeight weight, Similarity.SimScorer docScorer) throws IOException {
super(spans, weight, docScorer);
positions = spans.getPostings();
}
@Override
-protected boolean setFreqCurrentDoc() throws IOException {
-if (!more) {
-return false;
-}
-doc = spans.doc();
+protected void setFreqCurrentDoc() throws IOException {
freq = 0.0f;
numMatches = 0;
payloadScore = 0;
payloadsSeen = 0;
+assert spans.startPosition() == -1 : "incorrect initial start position, spans="+spans;
+assert spans.endPosition() == -1 : "incorrect initial end position, spans="+spans;
+int prevStartPos = -1;
+int prevEndPos = -1;
+int startPos = spans.nextStartPosition();
+assert startPos != Spans.NO_MORE_POSITIONS : "initial startPos NO_MORE_POSITIONS, spans="+spans;
do {
-int matchLength = spans.end() - spans.start();
-freq += docScorer.computeSlopFactor(matchLength);
+assert startPos >= prevStartPos;
+int endPos = spans.endPosition();
+assert endPos != Spans.NO_MORE_POSITIONS;
+// This assertion can fail for Or spans on the same term:
+// assert (startPos != prevStartPos) || (endPos > prevEndPos) : "non increased endPos="+endPos;
+assert (startPos != prevStartPos) || (endPos >= prevEndPos) : "decreased endPos="+endPos;
numMatches++;
+int matchLength = endPos - startPos;
+freq += docScorer.computeSlopFactor(matchLength);
processPayload();
+prevStartPos = startPos;
+prevEndPos = endPos;
+startPos = spans.nextStartPosition();
+} while (startPos != Spans.NO_MORE_POSITIONS);
-more = spans.next();// this moves positions to the next match
-} while (more && (doc == spans.doc()));
-return true;
+assert spans.startPosition() == Spans.NO_MORE_POSITIONS : "incorrect final start position, spans="+spans;
+assert spans.endPosition() == Spans.NO_MORE_POSITIONS : "incorrect final end position, spans="+spans;
}
protected void processPayload() throws IOException {
@@ -120,7 +136,7 @@ public class AllTermQuery extends SpanTermQuery {
* @throws IOException
*/
@Override
-public float score() throws IOException {
+public float scoreCurrentDoc() throws IOException {
return getSpanScore() * getPayloadScore();
}
@@ -134,7 +150,7 @@ public class AllTermQuery extends SpanTermQuery {
* @see #score()
*/
protected float getSpanScore() throws IOException {
-return super.score();
+return super.scoreCurrentDoc();
}
/**
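The setFreqCurrentDoc rewrite above follows the Spans contract this Lucene snapshot introduces: instead of next()/start()/end() advancing across documents, positions within the current document are pulled with nextStartPosition()/endPosition() until NO_MORE_POSITIONS. A reduced sketch of that per-document loop, leaving out AllTermQuery's payload handling and assertions:

    import java.io.IOException;
    import org.apache.lucene.search.spans.Spans;

    class SpanFreq {
        // Sums match lengths for the document the spans are positioned on;
        // the caller is assumed to have advanced spans to a document already.
        static int totalMatchLength(Spans spans) throws IOException {
            int total = 0;
            for (int start = spans.nextStartPosition();
                    start != Spans.NO_MORE_POSITIONS;
                    start = spans.nextStartPosition()) {
                total += spans.endPosition() - start;
            }
            return total;
        }
    }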

View File

@@ -84,7 +84,7 @@ public class FilterableTermsEnum extends TermsEnum {
if (terms == null) {
continue;
}
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
if (termsEnum == null) {
continue;
}

View File

@@ -202,7 +202,7 @@ public class MoreLikeThisQuery extends Query {
for (Fields fields : ignoreFields) {
for (String fieldName : fields) {
Terms terms = fields.terms(fieldName);
-final TermsEnum termsEnum = terms.iterator(null);
+final TermsEnum termsEnum = terms.iterator();
BytesRef text;
while ((text = termsEnum.next()) != null) {
skipTerms.add(new Term(fieldName, text.utf8ToString()));

View File

@@ -155,7 +155,6 @@ public class MultiPhrasePrefixQuery extends Query {
private void getPrefixTerms(ObjectOpenHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
// SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
// instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
-TermsEnum termsEnum = null;
List<LeafReaderContext> leaves = reader.leaves();
for (LeafReaderContext leaf : leaves) {
Terms _terms = leaf.reader().terms(field);
@@ -163,7 +162,7 @@ public class MultiPhrasePrefixQuery extends Query {
continue;
}
-termsEnum = _terms.iterator(termsEnum);
+TermsEnum termsEnum = _terms.iterator();
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(prefix.bytes());
if (TermsEnum.SeekStatus.END == seekStatus) {
continue;

View File

@@ -842,7 +842,7 @@ public final class XMoreLikeThis {
* @param fieldName Optional field name of the terms for skip terms
*/
private void addTermFrequencies(Map<String, Int> termFreqMap, Terms vector, @Nullable String fieldName) throws IOException {
-final TermsEnum termsEnum = vector.iterator(null);
+final TermsEnum termsEnum = vector.iterator();
final CharsRefBuilder spare = new CharsRefBuilder();
BytesRef text;
while((text = termsEnum.next()) != null) {

View File

@@ -82,7 +82,7 @@ final class PerThreadIDAndVersionLookup {
if (terms != null) {
readerContexts[numSegs] = readerContext;
hasPayloads[numSegs] = terms.hasPayloads();
-termsEnums[numSegs] = terms.iterator(null);
+termsEnums[numSegs] = terms.iterator();
assert termsEnums[numSegs] != null;
liveDocs[numSegs] = readerContext.reader().getLiveDocs();
hasDeletions |= readerContext.reader().hasDeletions();

View File

@@ -239,22 +239,8 @@ public class BloomFilterPostingsFormat extends PostingsFormat {
}
@Override
-public TermsEnum iterator(TermsEnum reuse) throws IOException {
-TermsEnum result;
-if ((reuse != null) && (reuse instanceof BloomFilteredTermsEnum)) {
-// recycle the existing BloomFilteredTermsEnum by asking the delegate
-// to recycle its contained TermsEnum
-BloomFilteredTermsEnum bfte = (BloomFilteredTermsEnum) reuse;
-if (bfte.filter == filter) {
-bfte.reset(this.in);
-return bfte;
-}
-reuse = bfte.reuse;
-}
-// We have been handed something we cannot reuse (either null, wrong
-// class or wrong filter) so allocate a new object
-result = new BloomFilteredTermsEnum(this.in, reuse, filter);
-return result;
+public TermsEnum iterator() throws IOException {
+return new BloomFilteredTermsEnum(this.in, filter);
}
}
@@ -262,17 +248,14 @@ public class BloomFilterPostingsFormat extends PostingsFormat {
private Terms delegateTerms;
private TermsEnum delegateTermsEnum;
-private TermsEnum reuse;
private BloomFilter filter;
-public BloomFilteredTermsEnum(Terms other, TermsEnum reuse, BloomFilter filter) {
+public BloomFilteredTermsEnum(Terms other, BloomFilter filter) {
this.delegateTerms = other;
-this.reuse = reuse;
this.filter = filter;
}
void reset(Terms others) {
-reuse = this.delegateTermsEnum;
this.delegateTermsEnum = null;
this.delegateTerms = others;
}
@@ -283,7 +266,7 @@ public class BloomFilterPostingsFormat extends PostingsFormat {
* this can be a relatively heavy operation depending on the
* delegate postings format and they underlying directory
* (clone IndexInput) */
-delegateTermsEnum = delegateTerms.iterator(reuse);
+delegateTermsEnum = delegateTerms.iterator();
}
return delegateTermsEnum;
}
@@ -385,7 +368,7 @@ public class BloomFilterPostingsFormat extends PostingsFormat {
continue;
}
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
BloomFilter bloomFilter = null;

View File

@@ -39,7 +39,7 @@ public class EngineSearcherFactory extends SearcherFactory {
}
@Override
-public IndexSearcher newSearcher(IndexReader reader) throws IOException {
+public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(engineConfig.getSimilarity());
return searcher;
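Lucene 5.2 adds a second argument to SearcherFactory.newSearcher: the reader from the previous refresh, which lets a factory warm only newly appeared segments. Both factories in this commit accept the parameter without using it. A minimal sketch of the updated override, with an arbitrary similarity standing in for the engine's:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.SearcherFactory;
    import org.apache.lucene.search.similarities.BM25Similarity;

    class PlainSearcherFactory extends SearcherFactory {
        @Override
        public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
            // previousReader may be null on the first call; comparing its
            // segments against reader's allows warming only the new ones.
            IndexSearcher searcher = new IndexSearcher(reader);
            searcher.setSimilarity(new BM25Similarity());
            return searcher;
        }
    }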

View File

@@ -1030,7 +1030,7 @@ public class InternalEngine extends Engine {
}
@Override
-public IndexSearcher newSearcher(IndexReader reader) throws IOException {
+public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(engineConfig.getSimilarity());
if (warmer != null) {

View File

@@ -81,7 +81,7 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
}
protected TermsEnum filter(Terms terms, LeafReader reader) throws IOException {
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
if (iterator == null) {
return null;
}

View File

@@ -108,7 +108,7 @@ public class DoubleArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
-final BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
+final BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator()));
BytesRef term;
long numTerms = 0;
while ((term = iter.next()) != null) {

View File

@@ -106,7 +106,7 @@ public class FloatArrayIndexFieldData extends AbstractIndexFieldData<AtomicNumer
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
-BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
+BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator()));
BytesRef term;
long numTerms = 0;
while ((term = iter.next()) != null) {

View File

@@ -101,7 +101,7 @@ public class GeoPointCompressedIndexFieldData extends AbstractIndexGeoPointField
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio)) {
-final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator(null)));
+final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator()));
GeoPoint point;
while ((point = iter.next()) != null) {
final long ord = builder.currentOrdinal();

View File

@@ -76,7 +76,7 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFiel
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio)) {
-final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator(null)));
+final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator()));
GeoPoint point;
long numTerms = 0;
while ((point = iter.next()) != null) {

View File

@@ -442,7 +442,7 @@ public class PackedArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
*/
@Override
public TermsEnum beforeLoad(Terms terms) throws IOException {
-return new RamAccountingTermsEnum(type.wrapTermsEnum(terms.iterator(null)), breaker, this, this.fieldName);
+return new RamAccountingTermsEnum(type.wrapTermsEnum(terms.iterator()), breaker, this, this.fieldName);
}
/**

View File

@@ -49,7 +49,7 @@ final class ParentChildIntersectTermsEnum extends TermsEnum {
for (String field : fields) {
Terms terms = atomicReader.terms(field);
if (terms != null) {
-fieldEnums.add(terms.iterator(null));
+fieldEnums.add(terms.iterator());
}
}
states = new ArrayList<>(fieldEnums.size());

View File

@@ -19,16 +19,23 @@
package org.elasticsearch.index.fieldvisitor;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -88,7 +95,8 @@ public abstract class FieldsVisitor extends StoredFieldVisitor {
}
@Override
-public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+public void stringField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
+final String value = new String(bytes, StandardCharsets.UTF_8);
if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
uid = Uid.createUid(value);
} else {

View File

@@ -23,6 +23,7 @@ import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
/**
*/
@@ -42,11 +43,11 @@ public class UidAndRoutingFieldsVisitor extends FieldsVisitor {
}
@Override
-public void stringField(FieldInfo fieldInfo, String value) throws IOException {
+public void stringField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
if (RoutingFieldMapper.NAME.equals(fieldInfo.name)) {
-routing = value;
+routing = new String(bytes, StandardCharsets.UTF_8);
} else {
-super.stringField(fieldInfo, value);
+super.stringField(fieldInfo, bytes);
}
}

View File

@@ -129,7 +129,7 @@ class VersionFieldUpgrader extends FilterCodecReader {
if (VersionFieldMapper.NAME.equals(field.name)) {
// uninvert into a packed ints and expose as docvalues
final Terms terms = reader.terms(UidFieldMapper.NAME);
-final TermsEnum uids = terms.iterator(null);
+final TermsEnum uids = terms.iterator();
final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.COMPACT);
PostingsEnum dpe = null;
for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) {
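VersionFieldUpgrader walks every _uid term and then the postings of each term to rebuild version doc values. For reference, the terms-then-postings loop as it looks against this Lucene snapshot (the three-argument postings overload with liveDocs was still current in the 5.x line; the flags and the processing step are illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    class PostingsWalker {
        // Visits every (term, doc) pair of the _uid field.
        static void walk(LeafReader reader) throws IOException {
            Terms terms = reader.terms("_uid");
            if (terms == null) {
                return;
            }
            TermsEnum uids = terms.iterator(); // 5.2 style, no reuse argument
            PostingsEnum dpe = null;           // postings enums are still reused
            for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) {
                dpe = uids.postings(reader.getLiveDocs(), dpe, PostingsEnum.NONE);
                for (int doc = dpe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dpe.nextDoc()) {
                    // process (uid, doc) here
                }
            }
        }
    }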

View File

@@ -140,7 +140,7 @@ final class ParentIdsFilter extends Filter {
return null;
}
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
BytesRefBuilder uidSpare = new BytesRefBuilder();
BytesRef idSpare = new BytesRef();

View File

@@ -196,7 +196,7 @@ public class TopChildrenQuery extends Query {
if (terms == null) {
continue;
}
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
if (!termsEnum.seekExact(Uid.createUidAsBytes(parentType, parentId))) {
continue;
}

View File

@@ -165,7 +165,7 @@ public class IncludeExclude {
}
@Override
-public TermsEnum iterator(TermsEnum reuse) throws IOException {
+public TermsEnum iterator() throws IOException {
return values.termsEnum();
}

View File

@@ -142,7 +142,7 @@ public class IndexFieldTerm implements Iterable<TermPosition> {
if (fields != null) {
final Terms terms = fields.terms(identifier.field());
if (terms != null) {
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
if (termsEnum.seekExact(identifier.bytes())) {
newPostings = termsEnum.postings(reader.getLiveDocs(), postings, luceneFlags);
}

View File

@@ -132,7 +132,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider
if (terms == null) {
continue;
}
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
final SuggestPayload spare = new SuggestPayload();
int maxAnalyzedPathsForOneInput = 0;

View File

@@ -73,7 +73,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator {
this.nonErrorLikelihood = nonErrorLikelihood;
float thresholdFrequency = spellchecker.getThresholdFrequency();
this.frequencyPlateau = thresholdFrequency >= 1.0f ? (int) thresholdFrequency: (int)(dictSize * thresholdFrequency);
-termsEnum = terms.iterator(null);
+termsEnum = terms.iterator();
}
/* (non-Javadoc)

View File

@@ -340,8 +340,8 @@ public abstract class AbstractTermVectorsTests extends ElasticsearchIntegrationT
assertNotNull(esTerms);
Terms luceneTerms = luceneFields.terms(field.name);
-TermsEnum esTermEnum = esTerms.iterator(null);
-TermsEnum luceneTermEnum = luceneTerms.iterator(null);
+TermsEnum esTermEnum = esTerms.iterator();
+TermsEnum luceneTermEnum = luceneTerms.iterator();
while (esTermEnum.next() != null) {
assertNotNull(luceneTermEnum.next());

View File

@@ -108,7 +108,7 @@ public class GetTermVectorsCheckDocFreqTests extends ElasticsearchIntegrationTes
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
assertThat(terms.getDocCount(), Matchers.equalTo(-1));
assertThat(terms.getSumDocFreq(), equalTo((long) -1));
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
BytesRef next = iterator.next();
@@ -168,7 +168,7 @@ public class GetTermVectorsCheckDocFreqTests extends ElasticsearchIntegrationTes
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
BytesRef next = iterator.next();
@@ -225,7 +225,7 @@ public class GetTermVectorsCheckDocFreqTests extends ElasticsearchIntegrationTes
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
BytesRef next = iterator.next();

View File

@@ -313,7 +313,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
if (ft.storeTermVectors()) {
Terms terms = fields.terms("field");
assertThat(terms.size(), equalTo(8l));
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
BytesRef next = iterator.next();
@@ -448,7 +448,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
while (iterator.next() != null) {
String term = iterator.term().utf8ToString();
PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
@@ -636,7 +636,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
Terms terms = fields.terms(fieldName);
assertThat(terms.size(), equalTo(8l));
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
BytesRef next = iterator.next();
@@ -722,8 +722,8 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
assertThat(terms1, notNullValue());
assertThat(terms0.size(), equalTo(terms1.size()));
-TermsEnum iter0 = terms0.iterator(null);
-TermsEnum iter1 = terms1.iterator(null);
+TermsEnum iter0 = terms0.iterator();
+TermsEnum iter1 = terms1.iterator();
for (int i = 0; i < terms0.size(); i++) {
BytesRef next0 = iter0.next();
assertThat(next0, notNullValue());
@@ -1003,7 +1003,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
assertThat("Existing field " + fieldName + "should have been returned", terms, notNullValue());
// check overridden by keyword analyzer ...
if (perFieldAnalyzer.containsKey(fieldName)) {
-TermsEnum iterator = terms.iterator(null);
+TermsEnum iterator = terms.iterator();
assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here"));
assertThat(iterator.next(), nullValue());
}
@@ -1090,7 +1090,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests {
(int) terms.getSumTotalTermFreq(),
equalOrLessThanTo(fieldStatistics.get("sum_ttf"), isEqual));
-final TermsEnum termsEnum = terms.iterator(null);
+final TermsEnum termsEnum = terms.iterator();
BytesRef text;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();

View File

@@ -197,7 +197,7 @@ public class MultiTermVectorsTests extends AbstractTermVectorsTests {
}
private void checkTermTexts(Terms terms, String[] expectedTexts) throws IOException {
-final TermsEnum termsEnum = terms.iterator(null);
+final TermsEnum termsEnum = terms.iterator();
for (String expectedText : expectedTexts) {
assertThat(termsEnum.next().utf8ToString(), equalTo(expectedText));
}

View File

@@ -196,7 +196,7 @@ public class SimpleLuceneTests extends ElasticsearchTestCase {
indexWriter.close();
-TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator(null);
+TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator();
termDocs.next();
}
@@ -233,7 +233,7 @@ public class SimpleLuceneTests extends ElasticsearchTestCase {
LeafReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);
Terms terms = atomicReader.terms("int1");
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
termsEnum.next();
PostingsEnum termDocs = termsEnum.postings(atomicReader.getLiveDocs(), null);
@@ -242,7 +242,7 @@ public class SimpleLuceneTests extends ElasticsearchTestCase {
assertThat(termDocs.freq(), equalTo(1));
terms = atomicReader.terms("int2");
-termsEnum = terms.iterator(termsEnum);
+termsEnum = terms.iterator();
termsEnum.next();
termDocs = termsEnum.postings(atomicReader.getLiveDocs(), termDocs);
assertThat(termDocs.nextDoc(), equalTo(0));

View File

@@ -73,7 +73,7 @@ public class BooleanFieldMapperTests extends ElasticsearchSingleNodeTest {
try (DirectoryReader reader = DirectoryReader.open(w, true)) {
final LeafReader leaf = reader.leaves().get(0).reader();
// boolean fields are indexed and have doc values by default
-assertEquals(new BytesRef("T"), leaf.terms("field").iterator(null).next());
+assertEquals(new BytesRef("T"), leaf.terms("field").iterator().next());
SortedNumericDocValues values = leaf.getSortedNumericDocValues("field");
assertNotNull(values);
values.setDocument(0);

View File

@@ -1769,7 +1769,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest {
private static String termsToString(Terms terms) throws IOException {
String strings = "";
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
CharsRefBuilder spare = new CharsRefBuilder();
BytesRef text;
while((text = termsEnum.next()) != null) {

View File

@@ -260,7 +260,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests {
Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
if (terms != null) {
NavigableSet<String> parentIds = childValueToParentIds.lget();
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
for (String id : parentIds) {
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));

View File

@@ -229,7 +229,7 @@ public class ChildrenQueryTests extends AbstractChildTests {
Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
if (terms != null) {
NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.lget();
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
int count = entry.getValue().elementsCount;

View File

@@ -211,7 +211,7 @@ public class ParentConstantScoreQueryTests extends AbstractChildTests {
Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
if (terms != null) {
NavigableSet<String> childIds = parentValueToChildDocIds.lget();
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
for (String id : childIds) {
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));

View File

@@ -209,7 +209,7 @@ public class ParentQueryTests extends AbstractChildTests {
Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
if (terms != null) {
NavigableMap<String, Float> childIdsAndScore = parentValueToChildIds.lget();
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
for (Map.Entry<String, Float> entry : childIdsAndScore.entrySet()) {
TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey()));

View File

@@ -140,7 +140,7 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide
if (terms == null) {
continue;
}
-TermsEnum termsEnum = terms.iterator(null);
+TermsEnum termsEnum = terms.iterator();
PostingsEnum docsEnum = null;
final SuggestPayload spare = new SuggestPayload();
int maxAnalyzedPathsForOneInput = 0;

View File

@@ -364,7 +364,7 @@ public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
if (field.equals("foo")) {
return new Terms() {
@Override
-public TermsEnum iterator(TermsEnum reuse) throws IOException {
+public TermsEnum iterator() throws IOException {
final Iterator<TermPosAndPayload> iterator = terms.iterator();
return new TermsEnum() {
private TermPosAndPayload current = null;

View File

@@ -129,8 +129,8 @@ public class ThrowingLeafReaderWrapper extends FilterLeafReader {
}
@Override
-public TermsEnum iterator(TermsEnum reuse) throws IOException {
-TermsEnum termsEnum = super.iterator(reuse);
+public TermsEnum iterator() throws IOException {
+TermsEnum termsEnum = super.iterator();
thrower.maybeThrow(Flags.TermsEnum);
return new ThrowingTermsEnum(termsEnum, thrower);
}