Upgrade to Lucene 4.7.1

* Removed XTermsFilter now that LUCENE-5502 is fixed upstream (a before/after sketch follows below)
* Switched back to automaton queries, which previously caused failures due to LUCENE-5532
* Fixed the highlight test whose expected results changed due to LUCENE-5538
Simon Willnauer 2014-03-26 09:01:40 +01:00
parent 51a6a95de3
commit 42b20d601f
14 changed files with 37 additions and 373 deletions
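The XTermsFilter removal below is a mechanical rename back to Lucene's stock class in the same package, with the same constructors. A minimal before/after sketch of a call site (field name and value are illustrative, not taken from this commit):

import org.apache.lucene.queries.TermsFilter; // was: org.apache.lucene.queries.XTermsFilter
import org.apache.lucene.util.BytesRef;

// Before: Elasticsearch's fork, which carried the LUCENE-5502 fix locally.
// XTermsFilter filter = new XTermsFilter("category", new BytesRef("books"));

// After: Lucene 4.7.1 ships the fix, so the stock filter is safe to use again.
TermsFilter filter = new TermsFilter("category", new BytesRef("books"));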

pom.xml

@@ -17,6 +17,7 @@
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git@github.com:elasticsearch/elasticsearch.git</connection>
<developerConnection>scm:git:git@github.com:elasticsearch/elasticsearch.git</developerConnection>
@@ -30,7 +31,7 @@
</parent>
<properties>
-<lucene.version>4.7.0</lucene.version>
+<lucene.version>4.7.1</lucene.version>
<tests.jvms>1</tests.jvms>
<tests.shuffle>true</tests.shuffle>
<tests.output>onerror</tests.output>
@@ -1053,7 +1054,6 @@
<exclude>org/elasticsearch/plugins/PluginManager.class</exclude>
<exclude>org/elasticsearch/bootstrap/Bootstrap.class</exclude>
<exclude>org/elasticsearch/Version.class</exclude>
-<exclude>org/apache/lucene/queries/XTermsFilter.class</exclude>
<exclude>org/elasticsearch/index/merge/Merges.class</exclude>
<exclude>org/elasticsearch/common/lucene/search/Queries$QueryWrapperFilterFactory.class</exclude>
<!-- end excludes for valid system-out -->

org/apache/lucene/queries/XTermsFilter.java (deleted)

@@ -1,328 +0,0 @@
package org.apache.lucene.queries;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.*;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
* Constructs a filter for docs matching any of the terms added to this class.
* Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in
* a sequence. An example might be a collection of primary keys from a database query result or perhaps
* a choice of "category" labels picked by the end user. As a filter, this is much faster than the
* equivalent query (a BooleanQuery with many "should" TermQueries)
*/
public final class XTermsFilter extends Filter {
static {
assert Version.LUCENE_47 == org.elasticsearch.Version.CURRENT.luceneVersion : "Remove this once we are on LUCENE_48 - see LUCENE-5502";
}
/*
* this class is often used for large number of terms in a single field.
* to optimize for this case and to be filter-cache friendly we
* serialize all terms into a single byte array and store offsets
* in a parallel array to keep the # of objects constant and speed up
* equals / hashcode.
*
* This adds quite a bit of complexity but allows large term filters to
* be efficient for GC and cache-lookups
*/
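// Illustrative example (not part of the original file): for the sorted terms
// {"bar", "foo"} in a single field, the packed form is
//   termsBytes = { 'b','a','r', 'f','o','o' }   offsets = { 0, 3, 6 }
// so term i occupies termsBytes[offsets[i] .. offsets[i+1]).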
private final int[] offsets;
private final byte[] termsBytes;
private final TermsAndField[] termsAndFields;
private final int hashCode; // cached hashcode for fast cache lookups
private static final int PRIME = 31;
/**
* Creates a new {@link XTermsFilter} from the given list. The list
* can contain duplicate terms and multiple fields.
*/
public XTermsFilter(final List<Term> terms) {
this(new FieldAndTermEnum() {
// we need to sort for deduplication and to have a common cache key
final Iterator<Term> iter = sort(terms).iterator();
@Override
public BytesRef next() {
if (iter.hasNext()) {
Term next = iter.next();
field = next.field();
return next.bytes();
}
return null;
}}, terms.size());
}
/**
* Creates a new {@link XTermsFilter} from the given {@link BytesRef} list for
* a single field.
*/
public XTermsFilter(final String field, final List<BytesRef> terms) {
this(new FieldAndTermEnum(field) {
// we need to sort for deduplication and to have a common cache key
final Iterator<BytesRef> iter = sort(terms).iterator();
@Override
public BytesRef next() {
if (iter.hasNext()) {
return iter.next();
}
return null;
}
}, terms.size());
}
/**
* Creates a new {@link XTermsFilter} from the given {@link BytesRef} array for
* a single field.
*/
public XTermsFilter(final String field, final BytesRef...terms) {
// this ctor prevents unnecessary Term creations
this(field, Arrays.asList(terms));
}
/**
* Creates a new {@link XTermsFilter} from the given array. The array can
* contain duplicate terms and multiple fields.
*/
public XTermsFilter(final Term... terms) {
this(Arrays.asList(terms));
}
private XTermsFilter(FieldAndTermEnum iter, int length) {
// TODO: maybe use oal.index.PrefixCodedTerms instead?
// If number of terms is more than a few hundred it
// should be a win
// TODO: we also pack terms in FieldCache/DocValues
// ... maybe we can refactor to share that code
// TODO: yet another option is to build the union of the terms in
* an automaton and call intersect on the termsenum if the density is high
int hash = 9;
byte[] serializedTerms = new byte[0];
this.offsets = new int[length+1];
int lastEndOffset = 0;
int index = 0;
ArrayList<TermsAndField> termsAndFields = new ArrayList<>();
TermsAndField lastTermsAndField = null;
BytesRef previousTerm = null;
String previousField = null;
BytesRef currentTerm;
String currentField;
while((currentTerm = iter.next()) != null) {
currentField = iter.field();
if (currentField == null) {
throw new IllegalArgumentException("Field must not be null");
}
if (previousField != null) {
// deduplicate
if (previousField.equals(currentField)) {
if (previousTerm.bytesEquals(currentTerm)){
continue;
}
} else {
final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
lastTermsAndField = new TermsAndField(start, index, previousField);
termsAndFields.add(lastTermsAndField);
}
}
hash = PRIME * hash + currentField.hashCode();
hash = PRIME * hash + currentTerm.hashCode();
if (serializedTerms.length < lastEndOffset+currentTerm.length) {
serializedTerms = ArrayUtil.grow(serializedTerms, lastEndOffset+currentTerm.length);
}
System.arraycopy(currentTerm.bytes, currentTerm.offset, serializedTerms, lastEndOffset, currentTerm.length);
offsets[index] = lastEndOffset;
lastEndOffset += currentTerm.length;
index++;
previousTerm = currentTerm;
previousField = currentField;
}
offsets[index] = lastEndOffset;
final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
lastTermsAndField = new TermsAndField(start, index, previousField);
termsAndFields.add(lastTermsAndField);
this.termsBytes = ArrayUtil.shrink(serializedTerms, lastEndOffset);
this.termsAndFields = termsAndFields.toArray(new TermsAndField[termsAndFields.size()]);
this.hashCode = hash;
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
final AtomicReader reader = context.reader();
FixedBitSet result = null; // lazy init if needed - no need to create a big bitset ahead of time
final Fields fields = reader.fields();
final BytesRef spare = new BytesRef(this.termsBytes);
if (fields == null) {
return result;
}
Terms terms = null;
TermsEnum termsEnum = null;
DocsEnum docs = null;
for (TermsAndField termsAndField : this.termsAndFields) {
if ((terms = fields.terms(termsAndField.field)) != null) {
termsEnum = terms.iterator(termsEnum); // this won't return null
for (int i = termsAndField.start; i < termsAndField.end; i++) {
spare.offset = offsets[i];
spare.length = offsets[i+1] - offsets[i];
if (termsEnum.seekExact(spare)) {
docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
if (result == null) {
if (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
result = new FixedBitSet(reader.maxDoc());
// lazy init but don't do it in the hot loop since we could read many docs
result.set(docs.docID());
}
}
while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
result.set(docs.docID());
}
}
}
}
}
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if ((obj == null) || (obj.getClass() != this.getClass())) {
return false;
}
XTermsFilter test = (XTermsFilter) obj;
// first check the fields before even comparing the bytes
if (test.hashCode == hashCode && Arrays.equals(termsAndFields, test.termsAndFields)) {
int lastOffset = termsAndFields[termsAndFields.length - 1].end;
// compare offsets; since we sort, they must be identical
if (ArrayUtil.equals(offsets, 0, test.offsets, 0, lastOffset + 1)) {
// straight byte comparison; since we sort, they must be identical
return ArrayUtil.equals(termsBytes, 0, test.termsBytes, 0, offsets[lastOffset]);
}
}
return false;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
BytesRef spare = new BytesRef(termsBytes);
boolean first = true;
for (int i = 0; i < termsAndFields.length; i++) {
TermsAndField current = termsAndFields[i];
for (int j = current.start; j < current.end; j++) {
spare.offset = offsets[j];
spare.length = offsets[j+1] - offsets[j];
if (!first) {
builder.append(' ');
}
first = false;
builder.append(current.field).append(':');
builder.append(spare.utf8ToString());
}
}
return builder.toString();
}
private static final class TermsAndField {
final int start;
final int end;
final String field;
TermsAndField(int start, int end, String field) {
super();
this.start = start;
this.end = end;
this.field = field;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((field == null) ? 0 : field.hashCode());
result = prime * result + end;
result = prime * result + start;
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
TermsAndField other = (TermsAndField) obj;
if (field == null) {
if (other.field != null) return false;
} else if (!field.equals(other.field)) return false;
if (end != other.end) return false;
if (start != other.start) return false;
return true;
}
}
private static abstract class FieldAndTermEnum {
protected String field;
public abstract BytesRef next();
public FieldAndTermEnum() {}
public FieldAndTermEnum(String field) { this.field = field; }
public String field() {
return field;
}
}
/*
* simple utility that returns the in-place sorted list
*/
private static <T extends Comparable<? super T>> List<T> sort(List<T> toSort) {
if (toSort.isEmpty()) {
throw new IllegalArgumentException("no terms provided");
}
Collections.sort(toSort);
return toSort;
}
}
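With the fork gone, every call site below moves to the stock org.apache.lucene.queries.TermsFilter. A minimal usage sketch against the same Lucene 4.7 API, pairing the filter with a ConstantScoreQuery the way IdsQueryParser does further down (field and terms assumed for illustration):

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;

// Match documents whose "category" field holds either term.
TermsFilter filter = new TermsFilter(new Term("category", "books"), new Term("category", "music"));
// Filters match without scoring; constant-score wrapping turns one into a query.
Query query = new ConstantScoreQuery(filter);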

MapperService.java

@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.FilterClause;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
@@ -476,7 +476,7 @@ public class MapperService extends AbstractIndexComponent implements Iterable<Do
for (int i = 0; i < typesBytes.length; i++) {
typesBytes[i] = new BytesRef(types[i]);
}
-XTermsFilter termsFilter = new XTermsFilter(TypeFieldMapper.NAME, typesBytes);
+TermsFilter termsFilter = new TermsFilter(TypeFieldMapper.NAME, typesBytes);
if (filterPercolateType) {
return new AndFilter(ImmutableList.of(excludePercolatorType, termsFilter));
} else {

AbstractFieldMapper.java

@@ -30,7 +30,7 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
@@ -484,7 +484,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
-return new XTermsFilter(names.indexName(), bytesRefs);
+return new TermsFilter(names.indexName(), bytesRefs);
}
/**

IdFieldMapper.java

@@ -25,7 +25,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
@@ -180,7 +180,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
if (fieldType.indexed() || context == null) {
return super.termFilter(value, context);
}
-return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value));
+return new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value));
}
@Override
@@ -188,7 +188,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
if (fieldType.indexed() || context == null) {
return super.termsFilter(values, context);
}
-return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values));
+return new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values));
}
@Override

ParentFieldMapper.java

@@ -23,7 +23,7 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
@@ -278,7 +278,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
for (String type : context.mapperService().types()) {
typesValues.add(Uid.createUidAsBytes(type, bValue));
}
-return new XTermsFilter(names.indexName(), typesValues);
+return new TermsFilter(names.indexName(), typesValues);
}
}
@@ -311,7 +311,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
}
}
}
-return new XTermsFilter(names.indexName(), bValues);
+return new TermsFilter(names.indexName(), bValues);
}
/**

IdsFilterParser.java

@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
@@ -108,7 +108,7 @@ public class IdsFilterParser implements FilterParser {
types = parseContext.mapperService().types();
}
-XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
if (filterName != null) {
parseContext.addNamedFilter(filterName, filter);
}

IdsQueryParser.java

@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
@@ -115,7 +115,7 @@ public class IdsQueryParser implements QueryParser {
types = parseContext.mapperService().types();
}
-XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
// no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
ConstantScoreQuery query = new ConstantScoreQuery(filter);
query.setBoost(boost);

TermsFilterParser.java

@@ -22,7 +22,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.Lists;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
@@ -201,7 +201,7 @@ public class TermsFilterParser implements FilterParser {
for (int i = 0; i < filterValues.length; i++) {
filterValues[i] = BytesRefs.toBytesRef(terms.get(i));
}
-filter = new XTermsFilter(fieldName, filterValues);
+filter = new TermsFilter(fieldName, filterValues);
}
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {

TermsFilterTests.java

@@ -25,8 +25,8 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.XTermsFilter;
-import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.FixedBitSet;
@@ -97,19 +97,19 @@ public class TermsFilterTests extends ElasticsearchTestCase {
AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
w.close();
-XTermsFilter tf = new XTermsFilter(new Term[]{new Term(fieldName, "19")});
+TermsFilter tf = new TermsFilter(new Term[]{new Term(fieldName, "19")});
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits, nullValue());
-tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
+tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(1));
-tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
+tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));
-tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
+tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));

XBooleanFilterLuceneTests.java

@@ -25,7 +25,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.FilterClause;
-import org.apache.lucene.queries.XTermsFilter;
+import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -88,7 +88,7 @@ public class XBooleanFilterLuceneTests extends ElasticsearchTestCase {
}
private Filter getTermsFilter(String field, String text) {
-return new XTermsFilter(new Term(field, text));
+return new TermsFilter(new Term(field, text));
}
private Filter getWrappedTermQuery(String field, String text) {

SimpleIndexQueryParserTests.java

@@ -1308,8 +1308,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termsFilter("name.last", "banon", "kimchy"))).query();
assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
-assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
-XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
+TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}
@@ -1322,8 +1322,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
Query parsedQuery = queryParser.parse(query).query();
assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
-assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
-XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
+TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}
@@ -1336,8 +1336,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
-assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
-XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
+assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
+TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}

SimpleChildQuerySearchTests.java

@@ -588,22 +588,16 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
refresh();
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-// Enable this again when upgraded to Lucene 4.7.1
-// .setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(queryString("c_field:*"))))).get();
-.setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(matchAllQuery())))).get();
+.setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(queryString("c_field:*"))))).get();
assertNoFailures(searchResponse);
searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-// Enable this again when upgraded to Lucene 4.7.1
-// .setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(queryString("p_field:*"))))).execute()
-.setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(matchAllQuery())))).execute()
+.setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(queryString("p_field:*"))))).execute()
.actionGet();
assertNoFailures(searchResponse);
searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-// Enable this again when upgraded to Lucene 4.7.1
-// .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
-.setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(matchAllQuery())))).execute()
+.setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
.actionGet();
assertNoFailures(searchResponse);
}
@@ -626,9 +620,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
refresh();
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-// Enable this again when upgraded to Lucene 4.7.1
-// .setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
-.setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:(red OR yellow OR blue)"))))).execute()
+.setQuery(boolQuery().mustNot(topChildrenQuery("child", boolQuery().should(queryString("c_field:*"))))).execute()
.actionGet();
assertNoFailures(searchResponse);
}

HighlighterSearchTests.java

@@ -167,7 +167,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
SearchResponse search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com ").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
assertHighlight(search, 0, "body", 0, startsWith("<em>Test: http://www.facebook.com</em>"));
search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http</em>://<em>www</em>.<em>facebook</em>.<em>com</em> <em>http</em>://<em>elasticsearch</em>.<em>org</em> <em>http</em>://<em>xing</em>.<em>com</em> <em>http</em>://<em>cnn</em>.<em>com</em> <em>http</em>://<em>quora</em>.com"));
assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http</em>://<em>www</em>.<em>facebook</em>.com <em>http</em>://<em>elasticsearch</em>.<em>org</em> <em>http</em>://<em>xing</em>.com <em>http</em>://<em>cnn</em>.com <em>http</em>://<em>quora</em>.com"));
}
@Test