diff --git a/pom.xml b/pom.xml
index 708ab338865..bf2f5120267 100644
--- a/pom.xml
+++ b/pom.xml
@@ -989,6 +989,7 @@
org/elasticsearch/Version.class
org/apache/lucene/search/XReferenceManager.class
org/apache/lucene/search/XSearcherManager.class
+ org/apache/lucene/queries/XTermsFilter.class
org/elasticsearch/index/percolator/stats/ShardPercolateService$RamEstimator.class
org/elasticsearch/index/merge/Merges.class
diff --git a/src/main/java/org/apache/lucene/queries/XTermsFilter.java b/src/main/java/org/apache/lucene/queries/XTermsFilter.java
new file mode 100644
index 00000000000..13c05919db4
--- /dev/null
+++ b/src/main/java/org/apache/lucene/queries/XTermsFilter.java
@@ -0,0 +1,328 @@
+package org.apache.lucene.queries;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Constructs a filter for docs matching any of the terms added to this class.
+ * Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in
+ * a sequence. An example might be a collection of primary keys from a database query result or perhaps
+ * a choice of "category" labels picked by the end user. As a filter, this is much faster than the
+ * equivalent query (a BooleanQuery with many "should" TermQueries)
+ */
+public final class XTermsFilter extends Filter {
+
+ static {
+ assert Version.LUCENE_47 == org.elasticsearch.Version.CURRENT.luceneVersion : "Remove this once we are on LUCENE_48 - see LUCENE-5502";
+ }
+
+ /*
+ * this class is often used for large number of terms in a single field.
+ * to optimize for this case and to be filter-cache friendly we
+ * serialize all terms into a single byte array and store offsets
+ * in a parallel array to keep the # of object constant and speed up
+ * equals / hashcode.
+ *
+ * This adds quite a bit of complexity but allows large term filters to
+ * be efficient for GC and cache-lookups
+ */
+ private final int[] offsets;
+ private final byte[] termsBytes;
+ private final TermsAndField[] termsAndFields;
+ private final int hashCode; // cached hashcode for fast cache lookups
+ private static final int PRIME = 31;
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given list. The list
+ * can contain duplicate terms and multiple fields.
+ */
+ public XTermsFilter(final List<Term> terms) {
+ this(new FieldAndTermEnum() {
+ // we need to sort for deduplication and to have a common cache key
+ final Iterator<Term> iter = sort(terms).iterator();
+ @Override
+ public BytesRef next() {
+ if (iter.hasNext()) {
+ Term next = iter.next();
+ field = next.field();
+ return next.bytes();
+ }
+ return null;
+ }}, terms.size());
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given {@link BytesRef} list for
+ * a single field.
+ */
+ public XTermsFilter(final String field, final List<BytesRef> terms) {
+ this(new FieldAndTermEnum(field) {
+ // we need to sort for deduplication and to have a common cache key
+ final Iterator<BytesRef> iter = sort(terms).iterator();
+ @Override
+ public BytesRef next() {
+ if (iter.hasNext()) {
+ return iter.next();
+ }
+ return null;
+ }
+ }, terms.size());
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given {@link BytesRef} array for
+ * a single field.
+ */
+ public XTermsFilter(final String field, final BytesRef...terms) {
+ // this ctor prevents unnecessary Term creations
+ this(field, Arrays.asList(terms));
+ }
+
+ /**
+ * Creates a new {@link XTermsFilter} from the given array. The array can
+ * contain duplicate terms and multiple fields.
+ */
+ public XTermsFilter(final Term... terms) {
+ this(Arrays.asList(terms));
+ }
+
+
+ private XTermsFilter(FieldAndTermEnum iter, int length) {
+ // TODO: maybe use oal.index.PrefixCodedTerms instead?
+ // If number of terms is more than a few hundred it
+ // should be a win
+
+ // TODO: we also pack terms in FieldCache/DocValues
+ // ... maybe we can refactor to share that code
+
+ // TODO: yet another option is to build the union of the terms in
+ // an automaton an call intersect on the termsenum if the density is high
+
+ int hash = 9;
+ byte[] serializedTerms = new byte[0];
+ this.offsets = new int[length+1];
+ int lastEndOffset = 0;
+ int index = 0;
+ ArrayList<TermsAndField> termsAndFields = new ArrayList<TermsAndField>();
+ TermsAndField lastTermsAndField = null;
+ BytesRef previousTerm = null;
+ String previousField = null;
+ BytesRef currentTerm;
+ String currentField;
+ while((currentTerm = iter.next()) != null) {
+ currentField = iter.field();
+ if (currentField == null) {
+ throw new IllegalArgumentException("Field must not be null");
+ }
+ if (previousField != null) {
+ // deduplicate
+ if (previousField.equals(currentField)) {
+ if (previousTerm.bytesEquals(currentTerm)){
+ continue;
+ }
+ } else {
+ final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
+ lastTermsAndField = new TermsAndField(start, index, previousField);
+ termsAndFields.add(lastTermsAndField);
+ }
+ }
+ hash = PRIME * hash + currentField.hashCode();
+ hash = PRIME * hash + currentTerm.hashCode();
+ if (serializedTerms.length < lastEndOffset+currentTerm.length) {
+ serializedTerms = ArrayUtil.grow(serializedTerms, lastEndOffset+currentTerm.length);
+ }
+ System.arraycopy(currentTerm.bytes, currentTerm.offset, serializedTerms, lastEndOffset, currentTerm.length);
+ offsets[index] = lastEndOffset;
+ lastEndOffset += currentTerm.length;
+ index++;
+ previousTerm = currentTerm;
+ previousField = currentField;
+ }
+ offsets[index] = lastEndOffset;
+ final int start = lastTermsAndField == null ? 0 : lastTermsAndField.end;
+ lastTermsAndField = new TermsAndField(start, index, previousField);
+ termsAndFields.add(lastTermsAndField);
+ this.termsBytes = ArrayUtil.shrink(serializedTerms, lastEndOffset);
+ this.termsAndFields = termsAndFields.toArray(new TermsAndField[termsAndFields.size()]);
+ this.hashCode = hash;
+
+ }
+
+
+ @Override
+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+ final AtomicReader reader = context.reader();
+ FixedBitSet result = null; // lazy init if needed - no need to create a big bitset ahead of time
+ final Fields fields = reader.fields();
+ final BytesRef spare = new BytesRef(this.termsBytes);
+ if (fields == null) {
+ return result;
+ }
+ Terms terms = null;
+ TermsEnum termsEnum = null;
+ DocsEnum docs = null;
+ for (TermsAndField termsAndField : this.termsAndFields) {
+ if ((terms = fields.terms(termsAndField.field)) != null) {
+ termsEnum = terms.iterator(termsEnum); // this won't return null
+ for (int i = termsAndField.start; i < termsAndField.end; i++) {
+ spare.offset = offsets[i];
+ spare.length = offsets[i+1] - offsets[i];
+ if (termsEnum.seekExact(spare)) {
+ docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
+ if (result == null) {
+ if (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ result = new FixedBitSet(reader.maxDoc());
+ // lazy init but don't do it in the hot loop since we could read many docs
+ result.set(docs.docID());
+ }
+ }
+ while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ result.set(docs.docID());
+ }
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if ((obj == null) || (obj.getClass() != this.getClass())) {
+ return false;
+ }
+
+ XTermsFilter test = (XTermsFilter) obj;
+ // first check the fields before even comparing the bytes
+ if (test.hashCode == hashCode && Arrays.equals(termsAndFields, test.termsAndFields)) {
+ int lastOffset = termsAndFields[termsAndFields.length - 1].end;
+ // compare offsets since we sort they must be identical
+ if (ArrayUtil.equals(offsets, 0, test.offsets, 0, lastOffset + 1)) {
+ // straight byte comparison since we sort they must be identical
+ return ArrayUtil.equals(termsBytes, 0, test.termsBytes, 0, offsets[lastOffset]);
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ BytesRef spare = new BytesRef(termsBytes);
+ boolean first = true;
+ for (int i = 0; i < termsAndFields.length; i++) {
+ TermsAndField current = termsAndFields[i];
+ for (int j = current.start; j < current.end; j++) {
+ spare.offset = offsets[j];
+ spare.length = offsets[j+1] - offsets[j];
+ if (!first) {
+ builder.append(' ');
+ }
+ first = false;
+ builder.append(current.field).append(':');
+ builder.append(spare.utf8ToString());
+ }
+ }
+
+ return builder.toString();
+ }
+
+ private static final class TermsAndField {
+ final int start;
+ final int end;
+ final String field;
+
+
+ TermsAndField(int start, int end, String field) {
+ super();
+ this.start = start;
+ this.end = end;
+ this.field = field;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((field == null) ? 0 : field.hashCode());
+ result = prime * result + end;
+ result = prime * result + start;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ TermsAndField other = (TermsAndField) obj;
+ if (field == null) {
+ if (other.field != null) return false;
+ } else if (!field.equals(other.field)) return false;
+ if (end != other.end) return false;
+ if (start != other.start) return false;
+ return true;
+ }
+
+ }
+
+ private static abstract class FieldAndTermEnum {
+ protected String field;
+
+ public abstract BytesRef next();
+
+ public FieldAndTermEnum() {}
+
+ public FieldAndTermEnum(String field) { this.field = field; }
+
+ public String field() {
+ return field;
+ }
+ }
+
+ /*
+ * simple utility that returns the in-place sorted list
+ */
+ private static <T extends Comparable<? super T>> List<T> sort(List<T> toSort) {
+ if (toSort.isEmpty()) {
+ throw new IllegalArgumentException("no terms provided");
+ }
+ Collections.sort(toSort);
+ return toSort;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index f060b4d18aa..97273aa94ff 100755
--- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.FilterClause;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
@@ -476,7 +476,7 @@ public class MapperService extends AbstractIndexComponent implements Iterable implements FieldMapper {
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
- return new TermsFilter(names.indexName(), bytesRefs);
+ return new XTermsFilter(names.indexName(), bytesRefs);
}
/**
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
index ae9cb5211b1..7c1960e32e8 100644
--- a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
@@ -25,7 +25,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
@@ -180,7 +180,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern
if (fieldType.indexed() || context == null) {
return super.termFilter(value, context);
}
- return new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value));
+ return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value));
}
@Override
@@ -188,7 +188,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern
if (fieldType.indexed() || context == null) {
return super.termsFilter(values, context);
}
- return new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values));
+ return new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values));
}
@Override
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
index cba6d1b0ed2..6f3a3e553fb 100644
--- a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
@@ -23,7 +23,7 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
@@ -278,7 +278,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter
for (String type : context.mapperService().types()) {
typesValues.add(Uid.createUidAsBytes(type, bValue));
}
- return new TermsFilter(names.indexName(), typesValues);
+ return new XTermsFilter(names.indexName(), typesValues);
}
}
@@ -311,7 +311,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter
}
}
}
- return new TermsFilter(names.indexName(), bValues);
+ return new XTermsFilter(names.indexName(), bValues);
}
/**
diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java
index b76271c58cf..5eec1bb5e03 100644
--- a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java
@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
@@ -108,7 +108,7 @@ public class IdsFilterParser implements FilterParser {
types = parseContext.mapperService().types();
}
- TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+ XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
if (filterName != null) {
parseContext.addNamedFilter(filterName, filter);
}
diff --git a/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
index 5b6702c695c..cf009f458e6 100644
--- a/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
+++ b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java
@@ -21,7 +21,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
@@ -115,7 +115,7 @@ public class IdsQueryParser implements QueryParser {
types = parseContext.mapperService().types();
}
- TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
+ XTermsFilter filter = new XTermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
// no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
ConstantScoreQuery query = new ConstantScoreQuery(filter);
query.setBoost(boost);
diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java
index 4099eedd13e..90153dbd686 100644
--- a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java
@@ -22,7 +22,7 @@ package org.elasticsearch.index.query;
import com.google.common.collect.Lists;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
@@ -201,7 +201,7 @@ public class TermsFilterParser implements FilterParser {
for (int i = 0; i < filterValues.length; i++) {
filterValues[i] = BytesRefs.toBytesRef(terms.get(i));
}
- filter = new TermsFilter(fieldName, filterValues);
+ filter = new XTermsFilter(fieldName, filterValues);
}
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
index 58b0554f5d4..1c84a0ff4ea 100644
--- a/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
+++ b/src/test/java/org/elasticsearch/common/lucene/search/TermsFilterTests.java
@@ -25,7 +25,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.TermFilter;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -97,19 +97,19 @@ public class TermsFilterTests extends ElasticsearchTestCase {
AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
w.close();
- TermsFilter tf = new TermsFilter(new Term[]{new Term(fieldName, "19")});
+ XTermsFilter tf = new XTermsFilter(new Term[]{new Term(fieldName, "19")});
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits, nullValue());
- tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(1));
- tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));
- tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
+ tf = new XTermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
index 4d99e0cb804..5aa44245a59 100644
--- a/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
+++ b/src/test/java/org/elasticsearch/common/lucene/search/XBooleanFilterLuceneTests.java
@@ -25,7 +25,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.FilterClause;
-import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.queries.XTermsFilter;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -88,7 +88,7 @@ public class XBooleanFilterLuceneTests extends ElasticsearchTestCase {
}
private Filter getTermsFilter(String field, String text) {
- return new TermsFilter(new Term(field, text));
+ return new XTermsFilter(new Term(field, text));
}
private Filter getWrappedTermQuery(String field, String text) {
diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
index 15c6f6ea0cb..24b8eed4735 100644
--- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
+++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
@@ -1307,8 +1307,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termsFilter("name.last", "banon", "kimchy"))).query();
assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
- assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
- TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}
@@ -1321,8 +1321,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
Query parsedQuery = queryParser.parse(query).query();
assertThat(parsedQuery, instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery;
- assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
- TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}
@@ -1335,8 +1335,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
assertThat(parsedQuery.query(), instanceOf(XFilteredQuery.class));
XFilteredQuery filteredQuery = (XFilteredQuery) parsedQuery.query();
- assertThat(filteredQuery.getFilter(), instanceOf(TermsFilter.class));
- TermsFilter termsFilter = (TermsFilter) filteredQuery.getFilter();
+ assertThat(filteredQuery.getFilter(), instanceOf(XTermsFilter.class));
+ XTermsFilter termsFilter = (XTermsFilter) filteredQuery.getFilter();
//assertThat(termsFilter.getTerms().length, equalTo(2));
//assertThat(termsFilter.getTerms()[0].text(), equalTo("banon"));
}