Merge branch 'master' of https://github.com/elastic/elasticsearch
commit 1926fe5ad5
@@ -57,12 +57,10 @@ class ClusterConfiguration {
    @Input
    Closure waitCondition = { NodeInfo node, AntBuilder ant ->
        File tmpFile = new File(node.cwd, 'wait.success')
        ant.echo(message: "[${LocalDateTime.now()}] Waiting for elasticsearch node ${node.httpUri()}", level: "info")
        ant.get(src: "http://${node.httpUri()}",
        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                retries: 10)
        ant.echo(message: "[${LocalDateTime.now()}] Finished waiting for elasticsearch node ${node.httpUri()}. Reachable? ${tmpFile.exists()}", level: "info")
        return tmpFile.exists()
    }

@@ -52,10 +52,6 @@ public class PercolateShardRequest extends BroadcastShardRequest {
        this.startTime = request.startTime;
    }

    public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
        super(shardId, originalIndices);
    }

    PercolateShardRequest(ShardId shardId, PercolateRequest request) {
        super(shardId, request);
        this.documentType = request.documentType();

@@ -160,12 +160,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
            items = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                int slot = in.readVInt();
                OriginalIndices originalIndices = OriginalIndices.readOriginalIndices(in);
                PercolateShardRequest shardRequest = new PercolateShardRequest(new ShardId(index, shardId), originalIndices);
                shardRequest.documentType(in.readString());
                shardRequest.source(in.readBytesReference());
                shardRequest.docSource(in.readBytesReference());
                shardRequest.onlyCount(in.readBoolean());
                PercolateShardRequest shardRequest = new PercolateShardRequest();
                shardRequest.readFrom(in);
                Item item = new Item(slot, shardRequest);
                items.add(item);
            }

@@ -179,11 +175,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
            out.writeVInt(items.size());
            for (Item item : items) {
                out.writeVInt(item.slot);
                OriginalIndices.writeOriginalIndices(item.request.originalIndices(), out);
                out.writeString(item.request.documentType());
                out.writeBytesReference(item.request.source());
                out.writeBytesReference(item.request.docSource());
                out.writeBoolean(item.request.onlyCount());
                item.request.writeTo(out);
            }
        }

@@ -406,10 +406,26 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
            String nodeId = nodeStats.getNode().id();
            String nodeName = nodeStats.getNode().getName();
            if (logger.isTraceEnabled()) {
                logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
                logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",
                        nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
                        leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
            }
            if (leastAvailablePath.getTotal().bytes() < 0) {
                if (logger.isTraceEnabled()) {
                    logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
                            nodeId, leastAvailablePath.getTotal().bytes());
                }
            } else {
                newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
            }
            if (mostAvailablePath.getTotal().bytes() < 0) {
                if (logger.isTraceEnabled()) {
                    logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
                            nodeId, mostAvailablePath.getTotal().bytes());
                }
            } else {
                newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
            }
            newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
            newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));

        }
    }

@@ -20,6 +20,9 @@ package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.Priority;

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

/**
 *
 */

@@ -27,14 +30,21 @@ public abstract class PrioritizedRunnable implements Runnable, Comparable<Priori

    private final Priority priority;
    private final long creationDate;
    private final LongSupplier relativeTimeProvider;

    public static PrioritizedRunnable wrap(Runnable runnable, Priority priority) {
        return new Wrapped(runnable, priority);
    }

    protected PrioritizedRunnable(Priority priority) {
        this(priority, System::nanoTime);
    }

    // package visible for testing
    PrioritizedRunnable(Priority priority, LongSupplier relativeTimeProvider) {
        this.priority = priority;
        creationDate = System.nanoTime();
        this.creationDate = relativeTimeProvider.getAsLong();
        this.relativeTimeProvider = relativeTimeProvider;
    }

    public long getCreationDateInNanos() {

@@ -42,7 +52,7 @@ public abstract class PrioritizedRunnable implements Runnable, Comparable<Priori
    }

    public long getAgeInMillis() {
        return Math.max(0, (System.nanoTime() - creationDate) / 1000);
        return TimeUnit.MILLISECONDS.convert(relativeTimeProvider.getAsLong() - creationDate, TimeUnit.NANOSECONDS);
    }

    @Override

@@ -25,6 +25,7 @@ import java.nio.file.Path;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

@@ -715,8 +716,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC

    private static boolean sameException(Exception left, Exception right) {
        if (left.getClass() == right.getClass()) {
            if ((left.getMessage() != null && left.getMessage().equals(right.getMessage()))
                    || left.getMessage() == right.getMessage()) {
            if (Objects.equals(left.getMessage(), right.getMessage())) {
                StackTraceElement[] stackTraceLeft = left.getStackTrace();
                StackTraceElement[] stackTraceRight = right.getStackTrace();
                if (stackTraceLeft.length == stackTraceRight.length) {

@@ -389,7 +389,12 @@ public abstract class MappedFieldType extends FieldType {
        return false;
    }

    /** Creates a term associated with the field of this mapper for the given value */
    /**
     * Creates a term associated with the field of this mapper for the given
     * value. Its important to use termQuery when building term queries because
     * things like ParentFieldMapper override it to make more interesting
     * queries.
     */
    protected Term createTerm(Object value) {
        return new Term(name(), indexedValueForSearch(value));
    }

@@ -212,10 +212,6 @@ public class MatchQuery {
        this.zeroTermsQuery = zeroTermsQuery;
    }

    protected boolean forceAnalyzeQueryString() {
        return false;
    }

    protected Analyzer getAnalyzer(MappedFieldType fieldType) {
        if (this.analyzer == null) {
            if (fieldType != null) {

@@ -240,17 +236,19 @@ public class MatchQuery {
            field = fieldName;
        }

        if (fieldType != null && fieldType.useTermQueryWithQueryString() && !forceAnalyzeQueryString()) {
            try {
                return fieldType.termQuery(value, context);
            } catch (RuntimeException e) {
                if (lenient) {
                    return null;
                }
                throw e;
            }

        /*
         * If the user forced an analyzer we really don't care if they are
         * searching a type that wants term queries to be used with query string
         * because the QueryBuilder will take care of it. If they haven't forced
         * an analyzer then types like NumberFieldType that want terms with
         * query string will blow up because their analyzer isn't capable of
         * passing through QueryBuilder.
         */
        boolean noForcedAnalyzer = this.analyzer == null;
        if (fieldType != null && fieldType.useTermQueryWithQueryString() && noForcedAnalyzer) {
            return termQuery(fieldType, value);
        }

        Analyzer analyzer = getAnalyzer(fieldType);
        assert analyzer != null;
        MatchQueryBuilder builder = new MatchQueryBuilder(analyzer, fieldType);

@@ -282,6 +280,26 @@ public class MatchQuery {
        }
    }

    /**
     * Creates a TermQuery-like-query for MappedFieldTypes that don't support
     * QueryBuilder which is very string-ish. Just delegates to the
     * MappedFieldType for MatchQuery but gets more complex for blended queries.
     */
    protected Query termQuery(MappedFieldType fieldType, Object value) {
        return termQuery(fieldType, value, lenient);
    }

    protected final Query termQuery(MappedFieldType fieldType, Object value, boolean lenient) {
        try {
            return fieldType.termQuery(value, context);
        } catch (RuntimeException e) {
            if (lenient) {
                return null;
            }
            throw e;
        }
    }

    protected Query zeroTermsQuery() {
        return zeroTermsQuery == DEFAULT_ZERO_TERMS_QUERY ? Queries.newMatchNoDocsQuery() : Queries.newMatchAllQuery();
    }

@@ -289,20 +307,20 @@ public class MatchQuery {
    private class MatchQueryBuilder extends QueryBuilder {

        private final MappedFieldType mapper;

        /**
         * Creates a new QueryBuilder using the given analyzer.
         */
        public MatchQueryBuilder(Analyzer analyzer, @Nullable MappedFieldType mapper) {
            super(analyzer);
            this.mapper = mapper;
        }
    }

        @Override
        protected Query newTermQuery(Term term) {
            return blendTermQuery(term, mapper);
        }

    public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
        final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
        final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();

@@ -352,11 +370,16 @@ public class MatchQuery {
    protected Query blendTermQuery(Term term, MappedFieldType fieldType) {
        if (fuzziness != null) {
            if (fieldType != null) {
                Query query = fieldType.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
                if (query instanceof FuzzyQuery) {
                    QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
                try {
                    Query query = fieldType.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
                    if (query instanceof FuzzyQuery) {
                        QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
                    }
                    return query;
                } catch (RuntimeException e) {
                    return new TermQuery(term);
                    // See long comment below about why we're lenient here.
                }
                return query;
            }
            int edits = fuzziness.asDistance(term.text());
            FuzzyQuery query = new FuzzyQuery(term, edits, fuzzyPrefixLength, maxExpansions, transpositions);

@@ -364,9 +387,25 @@ public class MatchQuery {
            return query;
        }
        if (fieldType != null) {
            Query termQuery = fieldType.queryStringTermQuery(term);
            if (termQuery != null) {
                return termQuery;
            /*
             * Its a bit weird to default to lenient here but its the backwards
             * compatible. It makes some sense when you think about what we are
             * doing here: at this point the user has forced an analyzer and
             * passed some string to the match query. We cut it up using the
             * analyzer and then tried to cram whatever we get into the field.
             * lenient=true here means that we try the terms in the query and on
             * the off chance that they are actually valid terms then we
             * actually try them. lenient=false would mean that we blow up the
             * query if they aren't valid terms. "valid" in this context means
             * "parses properly to something of the type being queried." So "1"
             * is a valid number, etc.
             *
             * We use the text form here because we we've received the term from
             * an analyzer that cut some string into text.
             */
            Query query = termQuery(fieldType, term.bytes(), true);
            if (query != null) {
                return query;
            }
        }
        return new TermQuery(term);

@@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.MappedFieldType;

@@ -104,7 +103,7 @@ public class MultiMatchQuery extends MatchQuery {
        this.tieBreaker = tieBreaker;
    }

    public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException{
    public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException{
        List<Query> queries = new ArrayList<>();
        for (String fieldName : fieldNames.keySet()) {
            Float boostValue = fieldNames.get(fieldName);

@@ -146,8 +145,8 @@ public class MultiMatchQuery extends MatchQuery {
            return MultiMatchQuery.super.blendTermQuery(term, fieldType);
        }

        public boolean forceAnalyzeQueryString() {
            return false;
        public Query termQuery(MappedFieldType fieldType, Object value) {
            return MultiMatchQuery.this.termQuery(fieldType, value, lenient);
        }
    }

@@ -196,8 +195,13 @@ public class MultiMatchQuery extends MatchQuery {
            } else {
                blendedFields = null;
            }
            final FieldAndFieldType fieldAndFieldType = group.get(0);
            Query q = parseGroup(type.matchQueryType(), fieldAndFieldType.field, 1f, value, minimumShouldMatch);
            /*
             * We have to pick some field to pass through the superclass so
             * we just pick the first field. It shouldn't matter because
             * fields are already grouped by their analyzers/types.
             */
            String representativeField = group.get(0).field;
            Query q = parseGroup(type.matchQueryType(), representativeField, 1f, value, minimumShouldMatch);
            if (q != null) {
                queries.add(q);
            }

@@ -206,11 +210,6 @@ public class MultiMatchQuery extends MatchQuery {
            return queries.isEmpty() ? null : queries;
        }

        @Override
        public boolean forceAnalyzeQueryString() {
            return blendedFields != null;
        }

        @Override
        public Query blendTerm(Term term, MappedFieldType fieldType) {
            if (blendedFields == null) {

@@ -231,6 +230,16 @@ public class MultiMatchQuery extends MatchQuery {
            }
            return BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker);
        }

        @Override
        public Query termQuery(MappedFieldType fieldType, Object value) {
            /*
             * Use the string value of the term because we're reusing the
             * portion of the query is usually after the analyzer has run on
             * each term. We just skip that analyzer phase.
             */
            return blendTerm(new Term(fieldType.name(), value.toString()), fieldType);
        }
    }

    @Override

@@ -241,6 +250,15 @@ public class MultiMatchQuery extends MatchQuery {
        return queryBuilder.blendTerm(term, fieldType);
    }

    @Override
    protected Query termQuery(MappedFieldType fieldType, Object value) {
        if (queryBuilder == null) {
            // Can be null when the MultiMatchQuery collapses into a MatchQuery
            return super.termQuery(fieldType, value);
        }
        return queryBuilder.termQuery(fieldType, value);
    }

    private static final class FieldAndFieldType {
        final String field;
        final MappedFieldType fieldType;

@@ -255,18 +273,17 @@ public class MultiMatchQuery extends MatchQuery {

        public Term newTerm(String value) {
            try {
                final BytesRef bytesRef = fieldType.indexedValueForSearch(value);
                return new Term(field, bytesRef);
            } catch (Exception ex) {
                /*
                 * Note that this ignore any overrides the fieldType might do
                 * for termQuery, meaning things like _parent won't work here.
                 */
                return new Term(fieldType.name(), fieldType.indexedValueForSearch(value));
            } catch (RuntimeException ex) {
                // we can't parse it just use the incoming value -- it will
                // just have a DF of 0 at the end of the day and will be ignored
                // Note that this is like lenient = true allways
            }
            return new Term(field, value);
        }
    }

    @Override
    protected boolean forceAnalyzeQueryString() {
        return this.queryBuilder == null ? super.forceAnalyzeQueryString() : this.queryBuilder.forceAnalyzeQueryString();
    }
}

@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket.filter;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

@@ -45,13 +46,13 @@ public class FilterAggregator extends SingleBucketAggregator {
    private final Weight filter;

    public FilterAggregator(String name,
                            Query filter,
                            Weight filter,
                            AggregatorFactories factories,
                            AggregationContext aggregationContext,
                            Aggregator parent, List<PipelineAggregator> pipelineAggregators,
                            Map<String, Object> metaData) throws IOException {
        super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
        this.filter = aggregationContext.searchContext().searcher().createNormalizedWeight(filter, false);
        this.filter = filter;
    }

    @Override

@@ -89,10 +90,22 @@ public class FilterAggregator extends SingleBucketAggregator {
            this.filter = filter;
        }

        // TODO: refactor in order to initialize the factory once with its parent,
        // the context, etc. and then have a no-arg lightweight create method
        // (since create may be called thousands of times)

        private IndexSearcher searcher;
        private Weight weight;

        @Override
        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
                List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
            return new FilterAggregator(name, filter, factories, context, parent, pipelineAggregators, metaData);
            IndexSearcher contextSearcher = context.searchContext().searcher();
            if (searcher != contextSearcher) {
                searcher = contextSearcher;
                weight = contextSearcher.createNormalizedWeight(filter, false);
            }
            return new FilterAggregator(name, weight, factories, context, parent, pipelineAggregators, metaData);
        }

    }

@@ -20,6 +20,7 @@
package org.elasticsearch.search.aggregations.bucket.filters;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

@@ -57,31 +58,26 @@ public class FiltersAggregator extends BucketsAggregator {
    }

    private final String[] keys;
    private final Weight[] filters;
    private Weight[] filters;
    private final boolean keyed;
    private final boolean showOtherBucket;
    private final String otherBucketKey;
    private final int totalNumKeys;

    public FiltersAggregator(String name, AggregatorFactories factories, List<KeyedFilter> filters, boolean keyed, String otherBucketKey,
    public FiltersAggregator(String name, AggregatorFactories factories, String[] keys, Weight[] filters, boolean keyed, String otherBucketKey,
            AggregationContext aggregationContext,
            Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
            throws IOException {
        super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
        this.keyed = keyed;
        this.keys = new String[filters.size()];
        this.filters = new Weight[filters.size()];
        this.keys = keys;
        this.filters = filters;
        this.showOtherBucket = otherBucketKey != null;
        this.otherBucketKey = otherBucketKey;
        if (showOtherBucket) {
            this.totalNumKeys = filters.size() + 1;
            this.totalNumKeys = keys.length + 1;
        } else {
            this.totalNumKeys = filters.size();
        }
        for (int i = 0; i < filters.size(); ++i) {
            KeyedFilter keyedFilter = filters.get(i);
            this.keys[i] = keyedFilter.key;
            this.filters[i] = aggregationContext.searchContext().searcher().createNormalizedWeight(keyedFilter.filter, false);
            this.totalNumKeys = keys.length;
        }
    }

@@ -146,6 +142,7 @@ public class FiltersAggregator extends BucketsAggregator {
    public static class Factory extends AggregatorFactory {

        private final List<KeyedFilter> filters;
        private final String[] keys;
        private boolean keyed;
        private String otherBucketKey;

@@ -154,12 +151,33 @@ public class FiltersAggregator extends BucketsAggregator {
            this.filters = filters;
            this.keyed = keyed;
            this.otherBucketKey = otherBucketKey;
            this.keys = new String[filters.size()];
            for (int i = 0; i < filters.size(); ++i) {
                KeyedFilter keyedFilter = filters.get(i);
                this.keys[i] = keyedFilter.key;
            }
        }

        // TODO: refactor in order to initialize the factory once with its parent,
        // the context, etc. and then have a no-arg lightweight create method
        // (since create may be called thousands of times)

        private IndexSearcher searcher;
        private Weight[] weights;

        @Override
        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
                List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
            return new FiltersAggregator(name, factories, filters, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
            IndexSearcher contextSearcher = context.searchContext().searcher();
            if (searcher != contextSearcher) {
                searcher = contextSearcher;
                weights = new Weight[filters.size()];
                for (int i = 0; i < filters.size(); ++i) {
                    KeyedFilter keyedFilter = filters.get(i);
                    this.weights[i] = contextSearcher.createNormalizedWeight(keyedFilter.filter, false);
                }
            }
            return new FiltersAggregator(name, factories, keys, weights, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
        }
    }

@@ -583,7 +583,7 @@ public class TransportReplicationActionTests extends ESTestCase {
        assertIndexShardCounter(1);
    }

    public void testCounterOnPrimary() throws InterruptedException, ExecutionException, IOException {
    public void testCounterOnPrimary() throws Exception {
        final String index = "test";
        final ShardId shardId = new ShardId(index, 0);
        // no replica, we only want to test on primary

@@ -611,9 +611,7 @@ public class TransportReplicationActionTests extends ESTestCase {
        t.start();
        // shard operation should be ongoing, so the counter is at 2
        // we have to wait here because increment happens in thread
        awaitBusy(() -> count.get() == 2);

        assertIndexShardCounter(2);
        assertBusy(() -> assertIndexShardCounter(2));
        assertThat(transport.capturedRequests().length, equalTo(0));
        ((ActionWithDelay) action).countDownLatch.countDown();
        t.join();

@@ -664,7 +662,7 @@ public class TransportReplicationActionTests extends ESTestCase {
            @Override
            public void run() {
                try {
                    replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel(new PlainActionFuture<>()));
                    replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId), createTransportChannel(new PlainActionFuture<>()));
                } catch (Exception e) {
                }
            }

@@ -672,7 +670,7 @@ public class TransportReplicationActionTests extends ESTestCase {
        t.start();
        // shard operation should be ongoing, so the counter is at 2
        // we have to wait here because increment happens in thread
        awaitBusy(() -> count.get() == 2);
        assertBusy(() -> assertIndexShardCounter(2));
        ((ActionWithDelay) action).countDownLatch.countDown();
        t.join();
        // operation should have finished and counter decreased because no outstanding replica requests

@@ -164,7 +164,50 @@ public class DiskUsageTests extends ESTestCase {
        assertDiskUsage(mostNode_3, node3FSInfo[1]);
    }

    public void testFillDiskUsageSomeInvalidValues() {
        ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvailableUsages = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<String, DiskUsage> newMostAvailableUsages = ImmutableOpenMap.builder();
        FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
                new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
                new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1),
                new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280),
        };
        FsInfo.Path[] node2FSInfo = new FsInfo.Path[] {
                new FsInfo.Path("/least_most", "/dev/sda", -2, -1, -1),
        };

        FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
                new FsInfo.Path("/most", "/dev/sda", 100, 90, 70),
                new FsInfo.Path("/least", "/dev/sda", 10, -8, 0),
        };
        NodeStats[] nodeStats = new NodeStats[] {
                new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
                        null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null),
                new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
                        null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null),
                new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
                        null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null)
        };
        InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
        DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");
        DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1");
        assertNull("node1 should have been skipped", leastNode_1);
        assertDiskUsage(mostNode_1, node1FSInfo[2]);

        DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2");
        DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2");
        assertNull("node2 should have been skipped", leastNode_2);
        assertNull("node2 should have been skipped", mostNode_2);

        DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3");
        DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3");
        assertDiskUsage(leastNode_3, node3FSInfo[1]);
        assertDiskUsage(mostNode_3, node3FSInfo[0]);
    }

    private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) {
        assertNotNull(usage);
        assertNotNull(path);
        assertEquals(usage.toString(), usage.getPath(), path.getPath());
        assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().bytes());
        assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().bytes());

@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.Priority;
import org.elasticsearch.test.ESTestCase;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class PrioritizedRunnableTests extends ESTestCase {
    public void testGetAgeInMillis() throws Exception {
        AtomicLong time = new AtomicLong();

        PrioritizedRunnable runnable = new PrioritizedRunnable(Priority.NORMAL, time::get) {
            @Override
            public void run() {

            }
        };
        assertEquals(0, runnable.getAgeInMillis());
        int milliseconds = randomIntBetween(1, 256);
        time.addAndGet(TimeUnit.NANOSECONDS.convert(milliseconds, TimeUnit.MILLISECONDS));
        assertEquals(milliseconds, runnable.getAgeInMillis());
    }
}

@@ -24,14 +24,18 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;
import org.hamcrest.Matcher;
import org.joda.time.format.ISODateTimeFormat;

import java.io.IOException;
import java.util.Locale;

@@ -120,15 +124,15 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
        switch (queryBuilder.type()) {
            case BOOLEAN:
                assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(ExtendedCommonTermsQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)));
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)).or(instanceOf(NumericRangeQuery.class)));
                break;
            case PHRASE:
                assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(PhraseQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)));
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)).or(instanceOf(NumericRangeQuery.class)));
                break;
            case PHRASE_PREFIX:
                assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(MultiPhrasePrefixQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)));
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)).or(instanceOf(NumericRangeQuery.class)));
                break;
        }

@@ -173,10 +177,45 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
            // compare lowercased terms here
            String originalTermLc = queryBuilder.value().toString().toLowerCase(Locale.ROOT);
            String actualTermLc = fuzzyQuery.getTerm().text().toLowerCase(Locale.ROOT);
            assertThat(actualTermLc, equalTo(originalTermLc));
            Matcher<String> termLcMatcher = equalTo(originalTermLc);
            if ("false".equals(originalTermLc) || "true".equals(originalTermLc)) {
                // Booleans become t/f when querying a boolean field
                termLcMatcher = either(termLcMatcher).or(equalTo(originalTermLc.substring(0, 1)));
            }
            assertThat(actualTermLc, termLcMatcher);
            assertThat(queryBuilder.prefixLength(), equalTo(fuzzyQuery.getPrefixLength()));
            assertThat(queryBuilder.fuzzyTranspositions(), equalTo(fuzzyQuery.getTranspositions()));
        }

        if (query instanceof NumericRangeQuery) {
            // These are fuzzy numeric queries
            assertTrue(queryBuilder.fuzziness() != null);
            @SuppressWarnings("unchecked")
            NumericRangeQuery<Number> numericRangeQuery = (NumericRangeQuery<Number>) query;
            assertTrue(numericRangeQuery.includesMin());
            assertTrue(numericRangeQuery.includesMax());

            double value;
            try {
                value = Double.parseDouble(queryBuilder.value().toString());
            } catch (NumberFormatException e) {
                // Maybe its a date
                value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
            }
            double width;
            if (queryBuilder.fuzziness().equals(Fuzziness.AUTO)) {
                width = 1;
            } else {
                try {
                    width = queryBuilder.fuzziness().asDouble();
                } catch (NumberFormatException e) {
                    // Maybe a time value?
                    width = queryBuilder.fuzziness().asTimeValue().getMillis();
                }
            }
            assertEquals(value - width, numericRangeQuery.getMin().doubleValue(), width * .1);
            assertEquals(value + width, numericRangeQuery.getMax().doubleValue(), width * .1);
        }
    }

    public void testIllegalValues() {

@@ -27,6 +27,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

@@ -132,7 +133,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
                .or(instanceOf(BooleanQuery.class)).or(instanceOf(DisjunctionMaxQuery.class))
                .or(instanceOf(FuzzyQuery.class)).or(instanceOf(MultiPhrasePrefixQuery.class))
                .or(instanceOf(MatchAllDocsQuery.class)).or(instanceOf(ExtendedCommonTermsQuery.class))
                .or(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(PhraseQuery.class)));
                .or(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(PhraseQuery.class))
                .or(instanceOf(NumericRangeQuery.class)));
    }

    public void testIllegaArguments() {

@@ -33,12 +33,14 @@ import org.elasticsearch.test.ESIntegTestCase;
import java.io.IOException;

import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;

@@ -363,6 +365,33 @@ public class MultiPercolatorIT extends ESIntegTestCase {
        assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q");
    }

    public void testStartTimeIsPropagatedToShardRequests() throws Exception {
        // See: https://github.com/elastic/elasticsearch/issues/15908
        internalCluster().ensureAtLeastNumDataNodes(2);
        client().admin().indices().prepareCreate("test")
                .setSettings(settingsBuilder()
                        .put("index.number_of_shards", 1)
                        .put("index.number_of_replicas", 1)
                )
                .addMapping("type", "date_field", "type=date,format=strict_date_optional_time||epoch_millis")
                .get();
        ensureGreen();

        client().prepareIndex("test", ".percolator", "1")
                .setSource(jsonBuilder().startObject().field("query", rangeQuery("date_field").lt("now+90d")).endObject())
                .setRefresh(true)
                .get();

        for (int i = 0; i < 32; i++) {
            MultiPercolateResponse response = client().prepareMultiPercolate()
                    .add(client().preparePercolate().setDocumentType("type").setIndices("test")
                            .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("date_field", "2015-07-21T10:28:01-07:00")))
                    .get();
            assertThat(response.getItems()[0].getResponse().getCount(), equalTo(1L));
            assertThat(response.getItems()[0].getResponse().getMatches()[0].getId().string(), equalTo("1"));
        }
    }

    void initNestedIndexAndPercolation() throws IOException {
        XContentBuilder mapping = XContentFactory.jsonBuilder();
        mapping.startObject().startObject("properties").startObject("companyname").field("type", "string").endObject()

@@ -66,14 +66,13 @@ import static org.hamcrest.Matchers.nullValue;

public class PercolateDocumentParserTests extends ESTestCase {

    private Index index;
    private MapperService mapperService;
    private PercolateDocumentParser parser;
    private QueryShardContext queryShardContext;
    private PercolateShardRequest request;

    @Before
    public void init() {
        index = new Index("_index");
        IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings(
                Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)

@@ -97,6 +96,10 @@ public class PercolateDocumentParserTests extends ESTestCase {
        parser = new PercolateDocumentParser(
                highlightPhase, new SortParseElement(), aggregationPhase, mappingUpdatedAction
        );

        request = Mockito.mock(PercolateShardRequest.class);
        Mockito.when(request.shardId()).thenReturn(new ShardId(new Index("_index"), 0));
        Mockito.when(request.documentType()).thenReturn("type");
    }

    public void testParseDoc() throws Exception {

@@ -105,9 +108,7 @@ public class PercolateDocumentParserTests extends ESTestCase {
                .field("field1", "value1")
                .endObject()
                .endObject();
        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
        request.documentType("type");
        request.source(source.bytes());
        Mockito.when(request.source()).thenReturn(source.bytes());

        PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
        ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);

@@ -126,9 +127,7 @@ public class PercolateDocumentParserTests extends ESTestCase {
                .field("size", 123)
                .startObject("sort").startObject("_score").endObject().endObject()
                .endObject();
        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
        request.documentType("type");
        request.source(source.bytes());
        Mockito.when(request.source()).thenReturn(source.bytes());

        PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
        ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);

@@ -151,10 +150,8 @@ public class PercolateDocumentParserTests extends ESTestCase {
        XContentBuilder docSource = jsonBuilder().startObject()
                .field("field1", "value1")
                .endObject();
        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
        request.documentType("type");
        request.source(source.bytes());
        request.docSource(docSource.bytes());
        Mockito.when(request.source()).thenReturn(source.bytes());
        Mockito.when(request.docSource()).thenReturn(docSource.bytes());

        PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
        ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);

@@ -180,10 +177,8 @@ public class PercolateDocumentParserTests extends ESTestCase {
        XContentBuilder docSource = jsonBuilder().startObject()
                .field("field1", "value1")
                .endObject();
        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
        request.documentType("type");
        request.source(source.bytes());
        request.docSource(docSource.bytes());
        Mockito.when(request.source()).thenReturn(source.bytes());
        Mockito.when(request.docSource()).thenReturn(docSource.bytes());

        PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
        try {

@@ -42,6 +42,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogra
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.core.IsNull.notNullValue;

/**

@@ -145,6 +146,25 @@ public class FilterIT extends ESIntegTestCase {
        assertThat((double) filter.getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs));
    }

    public void testAsSubAggregation() {
        SearchResponse response = client().prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field("value").interval(2L).subAggregation(
                                filter("filter").filter(matchAllQuery()))).get();

        assertSearchResponse(response);

        Histogram histo = response.getAggregations().get("histo");
        assertThat(histo, notNullValue());
        assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));

        for (Histogram.Bucket bucket : histo.getBuckets()) {
            Filter filter = bucket.getAggregations().get("filter");
            assertThat(filter, notNullValue());
            assertEquals(bucket.getDocCount(), filter.getDocCount());
        }
    }

    public void testWithContextBasedSubAggregation() throws Exception {
        try {
            client().prepareSearch("idx")

@@ -44,6 +44,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.filters;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.core.IsNull.notNullValue;

@@ -205,6 +206,27 @@ public class FiltersIT extends ESIntegTestCase {
        assertThat((double) propertiesCounts[1], equalTo((double) sum / numTag2Docs));
    }

    public void testAsSubAggregation() {
        SearchResponse response = client().prepareSearch("idx")
                .addAggregation(
                        histogram("histo").field("value").interval(2L).subAggregation(
                                filters("filters").filter(matchAllQuery()))).get();

        assertSearchResponse(response);

        Histogram histo = response.getAggregations().get("histo");
        assertThat(histo, notNullValue());
        assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));

        for (Histogram.Bucket bucket : histo.getBuckets()) {
            Filters filters = bucket.getAggregations().get("filters");
            assertThat(filters, notNullValue());
            assertThat(filters.getBuckets().size(), equalTo(1));
            Filters.Bucket filterBucket = filters.getBuckets().get(0);
            assertEquals(bucket.getDocCount(), filterBucket.getDocCount());
        }
    }

    public void testWithContextBasedSubAggregation() throws Exception {

        try {

@@ -19,6 +19,7 @@
package org.elasticsearch.search.query;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

@@ -230,6 +231,12 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
                .setQuery(randomizeType(multiMatchQuery("15", "skill"))).get();
        assertNoFailures(searchResponse);
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")).get();
        assertNoFailures(searchResponse);
        assertFirstHit(searchResponse, hasId("theone"));

        String[] fields = {"full_name", "first_name", "last_name", "last_name_phrase", "first_name_phrase", "category_phrase", "category"};

        String[] query = {"marvel","hero", "captain", "america", "15", "17", "1", "5", "ultimate", "Man",

@@ -459,18 +466,65 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
        assertHitCount(searchResponse, 1l);
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category")
                        .operator(Operator.AND))).get();
        assertHitCount(searchResponse, 1l);
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category")
                        .operator(Operator.AND))).get();
        assertHitCount(searchResponse, 1l);
        assertFirstHit(searchResponse, hasId("theone"));


        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("captain america 15", "first_name", "last_name", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("25 15", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("25 15", "first_name", "int-field", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "skill", "first_name")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "first_name", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .analyzer("category"))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)

@@ -529,6 +583,46 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
        assertFirstHit(searchResponse, hasId("ultimate2"));
        assertSecondHit(searchResponse, hasId("ultimate1"));
        assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));

        // Test group based on numeric fields
        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "skill", "first_name")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        // Two numeric fields together caused trouble at one point!
        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "int-field", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
        assertFirstHit(searchResponse, hasId("theone"));

        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("alpha 15", "first_name", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .lenient(true))).get();
        assertFirstHit(searchResponse, hasId("ultimate1"));
        /*
         * Doesn't find theone because "alpha 15" isn't a number and we don't
         * break on spaces.
         */
        assertHitCount(searchResponse, 1);

        // Lenient wasn't always properly lenient with two numeric fields
        searchResponse = client().prepareSearch("test")
                .setQuery(randomizeType(multiMatchQuery("alpha 15", "int-field", "first_name", "skill")
                        .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
                        .lenient(true))).get();
        assertFirstHit(searchResponse, hasId("ultimate1"));
    }

    private static final void assertEquivalent(String query, SearchResponse left, SearchResponse right) {