Merge branch 'master' into enhancement/discovery_node_one_getter
commit 32b6e529f4

@@ -62,6 +62,8 @@ public class Version {
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_2_ID = 2020299;
public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_alpha1_ID = 5000001;

@@ -83,6 +85,8 @@ public class Version {
return V_5_0_0_alpha1;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_2_ID:
return V_2_2_2;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
@@ -23,9 +23,11 @@ import org.elasticsearch.ElasticsearchParseException;
|
|||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@@ -38,6 +40,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
|
|||
*/
|
||||
public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAllocationExplainRequest> {
|
||||
|
||||
private static ObjectParser<ClusterAllocationExplainRequest, Void> PARSER = new ObjectParser("cluster/allocation/explain");
|
||||
static {
|
||||
PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index"));
|
||||
PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard"));
|
||||
PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary"));
|
||||
}
|
||||
|
||||
private String index;
|
||||
private Integer shard;
|
||||
private Boolean primary;
|
||||
|
@@ -101,7 +110,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
|||
}
|
||||
|
||||
@Nullable
|
||||
public int getShard() {
|
||||
public Integer getShard() {
|
||||
return this.shard;
|
||||
}
|
||||
|
||||
|
@@ -111,7 +120,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
|||
}
|
||||
|
||||
@Nullable
|
||||
public boolean isPrimary() {
|
||||
public Boolean isPrimary() {
|
||||
return this.primary;
|
||||
}
|
||||
|
||||
|
@@ -139,40 +148,12 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
|
|||
}
|
||||
|
||||
public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException {
|
||||
String currentFieldName = null;
|
||||
String index = null;
|
||||
Integer shard = null;
|
||||
Boolean primary = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token.isValue()) {
|
||||
if ("index".equals(currentFieldName)) {
|
||||
index = parser.text();
|
||||
} else if ("shard".equals(currentFieldName)) {
|
||||
shard = parser.intValue();
|
||||
} else if ("primary".equals(currentFieldName)) {
|
||||
primary = parser.booleanValue();
|
||||
} else {
|
||||
throw new ElasticsearchParseException("unexpected field [" + currentFieldName + "] in allocation explain request");
|
||||
}
|
||||
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
// the object was started
|
||||
continue;
|
||||
} else {
|
||||
throw new ElasticsearchParseException("unexpected token [" + token + "] in allocation explain request");
|
||||
}
|
||||
ClusterAllocationExplainRequest req = PARSER.parse(parser, new ClusterAllocationExplainRequest());
|
||||
Exception e = req.validate();
|
||||
if (e != null) {
|
||||
throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request", e);
|
||||
}
|
||||
|
||||
if (index == null && shard == null && primary == null) {
|
||||
// If it was an empty body, use the "any unassigned shard" request
|
||||
return new ClusterAllocationExplainRequest();
|
||||
} else if (index == null || shard == null || primary == null) {
|
||||
throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request");
|
||||
}
|
||||
return new ClusterAllocationExplainRequest(index, shard, primary);
|
||||
return req;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@@ -37,6 +38,7 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Returns the list of tasks currently running on the nodes
|
||||
|
@@ -47,6 +49,8 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
|
|||
|
||||
private Map<DiscoveryNode, List<TaskInfo>> nodes;
|
||||
|
||||
private List<TaskGroup> groups;
|
||||
|
||||
public ListTasksResponse() {
|
||||
}
|
||||
|
||||
|
@@ -94,6 +98,41 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
|
|||
return nodeTasks;
|
||||
}
|
||||
|
||||
public List<TaskGroup> getTaskGroups() {
|
||||
if (groups == null) {
|
||||
buildTaskGroups();
|
||||
}
|
||||
return groups;
|
||||
}
|
||||
|
||||
private void buildTaskGroups() {
|
||||
Map<TaskId, TaskGroup.Builder> taskGroups = new HashMap<>();
|
||||
List<TaskGroup.Builder> topLevelTasks = new ArrayList<>();
|
||||
// First populate all tasks
|
||||
for (TaskInfo taskInfo : this.tasks) {
|
||||
taskGroups.put(taskInfo.getTaskId(), TaskGroup.builder(taskInfo));
|
||||
}
|
||||
|
||||
// Now go through all task group builders and add children to their parents
|
||||
for (TaskGroup.Builder taskGroup : taskGroups.values()) {
|
||||
TaskId parentTaskId = taskGroup.getTaskInfo().getParentTaskId();
|
||||
if (parentTaskId.isSet()) {
|
||||
TaskGroup.Builder parentTask = taskGroups.get(parentTaskId);
|
||||
if (parentTask != null) {
|
||||
// we found the parent in the list of tasks - add this task to the parent's children
parentTask.addGroup(taskGroup);
} else {
// the parent is a zombie or was filtered out - add this task to the top task list
|
||||
topLevelTasks.add(taskGroup);
|
||||
}
|
||||
} else {
|
||||
// top level task - add it to the top task list
|
||||
topLevelTasks.add(taskGroup);
|
||||
}
|
||||
}
|
||||
this.groups = Collections.unmodifiableList(topLevelTasks.stream().map(TaskGroup.Builder::build).collect(Collectors.toList()));
|
||||
}
|
||||
|
||||
public List<TaskInfo> getTasks() {
|
||||
return tasks;
|
||||
}
|
||||
|
@@ -119,39 +158,48 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
|
|||
}
|
||||
builder.endArray();
|
||||
}
|
||||
String groupBy = params.param("group_by", "nodes");
|
||||
if ("nodes".equals(groupBy)) {
|
||||
builder.startObject("nodes");
|
||||
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
|
||||
DiscoveryNode node = entry.getKey();
|
||||
builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("name", node.getName());
|
||||
builder.field("transport_address", node.getAddress().toString());
|
||||
builder.field("host", node.getHostName());
|
||||
builder.field("ip", node.getAddress());
|
||||
|
||||
builder.startObject("nodes");
|
||||
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
|
||||
DiscoveryNode node = entry.getKey();
|
||||
builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("name", node.getName());
|
||||
builder.field("transport_address", node.getAddress().toString());
|
||||
builder.field("host", node.getHostName());
|
||||
builder.field("ip", node.getAddress());
|
||||
builder.startArray("roles");
|
||||
for (DiscoveryNode.Role role : node.getRoles()) {
|
||||
builder.value(role.getRoleName());
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
builder.startArray("roles");
|
||||
for (DiscoveryNode.Role role : node.getRoles()) {
|
||||
builder.value(role.getRoleName());
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
if (!node.getAttributes().isEmpty()) {
|
||||
builder.startObject("attributes");
|
||||
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
|
||||
builder.field(attrEntry.getKey(), attrEntry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
if (!node.getAttributes().isEmpty()) {
|
||||
builder.startObject("attributes");
|
||||
for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
|
||||
builder.field(attrEntry.getKey(), attrEntry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.startObject("tasks");
|
||||
for(TaskInfo task : entry.getValue()) {
|
||||
builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
task.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
builder.endObject();
|
||||
}
|
||||
} else if ("parents".equals(groupBy)) {
|
||||
builder.startObject("tasks");
|
||||
for(TaskInfo task : entry.getValue()) {
|
||||
builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
task.toXContent(builder, params);
|
||||
for (TaskGroup group : getTaskGroups()) {
|
||||
builder.startObject(group.getTaskInfo().getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
group.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,94 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.tasks.list;
|
||||
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Information about a currently running task and all its subtasks.
|
||||
*/
|
||||
public class TaskGroup implements ToXContent {
|
||||
|
||||
private final TaskInfo task;
|
||||
|
||||
private final List<TaskGroup> childTasks;
|
||||
|
||||
|
||||
public TaskGroup(TaskInfo task, List<TaskGroup> childTasks) {
|
||||
this.task = task;
|
||||
this.childTasks = Collections.unmodifiableList(new ArrayList<>(childTasks));
|
||||
}
|
||||
|
||||
public static Builder builder(TaskInfo taskInfo) {
|
||||
return new Builder(taskInfo);
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private TaskInfo taskInfo;
|
||||
private List<Builder> childTasks;
|
||||
|
||||
private Builder(TaskInfo taskInfo) {
|
||||
this.taskInfo = taskInfo;
|
||||
childTasks = new ArrayList<>();
|
||||
}
|
||||
|
||||
public void addGroup(Builder builder) {
|
||||
childTasks.add(builder);
|
||||
}
|
||||
|
||||
public TaskInfo getTaskInfo() {
|
||||
return taskInfo;
|
||||
}
|
||||
|
||||
public TaskGroup build() {
|
||||
return new TaskGroup(taskInfo, childTasks.stream().map(Builder::build).collect(Collectors.toList()));
|
||||
}
|
||||
}
|
||||
|
||||
public TaskInfo getTaskInfo() {
|
||||
return task;
|
||||
}
|
||||
|
||||
public List<TaskGroup> getChildTasks() {
|
||||
return childTasks;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
task.toXContent(builder, params);
|
||||
if (childTasks.isEmpty() == false) {
|
||||
builder.startArray("children");
|
||||
for (TaskGroup taskGroup : childTasks) {
|
||||
builder.startObject();
|
||||
taskGroup.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
|
@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.BoolQueryBuilder;
|
||||
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
|
||||
import org.elasticsearch.index.query.PercolatorQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
|
@@ -207,7 +208,10 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
|
|||
boolQueryBuilder.filter(percolatorQueryBuilder);
|
||||
searchSource.field("query", boolQueryBuilder);
|
||||
} else {
|
||||
searchSource.field("query", percolatorQueryBuilder);
|
||||
// wrapping in a constant score query with boost 0 for bwc reasons.
|
||||
// percolator api didn't emit scores before and never included scores
|
||||
// for how well percolator queries matched with the document being percolated
|
||||
searchSource.field("query", new ConstantScoreQueryBuilder(percolatorQueryBuilder).boost(0f));
|
||||
}
|
||||
|
||||
searchSource.endObject();
|
||||
|
|
|
@@ -42,8 +42,6 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
|||
|
||||
public static final String[] ALL_NODES = Strings.EMPTY_ARRAY;
|
||||
|
||||
public static final long ALL_TASKS = -1L;
|
||||
|
||||
private String[] nodesIds = ALL_NODES;
|
||||
|
||||
private TimeValue timeout;
|
||||
|
|
|
@@ -37,6 +37,16 @@ import org.apache.lucene.search.ConstantScoreQuery;
|
|||
import org.apache.lucene.search.PhraseQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanContainingQuery;
|
||||
import org.apache.lucene.search.spans.SpanFirstQuery;
|
||||
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.search.spans.SpanWithinQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefBuilder;
|
||||
import org.elasticsearch.common.logging.LoggerMessageFormat;
|
||||
|
@@ -95,7 +105,6 @@ public final class ExtractQueryTermsService {
|
|||
* an UnsupportedQueryException is thrown.
|
||||
*/
|
||||
static Set<Term> extractQueryTerms(Query query) {
|
||||
// TODO: add support for span queries
|
||||
if (query instanceof TermQuery) {
|
||||
return Collections.singleton(((TermQuery) query).getTerm());
|
||||
} else if (query instanceof TermsQuery) {
|
||||
|
@@ -170,6 +179,27 @@ public final class ExtractQueryTermsService {
|
|||
} else if (query instanceof BlendedTermQuery) {
|
||||
List<Term> terms = ((BlendedTermQuery) query).getTerms();
|
||||
return new HashSet<>(terms);
|
||||
} else if (query instanceof SpanTermQuery) {
|
||||
return Collections.singleton(((SpanTermQuery) query).getTerm());
|
||||
} else if (query instanceof SpanNearQuery) {
|
||||
Set<Term> bestClause = null;
|
||||
SpanNearQuery spanNearQuery = (SpanNearQuery) query;
|
||||
for (SpanQuery clause : spanNearQuery.getClauses()) {
|
||||
Set<Term> temp = extractQueryTerms(clause);
|
||||
bestClause = selectTermListWithTheLongestShortestTerm(temp, bestClause);
|
||||
}
|
||||
return bestClause;
|
||||
} else if (query instanceof SpanOrQuery) {
|
||||
Set<Term> terms = new HashSet<>();
|
||||
SpanOrQuery spanOrQuery = (SpanOrQuery) query;
|
||||
for (SpanQuery clause : spanOrQuery.getClauses()) {
|
||||
terms.addAll(extractQueryTerms(clause));
|
||||
}
|
||||
return terms;
|
||||
} else if (query instanceof SpanFirstQuery) {
|
||||
return extractQueryTerms(((SpanFirstQuery)query).getMatch());
|
||||
} else if (query instanceof SpanNotQuery) {
|
||||
return extractQueryTerms(((SpanNotQuery) query).getInclude());
|
||||
} else {
|
||||
throw new UnsupportedQueryException(query);
|
||||
}
|
||||
|
|
|
@@ -28,6 +28,8 @@ import org.apache.lucene.search.Explanation;
|
|||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.SimpleCollector;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.search.TwoPhaseIterator;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
|
@@ -36,12 +38,10 @@ import org.elasticsearch.common.lucene.Lucene;
|
|||
import org.elasticsearch.index.percolator.ExtractQueryTermsService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
|
||||
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
|
||||
|
||||
public final class PercolatorQuery extends Query implements Accountable {
|
||||
|
||||
|
@@ -139,7 +139,14 @@ public final class PercolatorQuery extends Query implements Accountable {
|
|||
int result = twoPhaseIterator.approximation().advance(docId);
|
||||
if (result == docId) {
|
||||
if (twoPhaseIterator.matches()) {
|
||||
return Explanation.match(scorer.score(), "PercolatorQuery");
|
||||
if (needsScores) {
|
||||
QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
|
||||
Query query = percolatorQueries.getQuery(docId);
|
||||
Explanation detail = percolatorIndexSearcher.explain(query, 0);
|
||||
return Explanation.match(scorer.score(), "PercolatorQuery", detail);
|
||||
} else {
|
||||
return Explanation.match(scorer.score(), "PercolatorQuery");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -164,52 +171,46 @@ public final class PercolatorQuery extends Query implements Accountable {
|
|||
}
|
||||
|
||||
final QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
|
||||
return new Scorer(this) {
|
||||
if (needsScores) {
|
||||
return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {
|
||||
|
||||
@Override
|
||||
public DocIdSetIterator iterator() {
|
||||
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
|
||||
}
|
||||
float score;
|
||||
|
||||
@Override
|
||||
public TwoPhaseIterator twoPhaseIterator() {
|
||||
return new TwoPhaseIterator(approximation.iterator()) {
|
||||
@Override
|
||||
public boolean matches() throws IOException {
|
||||
return matchDocId(approximation.docID());
|
||||
@Override
|
||||
boolean matchDocId(int docId) throws IOException {
|
||||
Query query = percolatorQueries.getQuery(docId);
|
||||
if (query != null) {
|
||||
TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
|
||||
if (topDocs.totalHits > 0) {
|
||||
score = topDocs.scoreDocs[0].score;
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public float matchCost() {
|
||||
return MATCH_COST;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public float score() throws IOException {
|
||||
return approximation.score();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int freq() throws IOException {
|
||||
return approximation.freq();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docID() {
|
||||
return approximation.docID();
|
||||
}
|
||||
|
||||
boolean matchDocId(int docId) throws IOException {
|
||||
Query query = percolatorQueries.getQuery(docId);
|
||||
if (query != null) {
|
||||
return Lucene.exists(percolatorIndexSearcher, query);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public float score() throws IOException {
|
||||
return score;
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {
|
||||
|
||||
@Override
|
||||
public float score() throws IOException {
|
||||
return 0f;
|
||||
}
|
||||
|
||||
boolean matchDocId(int docId) throws IOException {
|
||||
Query query = percolatorQueries.getQuery(docId);
|
||||
return query != null && Lucene.exists(percolatorIndexSearcher, query);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@@ -276,4 +277,51 @@ public final class PercolatorQuery extends Query implements Accountable {
|
|||
|
||||
}
|
||||
|
||||
static abstract class BaseScorer extends Scorer {
|
||||
|
||||
final Scorer approximation;
|
||||
final QueryRegistry.Leaf percolatorQueries;
|
||||
final IndexSearcher percolatorIndexSearcher;
|
||||
|
||||
BaseScorer(Weight weight, Scorer approximation, QueryRegistry.Leaf percolatorQueries, IndexSearcher percolatorIndexSearcher) {
|
||||
super(weight);
|
||||
this.approximation = approximation;
|
||||
this.percolatorQueries = percolatorQueries;
|
||||
this.percolatorIndexSearcher = percolatorIndexSearcher;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DocIdSetIterator iterator() {
|
||||
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public final TwoPhaseIterator twoPhaseIterator() {
|
||||
return new TwoPhaseIterator(approximation.iterator()) {
|
||||
@Override
|
||||
public boolean matches() throws IOException {
|
||||
return matchDocId(approximation.docID());
|
||||
}
|
||||
|
||||
@Override
|
||||
public float matchCost() {
|
||||
return MATCH_COST;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int freq() throws IOException {
|
||||
return approximation.freq();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int docID() {
|
||||
return approximation.docID();
|
||||
}
|
||||
|
||||
abstract boolean matchDocId(int docId) throws IOException;
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -44,6 +44,7 @@ public class CancellableTask extends Task {
|
|||
final void cancel(String reason) {
|
||||
assert reason != null;
|
||||
this.reason.compareAndSet(null, reason);
|
||||
onCancelled();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -65,4 +66,10 @@ public class CancellableTask extends Task {
|
|||
public String getReasonCancelled() {
|
||||
return reason.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Called after the task is cancelled so that it can take any actions that it has to take.
|
||||
*/
|
||||
protected void onCancelled() {
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -24,8 +24,10 @@ import org.elasticsearch.action.FailedNodeException;
|
|||
import org.elasticsearch.action.TaskOperationFailure;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
|
@@ -43,6 +45,11 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.test.tasks.MockTaskManager;
|
||||
|
@@ -368,6 +375,10 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||
for (int i = 1; i < testNodes.length; i++) {
|
||||
assertEquals(1, response.getPerNodeTasks().get(testNodes[i].discoveryNode).size());
|
||||
}
|
||||
// There should be a single main task when grouped by tasks
|
||||
assertEquals(1, response.getTaskGroups().size());
|
||||
// And as many child tasks as we have nodes
|
||||
assertEquals(testNodes.length, response.getTaskGroups().get(0).getChildTasks().size());
|
||||
|
||||
// Check task counts using transport with filtering
|
||||
testNode = testNodes[randomIntBetween(0, testNodes.length - 1)];
|
||||
|
@@ -379,6 +390,11 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||
assertEquals(1, entry.getValue().size());
|
||||
assertNull(entry.getValue().get(0).getDescription());
|
||||
}
|
||||
// Since the main task is not in the list - all tasks should be by themselves
|
||||
assertEquals(testNodes.length, response.getTaskGroups().size());
|
||||
for (TaskGroup taskGroup : response.getTaskGroups()) {
|
||||
assertEquals(0, taskGroup.getChildTasks().size());
|
||||
}
|
||||
|
||||
// Check task counts using transport with detailed description
|
||||
listTasksRequest.setDetailed(true); // same request only with detailed description
|
||||
|
@@ -703,4 +719,53 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
|
|||
NodesResponse responses = future.get();
|
||||
assertEquals(0, responses.failureCount());
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testTasksToXContentGrouping() throws Exception {
|
||||
setupTestNodes(Settings.EMPTY);
|
||||
connectNodes(testNodes);
|
||||
|
||||
// Get the parent task
|
||||
ListTasksRequest listTasksRequest = new ListTasksRequest();
|
||||
listTasksRequest.setActions(ListTasksAction.NAME + "*");
|
||||
ListTasksResponse response = testNodes[0].transportListTasksAction.execute(listTasksRequest).get();
|
||||
assertEquals(testNodes.length + 1, response.getTasks().size());
|
||||
|
||||
// First group by node
|
||||
Map<String, Object> byNodes = serialize(response, new ToXContent.MapParams(Collections.singletonMap("group_by", "nodes")));
|
||||
byNodes = (Map<String, Object>) byNodes.get("nodes");
|
||||
// One element per node on the top level
assertEquals(testNodes.length, byNodes.size());
|
||||
Map<String, Object> firstNode = (Map<String, Object>) byNodes.get(testNodes[0].discoveryNode.getId());
|
||||
firstNode = (Map<String, Object>) firstNode.get("tasks");
|
||||
assertEquals(2, firstNode.size()); // two tasks for the first node
|
||||
for (int i = 1; i < testNodes.length; i++) {
|
||||
Map<String, Object> otherNode = (Map<String, Object>) byNodes.get(testNodes[i].discoveryNode.getId());
|
||||
otherNode = (Map<String, Object>) otherNode.get("tasks");
|
||||
assertEquals(1, otherNode.size()); // one task for each of the other nodes
|
||||
}
|
||||
|
||||
// Group by parents
|
||||
Map<String, Object> byParent = serialize(response, new ToXContent.MapParams(Collections.singletonMap("group_by", "parents")));
|
||||
byParent = (Map<String, Object>) byParent.get("tasks");
|
||||
// One element on the top level
|
||||
assertEquals(1, byParent.size()); // Only one top level task
|
||||
Map<String, Object> topTask = (Map<String, Object>) byParent.values().iterator().next();
|
||||
List<Object> children = (List<Object>) topTask.get("children");
|
||||
assertEquals(testNodes.length, children.size()); // one child task per node
|
||||
for (int i = 0; i < testNodes.length; i++) {
|
||||
Map<String, Object> child = (Map<String, Object>) children.get(i);
|
||||
assertNull(child.get("children"));
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, Object> serialize(ToXContent response, ToXContent.Params params) throws IOException {
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
|
||||
builder.startObject();
|
||||
response.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
builder.flush();
|
||||
logger.info(builder.string());
|
||||
return XContentHelper.convertToMap(builder.bytes(), false).v2();
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -58,6 +58,23 @@ public class ObjectParserTests extends ESTestCase {
|
|||
assertEquals(objectParser.toString(), "ObjectParser{name='foo', fields=[FieldParser{preferred_name=test, supportedTokens=[VALUE_STRING], type=STRING}, FieldParser{preferred_name=test_number, supportedTokens=[VALUE_STRING, VALUE_NUMBER], type=INT}, FieldParser{preferred_name=test_array, supportedTokens=[START_ARRAY, VALUE_STRING, VALUE_NUMBER], type=INT_ARRAY}, FieldParser{preferred_name=test_array, supportedTokens=[START_ARRAY, VALUE_STRING, VALUE_NUMBER], type=INT_ARRAY}, FieldParser{preferred_name=test_number, supportedTokens=[VALUE_STRING, VALUE_NUMBER], type=INT}]}");
|
||||
}
|
||||
|
||||
public void testEmptyObject() throws Exception {
|
||||
XContentParser parser = XContentType.JSON.xContent().createParser("{}");
|
||||
class TestStruct {
|
||||
public String val = null;
|
||||
public void setVal(String val) {
|
||||
this.val = val;
|
||||
}
|
||||
}
|
||||
|
||||
ObjectParser<TestStruct, Void> objectParser = new ObjectParser("eggplant");
|
||||
TestStruct s = new TestStruct();
|
||||
|
||||
objectParser.declareString(TestStruct::setVal, new ParseField("anything"));
|
||||
objectParser.parse(parser, s);
|
||||
assertNull("s.val should be null", s.val);
|
||||
}
|
||||
|
||||
public void testObjectOrDefault() throws IOException {
|
||||
XContentParser parser = XContentType.JSON.xContent().createParser("{\"object\" : { \"test\": 2}}");
|
||||
ObjectParser<StaticTestStruct, Void> objectParser = new ObjectParser("foo", StaticTestStruct::new);
|
||||
|
|
|
@@ -35,12 +35,19 @@ import org.apache.lucene.search.PhraseQuery;
|
|||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanFirstQuery;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@@ -242,6 +249,56 @@ public class ExtractQueryTermsServiceTests extends ESTestCase {
|
|||
assertThat(result.get(1).text(), equalTo("_term2"));
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_spanTermQuery() {
|
||||
// the following span queries aren't exposed in the query dsl and are therefore not supported:
|
||||
// 1) SpanPositionRangeQuery
|
||||
// 2) PayloadScoreQuery
|
||||
// 3) SpanBoostQuery
|
||||
|
||||
// The following span queries can't be supported because of how these queries work:
|
||||
// 1) SpanMultiTermQueryWrapper, not supported, because there is no support for MTQ typed queries yet.
|
||||
// 2) SpanContainingQuery, is a kind of range over spans and we don't know what is between the little and big terms
|
||||
// 3) SpanWithinQuery, same reason as SpanContainingQuery
|
||||
// 4) FieldMaskingSpanQuery is a tricky query so we shouldn't optimize this
|
||||
|
||||
SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term"));
|
||||
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(spanTermQuery1);
|
||||
assertTermsEqual(terms, spanTermQuery1.getTerm());
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_spanNearQuery() {
|
||||
SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term"));
|
||||
SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term"));
|
||||
SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("_field", true)
|
||||
.addClause(spanTermQuery1).addClause(spanTermQuery2).build();
|
||||
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(spanNearQuery);
|
||||
assertTermsEqual(terms, spanTermQuery2.getTerm());
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_spanOrQuery() {
|
||||
SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term"));
|
||||
SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term"));
|
||||
SpanOrQuery spanOrQuery = new SpanOrQuery(spanTermQuery1, spanTermQuery2);
|
||||
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(spanOrQuery);
|
||||
assertTermsEqual(terms, spanTermQuery1.getTerm(), spanTermQuery2.getTerm());
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_spanFirstQuery() {
|
||||
SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term"));
|
||||
SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term"));
|
||||
SpanFirstQuery spanFirstQuery = new SpanFirstQuery(spanTermQuery1, 20);
|
||||
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(spanFirstQuery);
|
||||
assertTermsEqual(terms, spanTermQuery1.getTerm());
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_spanNotQuery() {
|
||||
SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term"));
|
||||
SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term"));
|
||||
SpanNotQuery spanNotQuery = new SpanNotQuery(spanTermQuery1, spanTermQuery2);
|
||||
Set<Term> terms = ExtractQueryTermsService.extractQueryTerms(spanNotQuery);
|
||||
assertTermsEqual(terms, spanTermQuery1.getTerm());
|
||||
}
|
||||
|
||||
public void testExtractQueryMetadata_unsupportedQuery() {
|
||||
TermRangeQuery termRangeQuery = new TermRangeQuery("_field", null, null, true, false);
|
||||
|
||||
|
@@ -330,4 +387,8 @@ public class ExtractQueryTermsServiceTests extends ESTestCase {
|
|||
assertThat(((TermQuery) booleanQuery.clauses().get(i).getQuery()).getTerm().bytes().utf8ToString(), equalTo(expectedValue));
|
||||
}
|
||||
|
||||
private static void assertTermsEqual(Set<Term> actual, Term... expected) {
|
||||
assertEquals(new HashSet<>(Arrays.asList(expected)), actual);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -36,6 +36,7 @@ import org.apache.lucene.queries.BlendedTermQuery;
|
|||
import org.apache.lucene.queries.CommonTermsQuery;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
|
@@ -45,6 +46,10 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.search.WildcardQuery;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
|
@@ -60,6 +65,7 @@ import java.io.IOException;
|
|||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.Matchers.arrayWithSize;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
|
@@ -145,44 +151,104 @@ public class PercolatorQueryTests extends ESTestCase {
|
|||
new MatchAllDocsQuery()
|
||||
);
|
||||
builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
|
||||
TopDocs topDocs = shardSearcher.search(builder.build(), 10);
|
||||
// no scoring, wrapping it in a constant score query:
|
||||
Query query = new ConstantScoreQuery(builder.build());
|
||||
TopDocs topDocs = shardSearcher.search(query, 10);
|
||||
assertThat(topDocs.totalHits, equalTo(5));
|
||||
assertThat(topDocs.scoreDocs.length, equalTo(5));
|
||||
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
|
||||
Explanation explanation = shardSearcher.explain(builder.build(), 0);
|
||||
Explanation explanation = shardSearcher.explain(query, 0);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
|
||||
|
||||
explanation = shardSearcher.explain(builder.build(), 1);
|
||||
explanation = shardSearcher.explain(query, 1);
|
||||
assertThat(explanation.isMatch(), is(false));
|
||||
|
||||
assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
|
||||
explanation = shardSearcher.explain(builder.build(), 2);
|
||||
explanation = shardSearcher.explain(query, 2);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score));
|
||||
|
||||
assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
|
||||
explanation = shardSearcher.explain(builder.build(), 3);
|
||||
explanation = shardSearcher.explain(query, 3);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
|
||||
|
||||
explanation = shardSearcher.explain(builder.build(), 4);
|
||||
explanation = shardSearcher.explain(query, 4);
|
||||
assertThat(explanation.isMatch(), is(false));
|
||||
|
||||
assertThat(topDocs.scoreDocs[3].doc, equalTo(5));
|
||||
explanation = shardSearcher.explain(builder.build(), 5);
|
||||
explanation = shardSearcher.explain(query, 5);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[3].score));
|
||||
|
||||
explanation = shardSearcher.explain(builder.build(), 6);
|
||||
explanation = shardSearcher.explain(query, 6);
|
||||
assertThat(explanation.isMatch(), is(false));
|
||||
|
||||
assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
|
||||
explanation = shardSearcher.explain(builder.build(), 7);
|
||||
explanation = shardSearcher.explain(query, 7);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[4].score));
|
||||
}
|
||||
|
||||
public void testVariousQueries_withScoring() throws Exception {
|
||||
SpanNearQuery.Builder snp = new SpanNearQuery.Builder("field", true);
|
||||
snp.addClause(new SpanTermQuery(new Term("field", "jumps")));
|
||||
snp.addClause(new SpanTermQuery(new Term("field", "lazy")));
|
||||
snp.addClause(new SpanTermQuery(new Term("field", "dog")));
|
||||
snp.setSlop(2);
|
||||
addPercolatorQuery("1", snp.build());
|
||||
PhraseQuery.Builder pq1 = new PhraseQuery.Builder();
|
||||
pq1.add(new Term("field", "quick"));
|
||||
pq1.add(new Term("field", "brown"));
|
||||
pq1.add(new Term("field", "jumps"));
|
||||
pq1.setSlop(1);
|
||||
addPercolatorQuery("2", pq1.build());
|
||||
BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
|
||||
bq1.add(new TermQuery(new Term("field", "quick")), BooleanClause.Occur.MUST);
|
||||
bq1.add(new TermQuery(new Term("field", "brown")), BooleanClause.Occur.MUST);
|
||||
bq1.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.MUST);
|
||||
addPercolatorQuery("3", bq1.build());
|
||||
|
||||
indexWriter.close();
|
||||
directoryReader = DirectoryReader.open(directory);
|
||||
IndexSearcher shardSearcher = newSearcher(directoryReader);
|
||||
|
||||
MemoryIndex memoryIndex = new MemoryIndex();
|
||||
memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
|
||||
IndexSearcher percolateSearcher = memoryIndex.createSearcher();
|
||||
|
||||
PercolatorQuery.Builder builder = new PercolatorQuery.Builder(
|
||||
"docType",
|
||||
queryRegistry,
|
||||
new BytesArray("{}"),
|
||||
percolateSearcher,
|
||||
new MatchAllDocsQuery()
|
||||
);
|
||||
builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
|
||||
Query query = builder.build();
|
||||
TopDocs topDocs = shardSearcher.search(query, 10);
|
||||
assertThat(topDocs.totalHits, equalTo(3));
|
||||
|
||||
assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
|
||||
Explanation explanation = shardSearcher.explain(query, 2);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
|
||||
assertThat(explanation.getDetails(), arrayWithSize(1));
|
||||
|
||||
assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
|
||||
explanation = shardSearcher.explain(query, 1);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score));
|
||||
assertThat(explanation.getDetails(), arrayWithSize(1));
|
||||
|
||||
assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
|
||||
explanation = shardSearcher.explain(query, 0);
|
||||
assertThat(explanation.isMatch(), is(true));
|
||||
assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
|
||||
assertThat(explanation.getDetails(), arrayWithSize(1));
|
||||
}
|
||||
|
||||
public void testDuel() throws Exception {
|
||||
int numQueries = scaledRandomIntBetween(32, 256);
|
||||
for (int i = 0; i < numQueries; i++) {
|
||||
|
@@ -194,6 +260,8 @@ public class PercolatorQueryTests extends ESTestCase {
|
|||
query = new WildcardQuery(new Term("field", id + "*"));
|
||||
} else if (randomBoolean()) {
|
||||
query = new CustomQuery(new Term("field", id + "*"));
|
||||
} else if (randomBoolean()) {
|
||||
query = new SpanTermQuery(new Term("field", id));
|
||||
} else {
|
||||
query = new TermQuery(new Term("field", id));
|
||||
}
|
||||
|
@@ -223,6 +291,27 @@ public class PercolatorQueryTests extends ESTestCase {
|
|||
new Term("field", "brown"), new Term("field", "fox")}, false);
|
||||
addPercolatorQuery("_id2", blendedTermQuery);
|
||||
|
||||
SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("field", true)
|
||||
.addClause(new SpanTermQuery(new Term("field", "quick")))
|
||||
.addClause(new SpanTermQuery(new Term("field", "brown")))
|
||||
.addClause(new SpanTermQuery(new Term("field", "fox")))
|
||||
.build();
|
||||
addPercolatorQuery("_id3", spanNearQuery);
|
||||
|
||||
SpanNearQuery spanNearQuery2 = new SpanNearQuery.Builder("field", true)
|
||||
.addClause(new SpanTermQuery(new Term("field", "the")))
|
||||
.addClause(new SpanTermQuery(new Term("field", "lazy")))
|
||||
.addClause(new SpanTermQuery(new Term("field", "doc")))
|
||||
.build();
|
||||
SpanOrQuery spanOrQuery = new SpanOrQuery(
|
||||
spanNearQuery,
|
||||
spanNearQuery2
|
||||
);
|
||||
addPercolatorQuery("_id4", spanOrQuery);
|
||||
|
||||
SpanNotQuery spanNotQuery = new SpanNotQuery(spanNearQuery, spanNearQuery);
|
||||
addPercolatorQuery("_id5", spanNotQuery);
|
||||
|
||||
indexWriter.close();
|
||||
directoryReader = DirectoryReader.open(directory);
|
||||
IndexSearcher shardSearcher = newSearcher(directoryReader);
|
||||
|
|
|
@@ -33,6 +33,9 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
|||
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.percolatorQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
|
@@ -99,6 +102,44 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
|
|||
.setSource(jsonBuilder().startObject().field("query", multiMatchQuery("quick brown fox", "field1", "field2")
|
||||
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).endObject())
|
||||
.get();
|
||||
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
|
||||
.setSource(jsonBuilder().startObject().field("query",
|
||||
spanNearQuery(spanTermQuery("field1", "quick"), 0)
|
||||
.clause(spanTermQuery("field1", "brown"))
|
||||
.clause(spanTermQuery("field1", "fox"))
|
||||
.inOrder(true)
|
||||
).endObject())
|
||||
.get();
|
||||
client().admin().indices().prepareRefresh().get();
|
||||
|
||||
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4")
|
||||
.setSource(jsonBuilder().startObject().field("query",
|
||||
spanNotQuery(
|
||||
spanNearQuery(spanTermQuery("field1", "quick"), 0)
|
||||
.clause(spanTermQuery("field1", "brown"))
|
||||
.clause(spanTermQuery("field1", "fox"))
|
||||
.inOrder(true),
|
||||
spanNearQuery(spanTermQuery("field1", "the"), 0)
|
||||
.clause(spanTermQuery("field1", "lazy"))
|
||||
.clause(spanTermQuery("field1", "dog"))
|
||||
.inOrder(true)).dist(2)
|
||||
).endObject())
|
||||
.get();
|
||||
|
||||
// doesn't match
|
||||
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "5")
|
||||
.setSource(jsonBuilder().startObject().field("query",
|
||||
spanNotQuery(
|
||||
spanNearQuery(spanTermQuery("field1", "quick"), 0)
|
||||
.clause(spanTermQuery("field1", "brown"))
|
||||
.clause(spanTermQuery("field1", "fox"))
|
||||
.inOrder(true),
|
||||
spanNearQuery(spanTermQuery("field1", "the"), 0)
|
||||
.clause(spanTermQuery("field1", "lazy"))
|
||||
.clause(spanTermQuery("field1", "dog"))
|
||||
.inOrder(true)).dist(3)
|
||||
).endObject())
|
||||
.get();
|
||||
client().admin().indices().prepareRefresh().get();
|
||||
|
||||
BytesReference source = jsonBuilder().startObject()
|
||||
|
@@ -107,10 +148,17 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
|
|||
.endObject().bytes();
|
||||
SearchResponse response = client().prepareSearch()
|
||||
.setQuery(percolatorQuery("type", source))
|
||||
.addSort("_uid", SortOrder.ASC)
|
||||
.get();
|
||||
assertHitCount(response, 2);
|
||||
assertHitCount(response, 4);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
|
||||
assertThat(response.getHits().getAt(0).score(), equalTo(Float.NaN));
|
||||
assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
|
||||
assertThat(response.getHits().getAt(1).score(), equalTo(Float.NaN));
|
||||
assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
|
||||
assertThat(response.getHits().getAt(2).score(), equalTo(Float.NaN));
|
||||
assertThat(response.getHits().getAt(3).getId(), equalTo("4"));
|
||||
assertThat(response.getHits().getAt(3).score(), equalTo(Float.NaN));
|
||||
}
|
||||
|
||||
public void testPercolatorQueryWithHighlighting() throws Exception {
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@@ -162,7 +162,7 @@ if not "%ES_JAVA_OPTS%" == "" set JVM_OPTS=%JVM_OPTS%;%JVM_ES_JAVA_OPTS%
|
|||
if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual
|
||||
if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0
|
||||
|
||||
"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" ++StartParams start
|
||||
"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmSs %JVM_SS% --JvmMs %JVM_XMS% --JvmMx %JVM_XMX% --JvmOptions %JVM_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "Elasticsearch %ES_VERSION% (%SERVICE_ID%)" --Description "Elasticsearch %ES_VERSION% Windows Service - http://elasticsearch.org" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%"
|
||||
|
||||
|
||||
if not errorlevel 1 goto installed
|
||||
|
|
|
@@ -99,5 +99,14 @@ POST /_tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex
// AUTOSENSE

[float]
=== Task Grouping

The task lists returned by task API commands can be grouped either by nodes (default) or by parent tasks using the `group_by` parameter.
The following command will change the grouping to parent tasks:

[source,js]
--------------------------------------------------
GET /_tasks?group_by=parents
--------------------------------------------------
// AUTOSENSE
||||
|
|
|
@@ -249,6 +249,148 @@ PUT my_index/my_type/1
|
|||
<1> The `english` field is mapped as a `string` field with the `english` analyzer.
|
||||
<2> The `count` field is mapped as a `long` field with `doc_values` disabled
|
||||
|
||||
[[template-examples]]
|
||||
==== Template examples
|
||||
|
||||
Here are some examples of potentially useful dynamic templates:
|
||||
|
||||
===== Structured search
|
||||
|
||||
By default elasticsearch will map string fields as a `text` field with a sub
`keyword` field. However, if you are only indexing structured content and are not
interested in full text search, you can make elasticsearch map your fields
only as `keyword`s. Note that this means that in order to search those fields,
you will have to search on the exact same value that was indexed (see the
search sketch after the template below).
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
{
|
||||
"mappings": {
|
||||
"my_type": {
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"strings_as_keywords": {
|
||||
"match_mapping_type": "string",
|
||||
"mapping": {
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
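As a sketch of that constraint (the `city` field name and its value are placeholder examples, not part of the template itself): with the template above the string is indexed verbatim as a `keyword`, so only a query on the exact value matches; a lowercased or partial value would not.

[source,js]
--------------------------------------------------
PUT my_index/my_type/1
{
  "city": "New York"
}

GET my_index/_search
{
  "query": {
    "term": {
      "city": "New York"
    }
  }
}
--------------------------------------------------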
|
||||
|
||||
===== `text`-only mappings for strings
|
||||
|
||||
In contrast to the previous example, if the only thing that you care about
on your string fields is full-text search, and if you don't plan on running
aggregations, sorting or exact searches on your string fields, you could tell
elasticsearch to map them only as `text` fields (which was the default behaviour
before 5.0):
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
{
|
||||
"mappings": {
|
||||
"my_type": {
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"strings_as_text": {
|
||||
"match_mapping_type": "string",
|
||||
"mapping": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
||||
===== Disabled norms
|
||||
|
||||
Norms are index-time scoring factors. If you do not care about scoring, which
|
||||
would be the case for instance if you never sort documents by score, you could
|
||||
disable the storage of these scoring factors in the index and save some space.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
{
|
||||
"mappings": {
|
||||
"my_type": {
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"strings_as_keywords": {
|
||||
"match_mapping_type": "string",
|
||||
"mapping": {
|
||||
"type": "text",
|
||||
"norms": false,
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
||||
The sub `keyword` field appears in this template to be consistent with the
default rules of dynamic mappings. Of course, if you do not need it because
you don't need to perform exact searches or aggregations on this field, you could
remove it as described in the previous section.
|
||||
|
||||
===== Time-series
|
||||
|
||||
When doing time series analysis with elasticsearch, it is common to have many
numeric fields that you will often aggregate on but never filter on. In such a
case, you could disable indexing on those fields to save disk space and also
maybe gain some indexing speed (an aggregation sketch follows the template below):
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
{
|
||||
"mappings": {
|
||||
"my_type": {
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"unindexed_longs": {
|
||||
"match_mapping_type": "long",
|
||||
"mapping": {
|
||||
"type": "long",
|
||||
"index": false
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"unindexed_doubles": {
|
||||
"match_mapping_type": "double",
|
||||
"mapping": {
|
||||
"type": "float", <1>
|
||||
"index": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
<1> Like the default dynamic mapping rules, doubles are mapped as floats, which
|
||||
are usually accurate enough, yet require half the disk space.
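With such a mapping, fields matched by these templates can still be aggregated on, since aggregations rely on doc values rather than the inverted index; only queries against the field are ruled out. A minimal sketch, assuming a numeric field hypothetically named `response_time`:

[source,js]
--------------------------------------------------
POST my_index/_search
{
  "size": 0,
  "aggs": {
    "avg_response_time": {
      "avg": { "field": "response_time" }
    }
  }
}
--------------------------------------------------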
|
||||
|
||||
|
||||
[[override-default-template]]
|
||||
=== Override default template
|
||||
|
|
|
@@ -72,3 +72,52 @@ the tribe node will pick one of them. This can be configured using the `tribe.on
|
|||
setting. It defaults to `any`, but can be set to `drop` (drop indices that have
|
||||
a conflict), or `prefer_[tribeName]` to prefer the index from a specific tribe.
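For example, assuming tribes named `t1` and `t2` as in the configuration further below, preferring the index from `t1` on a conflict would look like this (a sketch, not a complete configuration):

[source,yaml]
------------------------
tribe:
    on_conflict: prefer_t1
------------------------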
|
||||
|
||||
[float]
|
||||
=== Tribe node settings
|
||||
|
||||
The tribe node starts a node client for each listed cluster. The following
|
||||
configuration options are passed down from the tribe node to each node client:
|
||||
|
||||
* `node.name` (used to derive the `node.name` for each node client)
|
||||
* `network.host`
|
||||
* `network.bind_host`
|
||||
* `network.publish_host`
|
||||
* `transport.host`
|
||||
* `transport.bind_host`
|
||||
* `transport.publish_host`
|
||||
* `path.home`
|
||||
* `path.conf`
|
||||
* `path.plugins`
|
||||
* `path.logs`
|
||||
* `path.scripts`
|
||||
* `shield.*`
|
||||
|
||||
Almost any setting (except for `path.*`) may be configured at the node client
|
||||
level itself, in which case it will override any passed through setting from
|
||||
the tribe node. Settings you may want to set at the node client level
|
||||
include:
|
||||
|
||||
* `network.host`
|
||||
* `network.bind_host`
|
||||
* `network.publish_host`
|
||||
* `transport.host`
|
||||
* `transport.bind_host`
|
||||
* `transport.publish_host`
|
||||
* `cluster.name`
|
||||
* `discovery.zen.ping.unicast.hosts`
|
||||
|
||||
[source,yaml]
|
||||
------------------------
|
||||
path.scripts: some/path/to/config <1>
|
||||
network.host: 192.168.1.5 <2>
|
||||
|
||||
tribe:
|
||||
t1:
|
||||
cluster.name: cluster_one
|
||||
t2:
|
||||
cluster.name: cluster_two
|
||||
network.host: 10.1.2.3 <3>
|
||||
------------------------
<1> The `path.scripts` setting is inherited by both `t1` and `t2`.
<2> The `network.host` setting is inherited by `t1`.
<3> The `t2` node client overrides the setting inherited from the tribe node.
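
Once the node clients have joined their clusters, requests sent to the tribe
node can read from and write to indices in either cluster as if they were
local. For example (a minimal sketch; the index names are made up for
illustration), a single search against the tribe node can span both clusters:

[source,js]
--------------------------------------------------
GET logs_cluster_one,logs_cluster_two/_search
{
  "query": {
    "match_all": {}
  }
}
--------------------------------------------------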


@@ -19,18 +19,6 @@

lexer grammar PainlessLexer;

@header {
    import java.util.Set;
}

@members {
    private Set<String> types = null;

    void setTypes(Set<String> types) {
        this.types = types;
    }
}

WS: [ \t\n\r]+ -> skip;
COMMENT: ( '//' .*? [\n\r] | '/*' .*? '*/' ) -> skip;

@@ -102,16 +90,14 @@ HEX: '0' [xX] [0-9a-fA-F]+ [lL]?;
INTEGER: ( '0' | [1-9] [0-9]* ) [lLfFdD]?;
DECIMAL: ( '0' | [1-9] [0-9]* ) DOT [0-9]* ( [eE] [+\-]? [0-9]+ )? [fF]?;

STRING: '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' {setText(getText().substring(1, getText().length() - 1));};
CHAR: '\'' . '\'' {setText(getText().substring(1, getText().length() - 1));};
STRING: '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"';
CHAR: '\'' . '\'';

TRUE: 'true';
FALSE: 'false';

NULL: 'null';

TYPE: ID GENERIC? {types.contains(getText().replace(" ", ""))}? {setText(getText().replace(" ", ""));};
fragment GENERIC: ' '* '<' ' '* ( ID GENERIC? ) ' '* ( COMMA ' '* ( ID GENERIC? ) ' '* )* '>';
ID: [_a-zA-Z] [_a-zA-Z0-9]*;

mode EXT;

@@ -67,15 +67,23 @@ declaration
    ;

decltype
    : TYPE (LBRACE RBRACE)*
    : identifier (LBRACE RBRACE)*
    ;

declvar
    : ID ( ASSIGN expression )?
    : identifier ( ASSIGN expression )?
    ;

trap
    : CATCH LP ( TYPE ID ) RP ( block | emptyscope )
    : CATCH LP ( identifier identifier ) RP ( block | emptyscope )
    ;

identifier
    : ID generic?
    ;

generic
    : LT identifier ( COMMA identifier )* GT
    ;

expression
@@ -109,21 +117,19 @@ expression
extstart
    : extprec
    | extcast
    | exttype
    | extvar
    | extnew
    | extstring
    ;

extprec: LP ( extprec | extcast | exttype | extvar | extnew | extstring ) RP ( extdot | extbrace )?;
extcast: LP decltype RP ( extprec | extcast | exttype | extvar | extnew | extstring );
extprec: LP ( extprec | extcast | extvar | extnew | extstring ) RP ( extdot | extbrace )?;
extcast: LP decltype RP ( extprec | extcast | extvar | extnew | extstring );
extbrace: LBRACE expression RBRACE ( extdot | extbrace )?;
extdot: DOT ( extcall | extfield );
exttype: TYPE extdot;
extcall: EXTID arguments ( extdot | extbrace )?;
extvar: ID ( extdot | extbrace )?;
extvar: identifier ( extdot | extbrace )?;
extfield: ( EXTID | EXTINTEGER ) ( extdot | extbrace )?;
extnew: NEW TYPE ( ( arguments ( extdot | extbrace)? ) | ( ( LBRACE expression RBRACE )+ extdot? ) );
extnew: NEW identifier ( ( arguments extdot? ) | ( ( LBRACE expression RBRACE )+ extdot? ) );
extstring: STRING (extdot | extbrace )?;

arguments

@@ -48,10 +48,11 @@ import org.elasticsearch.painless.PainlessParser.ExtnewContext;
import org.elasticsearch.painless.PainlessParser.ExtprecContext;
import org.elasticsearch.painless.PainlessParser.ExtstartContext;
import org.elasticsearch.painless.PainlessParser.ExtstringContext;
import org.elasticsearch.painless.PainlessParser.ExttypeContext;
import org.elasticsearch.painless.PainlessParser.ExtvarContext;
import org.elasticsearch.painless.PainlessParser.FalseContext;
import org.elasticsearch.painless.PainlessParser.ForContext;
import org.elasticsearch.painless.PainlessParser.GenericContext;
import org.elasticsearch.painless.PainlessParser.IdentifierContext;
import org.elasticsearch.painless.PainlessParser.IfContext;
import org.elasticsearch.painless.PainlessParser.IncrementContext;
import org.elasticsearch.painless.PainlessParser.InitializerContext;
@@ -83,7 +84,7 @@ class Analyzer extends PainlessParserBaseVisitor<Void> {
    private Analyzer(final Metadata metadata) {
        final Definition definition = metadata.definition;

        final AnalyzerUtility utility = new AnalyzerUtility();
        final AnalyzerUtility utility = new AnalyzerUtility(metadata);
        final AnalyzerCaster caster = new AnalyzerCaster(definition);
        final AnalyzerPromoter promoter = new AnalyzerPromoter(definition);

@@ -94,8 +95,8 @@ class Analyzer extends PainlessParserBaseVisitor<Void> {
        utility.incrementScope();
        utility.addVariable(null, "#this", definition.execType);
        metadata.inputValueSlot = utility.addVariable(null, "input", definition.smapType).slot;
        metadata.scoreValueSlot = utility.addVariable(null, "_score", definition.floatType).slot;
        metadata.loopCounterSlot = utility.addVariable(null, "#loop", definition.intType).slot;
        metadata.scoreValueSlot = utility.addVariable(null, "_score", definition.floatType).slot;

        metadata.createStatementMetadata(metadata.root);
        visit(metadata.root);
@@ -253,6 +254,16 @@ class Analyzer extends PainlessParserBaseVisitor<Void> {
        return null;
    }

    @Override
    public Void visitIdentifier(IdentifierContext ctx) {
        throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state.");
    }

    @Override
    public Void visitGeneric(GenericContext ctx) {
        throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state.");
    }

    @Override
    public Void visitPrecedence(final PrecedenceContext ctx) {
        throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state.");

@@ -398,13 +409,6 @@ class Analyzer extends PainlessParserBaseVisitor<Void> {
        return null;
    }

    @Override
    public Void visitExttype(final ExttypeContext ctx) {
        external.processExttype(ctx);

        return null;
    }

    @Override
    public Void visitExtcall(final ExtcallContext ctx) {
        external.processExtcall(ctx);

@@ -168,7 +168,7 @@ class AnalyzerExpression {
            throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state.");
        }

        charemd.preConst = ctx.CHAR().getText().charAt(0);
        charemd.preConst = ctx.CHAR().getText().charAt(1);
        charemd.from = definition.charType;
    }


@@ -41,8 +41,8 @@ import org.elasticsearch.painless.PainlessParser.ExtnewContext;
import org.elasticsearch.painless.PainlessParser.ExtprecContext;
import org.elasticsearch.painless.PainlessParser.ExtstartContext;
import org.elasticsearch.painless.PainlessParser.ExtstringContext;
import org.elasticsearch.painless.PainlessParser.ExttypeContext;
import org.elasticsearch.painless.PainlessParser.ExtvarContext;
import org.elasticsearch.painless.PainlessParser.IdentifierContext;

import java.util.Arrays;
import java.util.List;
@@ -80,7 +80,6 @@ class AnalyzerExternal {
    void processExtstart(final ExtstartContext ctx) {
        final ExtprecContext precctx = ctx.extprec();
        final ExtcastContext castctx = ctx.extcast();
        final ExttypeContext typectx = ctx.exttype();
        final ExtvarContext varctx = ctx.extvar();
        final ExtnewContext newctx = ctx.extnew();
        final ExtstringContext stringctx = ctx.extstring();

@@ -91,9 +90,6 @@ class AnalyzerExternal {
        } else if (castctx != null) {
            metadata.createExtNodeMetadata(ctx, castctx);
            analyzer.visit(castctx);
        } else if (typectx != null) {
            metadata.createExtNodeMetadata(ctx, typectx);
            analyzer.visit(typectx);
        } else if (varctx != null) {
            metadata.createExtNodeMetadata(ctx, varctx);
            analyzer.visit(varctx);

@@ -115,7 +111,6 @@ class AnalyzerExternal {

        final ExtprecContext precctx = ctx.extprec();
        final ExtcastContext castctx = ctx.extcast();
        final ExttypeContext typectx = ctx.exttype();
        final ExtvarContext varctx = ctx.extvar();
        final ExtnewContext newctx = ctx.extnew();
        final ExtstringContext stringctx = ctx.extstring();

@@ -133,9 +128,6 @@ class AnalyzerExternal {
        } else if (castctx != null) {
            metadata.createExtNodeMetadata(parent, castctx);
            analyzer.visit(castctx);
        } else if (typectx != null) {
            metadata.createExtNodeMetadata(parent, typectx);
            analyzer.visit(typectx);
        } else if (varctx != null) {
            metadata.createExtNodeMetadata(parent, varctx);
            analyzer.visit(varctx);

@@ -171,7 +163,6 @@ class AnalyzerExternal {

        final ExtprecContext precctx = ctx.extprec();
        final ExtcastContext castctx = ctx.extcast();
        final ExttypeContext typectx = ctx.exttype();
        final ExtvarContext varctx = ctx.extvar();
        final ExtnewContext newctx = ctx.extnew();
        final ExtstringContext stringctx = ctx.extstring();

@@ -182,9 +173,6 @@ class AnalyzerExternal {
        } else if (castctx != null) {
            metadata.createExtNodeMetadata(parent, castctx);
            analyzer.visit(castctx);
        } else if (typectx != null) {
            metadata.createExtNodeMetadata(parent, typectx);
            analyzer.visit(typectx);
        } else if (varctx != null) {
            metadata.createExtNodeMetadata(parent, varctx);
            analyzer.visit(varctx);

@@ -349,25 +337,6 @@ class AnalyzerExternal {
        }
    }

    void processExttype(final ExttypeContext ctx) {
        final ExtNodeMetadata typeenmd = metadata.getExtNodeMetadata(ctx);
        final ParserRuleContext parent = typeenmd.parent;
        final ExternalMetadata parentemd = metadata.getExternalMetadata(parent);

        if (parentemd.current != null) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected static type.");
        }

        final String typestr = ctx.TYPE().getText();
        typeenmd.type = definition.getType(typestr);
        parentemd.current = typeenmd.type;
        parentemd.statik = true;

        final ExtdotContext dotctx = ctx.extdot();
        metadata.createExtNodeMetadata(parent, dotctx);
        analyzer.visit(dotctx);
    }

    void processExtcall(final ExtcallContext ctx) {
        final ExtNodeMetadata callenmd = metadata.getExtNodeMetadata(ctx);
        final ParserRuleContext parent = callenmd.parent;
@@ -445,34 +414,56 @@ class AnalyzerExternal {
        final ParserRuleContext parent = varenmd.parent;
        final ExternalMetadata parentemd = metadata.getExternalMetadata(parent);

        final String name = ctx.ID().getText();
        final IdentifierContext idctx = ctx.identifier();
        final String id = idctx.getText();

        final ExtdotContext dotctx = ctx.extdot();
        final ExtbraceContext bracectx = ctx.extbrace();

        if (parentemd.current != null) {
            throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected variable [" + name + "] load.");
        }
        final boolean type = utility.isValidType(idctx, false);

        varenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null;
        if (type) {
            if (parentemd.current != null || dotctx == null || bracectx != null) {
                throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected static type [" + id + "].");
            }

        final Variable variable = utility.getVariable(name);
            varenmd.type = definition.getType(id);
            parentemd.current = varenmd.type;
            parentemd.statik = true;

        if (variable == null) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unknown variable [" + name + "].");
        }

        varenmd.target = variable.slot;
        varenmd.type = variable.type;
        analyzeLoadStoreExternal(ctx);
        parentemd.current = varenmd.type;

            if (dotctx != null) {
                metadata.createExtNodeMetadata(parent, dotctx);
                analyzer.visit(dotctx);
            } else if (bracectx != null) {
                metadata.createExtNodeMetadata(parent, bracectx);
                analyzer.visit(bracectx);
        } else {
            utility.isValidIdentifier(idctx, true);

            if (parentemd.current != null) {
                throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected variable [" + id + "] load.");
            }

            varenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null;

            final Variable variable = utility.getVariable(id);

            if (variable == null) {
                throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unknown variable [" + id + "].");
            }

            if ("_score".equals(id)) {
                metadata.scoreValueUsed = true;
            }

            varenmd.target = variable.slot;
            varenmd.type = variable.type;
            analyzeLoadStoreExternal(ctx);
            parentemd.current = varenmd.type;

            if (dotctx != null) {
                metadata.createExtNodeMetadata(parent, dotctx);
                analyzer.visit(dotctx);
            } else if (bracectx != null) {
                metadata.createExtNodeMetadata(parent, bracectx);
                analyzer.visit(bracectx);
            }
        }
    }

@@ -650,21 +641,20 @@ class AnalyzerExternal {
        final ExternalMetadata parentemd = metadata.getExternalMetadata(parent);

        final ExtdotContext dotctx = ctx.extdot();
        final ExtbraceContext bracectx = ctx.extbrace();
        newenmd.last = parentemd.scope == 0 && dotctx == null;

        newenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null;

        final String name = ctx.TYPE().getText();
        final Struct struct = definition.structs.get(name);
        final IdentifierContext idctx = ctx.identifier();
        final String type = idctx.getText();
        utility.isValidType(idctx, true);

        if (parentemd.current != null) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected new call.");
        } else if (struct == null) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Specified type [" + name + "] not found.");
        } else if (newenmd.last && parentemd.storeExpr != null) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot assign a value to a new call.");
        }

        final Struct struct = definition.structs.get(type);

        final boolean newclass = ctx.arguments() != null;
        final boolean newarray = !ctx.expression().isEmpty();

@@ -712,7 +702,7 @@ class AnalyzerExternal {
        }

        if (size != types.length) {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "When calling [" + name + "] on type " +
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "When calling constructor on type " +
                "[" + struct.name + "] expected [" + types.length + "] arguments," +
                " but found [" + arguments.size() + "].");
        }
@@ -728,9 +718,6 @@ class AnalyzerExternal {
        if (dotctx != null) {
            metadata.createExtNodeMetadata(parent, dotctx);
            analyzer.visit(dotctx);
        } else if (bracectx != null) {
            metadata.createExtNodeMetadata(parent, bracectx);
            analyzer.visit(bracectx);
        }
    }

@@ -739,7 +726,7 @@ class AnalyzerExternal {
        final ParserRuleContext parent = memberenmd.parent;
        final ExternalMetadata parentemd = metadata.getExternalMetadata(parent);

        final String string = ctx.STRING().getText();
        final String string = ctx.STRING().getText().substring(1, ctx.STRING().getText().length() - 1);

        final ExtdotContext dotctx = ctx.extdot();
        final ExtbraceContext bracectx = ctx.extbrace();

@@ -34,6 +34,7 @@ import org.elasticsearch.painless.PainlessParser.DoContext;
import org.elasticsearch.painless.PainlessParser.ExprContext;
import org.elasticsearch.painless.PainlessParser.ExpressionContext;
import org.elasticsearch.painless.PainlessParser.ForContext;
import org.elasticsearch.painless.PainlessParser.IdentifierContext;
import org.elasticsearch.painless.PainlessParser.IfContext;
import org.elasticsearch.painless.PainlessParser.InitializerContext;
import org.elasticsearch.painless.PainlessParser.MultipleContext;
@@ -525,15 +526,20 @@ class AnalyzerStatement {

    void processDecltype(final DecltypeContext ctx) {
        final ExpressionMetadata decltypeemd = metadata.getExpressionMetadata(ctx);
        final String name = ctx.getText();
        decltypeemd.from = definition.getType(name);
        final IdentifierContext idctx = ctx.identifier();
        final String type = ctx.getText();

        utility.isValidType(idctx, true);
        decltypeemd.from = definition.getType(type);
    }

    void processDeclvar(final DeclvarContext ctx) {
        final ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx);
        final IdentifierContext idctx = ctx.identifier();
        final String identifier = idctx.getText();

        final String name = ctx.ID().getText();
        declvaremd.postConst = utility.addVariable(ctx, name, declvaremd.to).slot;
        utility.isValidIdentifier(idctx, true);
        declvaremd.postConst = utility.addVariable(ctx, identifier, declvaremd.to).slot;

        final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression());

@@ -548,7 +554,9 @@ class AnalyzerStatement {
    void processTrap(final TrapContext ctx) {
        final StatementMetadata trapsmd = metadata.getStatementMetadata(ctx);

        final String type = ctx.TYPE().getText();
        final IdentifierContext idctx0 = ctx.identifier(0);
        final String type = idctx0.getText();
        utility.isValidType(idctx0, true);
        trapsmd.exception = definition.getType(type);

        try {

@@ -557,8 +565,10 @@ class AnalyzerStatement {
            throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid exception type [" + trapsmd.exception.name + "].");
        }

        final String id = ctx.ID().getText();
        trapsmd.slot = utility.addVariable(ctx, id, trapsmd.exception).slot;
        final IdentifierContext idctx1 = ctx.identifier(1);
        final String identifier = idctx1.getText();
        utility.isValidIdentifier(idctx1, true);
        trapsmd.slot = utility.addVariable(ctx, identifier, trapsmd.exception).slot;

        final BlockContext blockctx = ctx.block();


@@ -22,7 +22,9 @@ package org.elasticsearch.painless;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import org.elasticsearch.painless.Definition.Type;
import org.elasticsearch.painless.Metadata.ExtNodeMetadata;
import org.elasticsearch.painless.PainlessParser.ExpressionContext;
import org.elasticsearch.painless.PainlessParser.IdentifierContext;
import org.elasticsearch.painless.PainlessParser.PrecedenceContext;

import java.util.ArrayDeque;

@@ -51,6 +53,26 @@ class AnalyzerUtility {
        return "Analyzer Error [" + ctx.getStart().getLine() + ":" + ctx.getStart().getCharPositionInLine() + "]: ";
    }

    /**
     * A utility method to output consistent error messages for invalid types.
     * @param ctx The ANTLR node the error occurred in.
     * @param type The invalid type.
     * @return The error message with tacked on line number and character position.
     */
    static String typeError(final ParserRuleContext ctx, final String type) {
        return error(ctx) + "Invalid type [" + type + "].";
    }

    /**
     * A utility method to output consistent error messages for invalid identifiers.
     * @param ctx The ANTLR node the error occurred in.
     * @param identifier The invalid identifier.
     * @return The error message with tacked on line number and character position.
     */
    static String identifierError(final ParserRuleContext ctx, final String identifier) {
        return error(ctx) + "Invalid identifier [" + identifier + "].";
    }

    /**
     * The ANTLR parse tree is modified in one single case; a parent node needs to check a child node to see if it's
     * a precedence node, and if so, it must be removed from the tree permanently. Once the ANTLR tree is built,
@@ -87,9 +109,17 @@ class AnalyzerUtility {
        return source;
    }

    private final Metadata metadata;
    private final Definition definition;

    private final Deque<Integer> scopes = new ArrayDeque<>();
    private final Deque<Variable> variables = new ArrayDeque<>();

    AnalyzerUtility(final Metadata metadata) {
        this.metadata = metadata;
        definition = metadata.definition;
    }

    void incrementScope() {
        scopes.push(0);
    }
@@ -141,4 +171,24 @@ class AnalyzerUtility {

        return variable;
    }

    boolean isValidType(final IdentifierContext idctx, final boolean error) {
        boolean valid = definition.structs.containsKey(idctx.getText());

        if (!valid && error) {
            throw new IllegalArgumentException(typeError(idctx, idctx.getText()));
        }

        return valid;
    }

    boolean isValidIdentifier(final IdentifierContext idctx, final boolean error) {
        boolean valid = !definition.structs.containsKey(idctx.getText()) && idctx.generic() == null;

        if (!valid && error) {
            throw new IllegalArgumentException(identifierError(idctx, idctx.getText()));
        }

        return valid;
    }
}

@@ -104,7 +104,7 @@ final class Compiler {
    }

        final Definition definition = custom != null ? new Definition(custom) : DEFAULT_DEFINITION;
        final ParserRuleContext root = createParseTree(source, definition);
        final ParserRuleContext root = createParseTree(source);
        final Metadata metadata = new Metadata(definition, source, root, settings);
        Analyzer.analyze(metadata);
        final byte[] bytes = Writer.write(metadata);

@@ -118,17 +118,15 @@ final class Compiler {
     * to ensure that the first error generated by ANTLR will cause the compilation to fail rather than
     * use ANTLR's recovery strategies that may be potentially dangerous.
     * @param source The source code for the script.
     * @param definition The Painless API.
     * @return The root node for the ANTLR parse tree.
     */
    private static ParserRuleContext createParseTree(final String source, final Definition definition) {
    private static ParserRuleContext createParseTree(final String source) {
        final ANTLRInputStream stream = new ANTLRInputStream(source);
        final ErrorHandlingLexer lexer = new ErrorHandlingLexer(stream);
        final PainlessParser parser = new PainlessParser(new CommonTokenStream(lexer));
        final ParserErrorStrategy strategy = new ParserErrorStrategy();

        lexer.removeErrorListeners();
        lexer.setTypes(definition.structs.keySet());
        parser.removeErrorListeners();
        parser.setErrorHandler(strategy);


@@ -2101,7 +2101,7 @@ class Definition {
        } else {
            sort = Sort.OBJECT;

            for (Sort value : Sort.values()) {
            for (final Sort value : Sort.values()) {
                if (value.clazz == null) {
                    continue;
                }


@@ -411,16 +411,22 @@ class Metadata {
    int inputValueSlot = -1;

    /**
     * Used to determine what slot the score variable is stored in. This is used in the {@link Writer} whenever
     * Used to determine what slot the loopCounter variable is stored in. This is used in the {@link Writer} whenever
     * the loop variable is accessed.
     */
    int loopCounterSlot = -1;

    /**
     * Used to determine what slot the _score variable is stored in. This is used in the {@link Writer} whenever
     * the score variable is accessed.
     */
    int scoreValueSlot = -1;

    /**
     * Used to determine what slot the loopCounter variable is stored in. This is used in the {@link Writer} whenever
     * the loop variable is accessed.
     * Used to determine if the _score variable is actually used. This is used in the {@link Analyzer} to update
     * variable slots at the completion of analysis if _score is not used.
     */
    int loopCounterSlot = -1;
    boolean scoreValueUsed = false;

    /**
     * Maps the relevant ANTLR node to its metadata.

@@ -1,19 +1,13 @@
// ANTLR GENERATED CODE: DO NOT EDIT
package org.elasticsearch.painless;

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.RuntimeMetaData;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import org.antlr.v4.runtime.atn.LexerATNSimulator;
import org.antlr.v4.runtime.atn.PredictionContextCache;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.DFA;

import java.util.Set;
import org.antlr.v4.runtime.misc.*;

@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
class PainlessLexer extends Lexer {
@ -23,52 +17,52 @@ class PainlessLexer extends Lexer {
|
|||
protected static final PredictionContextCache _sharedContextCache =
|
||||
new PredictionContextCache();
|
||||
public static final int
|
||||
WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9,
|
||||
COMMA=10, SEMICOLON=11, IF=12, ELSE=13, WHILE=14, DO=15, FOR=16, CONTINUE=17,
|
||||
BREAK=18, RETURN=19, NEW=20, TRY=21, CATCH=22, THROW=23, BOOLNOT=24, BWNOT=25,
|
||||
MUL=26, DIV=27, REM=28, ADD=29, SUB=30, LSH=31, RSH=32, USH=33, LT=34,
|
||||
LTE=35, GT=36, GTE=37, EQ=38, EQR=39, NE=40, NER=41, BWAND=42, BWXOR=43,
|
||||
BWOR=44, BOOLAND=45, BOOLOR=46, COND=47, COLON=48, INCR=49, DECR=50, ASSIGN=51,
|
||||
AADD=52, ASUB=53, AMUL=54, ADIV=55, AREM=56, AAND=57, AXOR=58, AOR=59,
|
||||
ALSH=60, ARSH=61, AUSH=62, OCTAL=63, HEX=64, INTEGER=65, DECIMAL=66, STRING=67,
|
||||
CHAR=68, TRUE=69, FALSE=70, NULL=71, TYPE=72, ID=73, EXTINTEGER=74, EXTID=75;
|
||||
WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9,
|
||||
COMMA=10, SEMICOLON=11, IF=12, ELSE=13, WHILE=14, DO=15, FOR=16, CONTINUE=17,
|
||||
BREAK=18, RETURN=19, NEW=20, TRY=21, CATCH=22, THROW=23, BOOLNOT=24, BWNOT=25,
|
||||
MUL=26, DIV=27, REM=28, ADD=29, SUB=30, LSH=31, RSH=32, USH=33, LT=34,
|
||||
LTE=35, GT=36, GTE=37, EQ=38, EQR=39, NE=40, NER=41, BWAND=42, BWXOR=43,
|
||||
BWOR=44, BOOLAND=45, BOOLOR=46, COND=47, COLON=48, INCR=49, DECR=50, ASSIGN=51,
|
||||
AADD=52, ASUB=53, AMUL=54, ADIV=55, AREM=56, AAND=57, AXOR=58, AOR=59,
|
||||
ALSH=60, ARSH=61, AUSH=62, OCTAL=63, HEX=64, INTEGER=65, DECIMAL=66, STRING=67,
|
||||
CHAR=68, TRUE=69, FALSE=70, NULL=71, ID=72, EXTINTEGER=73, EXTID=74;
|
||||
public static final int EXT = 1;
|
||||
public static String[] modeNames = {
|
||||
"DEFAULT_MODE", "EXT"
|
||||
};
|
||||
|
||||
public static final String[] ruleNames = {
|
||||
"WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT",
|
||||
"COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE",
|
||||
"BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "BOOLNOT", "BWNOT",
|
||||
"MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT",
|
||||
"GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND",
|
||||
"BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL",
|
||||
"ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL",
|
||||
"HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL",
|
||||
"TYPE", "GENERIC", "ID", "EXTINTEGER", "EXTID"
|
||||
"WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT",
|
||||
"COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE",
|
||||
"BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "BOOLNOT", "BWNOT",
|
||||
"MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT",
|
||||
"GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND",
|
||||
"BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL",
|
||||
"ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL",
|
||||
"HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL",
|
||||
"ID", "EXTINTEGER", "EXTID"
|
||||
};
|
||||
|
||||
private static final String[] _LITERAL_NAMES = {
|
||||
null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "','",
|
||||
"';'", "'if'", "'else'", "'while'", "'do'", "'for'", "'continue'", "'break'",
|
||||
"'return'", "'new'", "'try'", "'catch'", "'throw'", "'!'", "'~'", "'*'",
|
||||
"'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", "'<'", "'<='", "'>'",
|
||||
"'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", "'^'", "'|'", "'&&'",
|
||||
"'||'", "'?'", "':'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='",
|
||||
"'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null,
|
||||
null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "','",
|
||||
"';'", "'if'", "'else'", "'while'", "'do'", "'for'", "'continue'", "'break'",
|
||||
"'return'", "'new'", "'try'", "'catch'", "'throw'", "'!'", "'~'", "'*'",
|
||||
"'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", "'<'", "'<='", "'>'",
|
||||
"'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", "'^'", "'|'", "'&&'",
|
||||
"'||'", "'?'", "':'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='",
|
||||
"'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null,
|
||||
null, null, null, null, "'true'", "'false'", "'null'"
|
||||
};
|
||||
private static final String[] _SYMBOLIC_NAMES = {
|
||||
null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP",
|
||||
"DOT", "COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE",
|
||||
"BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "BOOLNOT", "BWNOT",
|
||||
"MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT",
|
||||
"GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND",
|
||||
"BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL",
|
||||
"ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL",
|
||||
"HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL",
|
||||
"TYPE", "ID", "EXTINTEGER", "EXTID"
|
||||
null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP",
|
||||
"DOT", "COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE",
|
||||
"BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "BOOLNOT", "BWNOT",
|
||||
"MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT",
|
||||
"GTE", "EQ", "EQR", "NE", "NER", "BWAND", "BWXOR", "BWOR", "BOOLAND",
|
||||
"BOOLOR", "COND", "COLON", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL",
|
||||
"ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL",
|
||||
"HEX", "INTEGER", "DECIMAL", "STRING", "CHAR", "TRUE", "FALSE", "NULL",
|
||||
"ID", "EXTINTEGER", "EXTID"
|
||||
};
|
||||
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
|
||||
|
||||
|
@ -104,13 +98,6 @@ class PainlessLexer extends Lexer {
|
|||
}
|
||||
|
||||
|
||||
private Set<String> types = null;
|
||||
|
||||
void setTypes(Set<String> types) {
|
||||
this.types = types;
|
||||
}
|
||||
|
||||
|
||||
public PainlessLexer(CharStream input) {
|
||||
super(input);
|
||||
_interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
|
||||
|
@ -131,59 +118,8 @@ class PainlessLexer extends Lexer {
|
|||
@Override
|
||||
public ATN getATN() { return _ATN; }
|
||||
|
||||
@Override
|
||||
public void action(RuleContext _localctx, int ruleIndex, int actionIndex) {
|
||||
switch (ruleIndex) {
|
||||
case 66:
|
||||
STRING_action((RuleContext)_localctx, actionIndex);
|
||||
break;
|
||||
case 67:
|
||||
CHAR_action((RuleContext)_localctx, actionIndex);
|
||||
break;
|
||||
case 71:
|
||||
TYPE_action((RuleContext)_localctx, actionIndex);
|
||||
break;
|
||||
}
|
||||
}
|
||||
private void STRING_action(RuleContext _localctx, int actionIndex) {
|
||||
switch (actionIndex) {
|
||||
case 0:
|
||||
setText(getText().substring(1, getText().length() - 1));
|
||||
break;
|
||||
}
|
||||
}
|
||||
private void CHAR_action(RuleContext _localctx, int actionIndex) {
|
||||
switch (actionIndex) {
|
||||
case 1:
|
||||
setText(getText().substring(1, getText().length() - 1));
|
||||
break;
|
||||
}
|
||||
}
|
||||
private void TYPE_action(RuleContext _localctx, int actionIndex) {
|
||||
switch (actionIndex) {
|
||||
case 2:
|
||||
setText(getText().replace(" ", ""));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
|
||||
switch (ruleIndex) {
|
||||
case 71:
|
||||
return TYPE_sempred((RuleContext)_localctx, predIndex);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
private boolean TYPE_sempred(RuleContext _localctx, int predIndex) {
|
||||
switch (predIndex) {
|
||||
case 0:
|
||||
return types.contains(getText().replace(" ", ""));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
public static final String _serializedATN =
|
||||
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2M\u0230\b\1\b\1\4"+
|
||||
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2L\u01f4\b\1\b\1\4"+
|
||||
"\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+
|
||||
"\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+
|
||||
"\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+
|
||||
|
@ -192,193 +128,168 @@ class PainlessLexer extends Lexer {
|
|||
"+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64"+
|
||||
"\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t"+
|
||||
"=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4"+
|
||||
"I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\3\2\6\2\u009e\n\2\r\2\16\2\u009f\3\2\3\2"+
|
||||
"\3\3\3\3\3\3\3\3\7\3\u00a8\n\3\f\3\16\3\u00ab\13\3\3\3\3\3\3\3\3\3\3\3"+
|
||||
"\7\3\u00b2\n\3\f\3\16\3\u00b5\13\3\3\3\3\3\5\3\u00b9\n\3\3\3\3\3\3\4\3"+
|
||||
"\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3"+
|
||||
"\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3"+
|
||||
"\17\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3"+
|
||||
"\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3"+
|
||||
"\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3"+
|
||||
"\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3"+
|
||||
"\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3"+
|
||||
"\"\3#\3#\3$\3$\3$\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3(\3)\3)\3)\3*\3"+
|
||||
"*\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\3"+
|
||||
"\62\3\62\3\63\3\63\3\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3"+
|
||||
"\67\3\67\38\38\38\39\39\39\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3=\3=\3>\3"+
|
||||
">\3>\3>\3?\3?\3?\3?\3?\3@\3@\6@\u017f\n@\r@\16@\u0180\3@\5@\u0184\n@\3"+
|
||||
"A\3A\3A\6A\u0189\nA\rA\16A\u018a\3A\5A\u018e\nA\3B\3B\3B\7B\u0193\nB\f"+
|
||||
"B\16B\u0196\13B\5B\u0198\nB\3B\5B\u019b\nB\3C\3C\3C\7C\u01a0\nC\fC\16"+
|
||||
"C\u01a3\13C\5C\u01a5\nC\3C\3C\7C\u01a9\nC\fC\16C\u01ac\13C\3C\3C\5C\u01b0"+
|
||||
"\nC\3C\6C\u01b3\nC\rC\16C\u01b4\5C\u01b7\nC\3C\5C\u01ba\nC\3D\3D\3D\3"+
|
||||
"D\3D\3D\7D\u01c2\nD\fD\16D\u01c5\13D\3D\3D\3D\3E\3E\3E\3E\3E\3F\3F\3F"+
|
||||
"\3F\3F\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3I\3I\5I\u01e1\nI\3I\3I\3I\3J"+
|
||||
"\7J\u01e7\nJ\fJ\16J\u01ea\13J\3J\3J\7J\u01ee\nJ\fJ\16J\u01f1\13J\3J\3"+
|
||||
"J\5J\u01f5\nJ\3J\7J\u01f8\nJ\fJ\16J\u01fb\13J\3J\3J\7J\u01ff\nJ\fJ\16"+
|
||||
"J\u0202\13J\3J\3J\5J\u0206\nJ\3J\7J\u0209\nJ\fJ\16J\u020c\13J\7J\u020e"+
|
||||
"\nJ\fJ\16J\u0211\13J\3J\3J\3K\3K\7K\u0217\nK\fK\16K\u021a\13K\3L\3L\3"+
|
||||
"L\7L\u021f\nL\fL\16L\u0222\13L\5L\u0224\nL\3L\3L\3M\3M\7M\u022a\nM\fM"+
|
||||
"\16M\u022d\13M\3M\3M\5\u00a9\u00b3\u01c3\2N\4\3\6\4\b\5\n\6\f\7\16\b\20"+
|
||||
"\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27"+
|
||||
".\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X"+
|
||||
"-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v<x=z>|?~@\u0080A\u0082"+
|
||||
"B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092J\u0094\2\u0096"+
|
||||
"K\u0098L\u009aM\4\2\3\21\5\2\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2"+
|
||||
"NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2-"+
|
||||
"-//\4\2HHhh\4\2$$^^\5\2C\\aac|\6\2\62;C\\aac|\u024f\2\4\3\2\2\2\2\6\3"+
|
||||
"\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2"+
|
||||
"\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3"+
|
||||
"\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3"+
|
||||
"\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64"+
|
||||
"\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3"+
|
||||
"\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2"+
|
||||
"\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2"+
|
||||
"Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3"+
|
||||
"\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2"+
|
||||
"\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2"+
|
||||
"\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088"+
|
||||
"\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2"+
|
||||
"\2\2\u0092\3\2\2\2\2\u0096\3\2\2\2\3\u0098\3\2\2\2\3\u009a\3\2\2\2\4\u009d"+
|
||||
"\3\2\2\2\6\u00b8\3\2\2\2\b\u00bc\3\2\2\2\n\u00be\3\2\2\2\f\u00c0\3\2\2"+
|
||||
"\2\16\u00c2\3\2\2\2\20\u00c4\3\2\2\2\22\u00c6\3\2\2\2\24\u00c8\3\2\2\2"+
|
||||
"\26\u00cc\3\2\2\2\30\u00ce\3\2\2\2\32\u00d0\3\2\2\2\34\u00d3\3\2\2\2\36"+
|
||||
"\u00d8\3\2\2\2 \u00de\3\2\2\2\"\u00e1\3\2\2\2$\u00e5\3\2\2\2&\u00ee\3"+
|
||||
"\2\2\2(\u00f4\3\2\2\2*\u00fb\3\2\2\2,\u00ff\3\2\2\2.\u0103\3\2\2\2\60"+
|
||||
"\u0109\3\2\2\2\62\u010f\3\2\2\2\64\u0111\3\2\2\2\66\u0113\3\2\2\28\u0115"+
|
||||
"\3\2\2\2:\u0117\3\2\2\2<\u0119\3\2\2\2>\u011b\3\2\2\2@\u011d\3\2\2\2B"+
|
||||
"\u0120\3\2\2\2D\u0123\3\2\2\2F\u0127\3\2\2\2H\u0129\3\2\2\2J\u012c\3\2"+
|
||||
"\2\2L\u012e\3\2\2\2N\u0131\3\2\2\2P\u0134\3\2\2\2R\u0138\3\2\2\2T\u013b"+
|
||||
"\3\2\2\2V\u013f\3\2\2\2X\u0141\3\2\2\2Z\u0143\3\2\2\2\\\u0145\3\2\2\2"+
|
||||
"^\u0148\3\2\2\2`\u014b\3\2\2\2b\u014d\3\2\2\2d\u014f\3\2\2\2f\u0152\3"+
|
||||
"\2\2\2h\u0155\3\2\2\2j\u0157\3\2\2\2l\u015a\3\2\2\2n\u015d\3\2\2\2p\u0160"+
|
||||
"\3\2\2\2r\u0163\3\2\2\2t\u0166\3\2\2\2v\u0169\3\2\2\2x\u016c\3\2\2\2z"+
|
||||
"\u016f\3\2\2\2|\u0173\3\2\2\2~\u0177\3\2\2\2\u0080\u017c\3\2\2\2\u0082"+
|
||||
"\u0185\3\2\2\2\u0084\u0197\3\2\2\2\u0086\u01a4\3\2\2\2\u0088\u01bb\3\2"+
|
||||
"\2\2\u008a\u01c9\3\2\2\2\u008c\u01ce\3\2\2\2\u008e\u01d3\3\2\2\2\u0090"+
|
||||
"\u01d9\3\2\2\2\u0092\u01de\3\2\2\2\u0094\u01e8\3\2\2\2\u0096\u0214\3\2"+
|
||||
"\2\2\u0098\u0223\3\2\2\2\u009a\u0227\3\2\2\2\u009c\u009e\t\2\2\2\u009d"+
|
||||
"\u009c\3\2\2\2\u009e\u009f\3\2\2\2\u009f\u009d\3\2\2\2\u009f\u00a0\3\2"+
|
||||
"\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00a2\b\2\2\2\u00a2\5\3\2\2\2\u00a3\u00a4"+
|
||||
"\7\61\2\2\u00a4\u00a5\7\61\2\2\u00a5\u00a9\3\2\2\2\u00a6\u00a8\13\2\2"+
|
||||
"\2\u00a7\u00a6\3\2\2\2\u00a8\u00ab\3\2\2\2\u00a9\u00aa\3\2\2\2\u00a9\u00a7"+
|
||||
"\3\2\2\2\u00aa\u00ac\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ac\u00b9\t\3\2\2\u00ad"+
|
||||
"\u00ae\7\61\2\2\u00ae\u00af\7,\2\2\u00af\u00b3\3\2\2\2\u00b0\u00b2\13"+
|
||||
"\2\2\2\u00b1\u00b0\3\2\2\2\u00b2\u00b5\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b3"+
|
||||
"\u00b1\3\2\2\2\u00b4\u00b6\3\2\2\2\u00b5\u00b3\3\2\2\2\u00b6\u00b7\7,"+
|
||||
"\2\2\u00b7\u00b9\7\61\2\2\u00b8\u00a3\3\2\2\2\u00b8\u00ad\3\2\2\2\u00b9"+
|
||||
"\u00ba\3\2\2\2\u00ba\u00bb\b\3\2\2\u00bb\7\3\2\2\2\u00bc\u00bd\7}\2\2"+
|
||||
"\u00bd\t\3\2\2\2\u00be\u00bf\7\177\2\2\u00bf\13\3\2\2\2\u00c0\u00c1\7"+
|
||||
"]\2\2\u00c1\r\3\2\2\2\u00c2\u00c3\7_\2\2\u00c3\17\3\2\2\2\u00c4\u00c5"+
|
||||
"\7*\2\2\u00c5\21\3\2\2\2\u00c6\u00c7\7+\2\2\u00c7\23\3\2\2\2\u00c8\u00c9"+
|
||||
"\7\60\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00cb\b\n\3\2\u00cb\25\3\2\2\2\u00cc"+
|
||||
"\u00cd\7.\2\2\u00cd\27\3\2\2\2\u00ce\u00cf\7=\2\2\u00cf\31\3\2\2\2\u00d0"+
|
||||
"\u00d1\7k\2\2\u00d1\u00d2\7h\2\2\u00d2\33\3\2\2\2\u00d3\u00d4\7g\2\2\u00d4"+
|
||||
"\u00d5\7n\2\2\u00d5\u00d6\7u\2\2\u00d6\u00d7\7g\2\2\u00d7\35\3\2\2\2\u00d8"+
|
||||
"\u00d9\7y\2\2\u00d9\u00da\7j\2\2\u00da\u00db\7k\2\2\u00db\u00dc\7n\2\2"+
|
||||
"\u00dc\u00dd\7g\2\2\u00dd\37\3\2\2\2\u00de\u00df\7f\2\2\u00df\u00e0\7"+
|
||||
"q\2\2\u00e0!\3\2\2\2\u00e1\u00e2\7h\2\2\u00e2\u00e3\7q\2\2\u00e3\u00e4"+
|
||||
"\7t\2\2\u00e4#\3\2\2\2\u00e5\u00e6\7e\2\2\u00e6\u00e7\7q\2\2\u00e7\u00e8"+
|
||||
"\7p\2\2\u00e8\u00e9\7v\2\2\u00e9\u00ea\7k\2\2\u00ea\u00eb\7p\2\2\u00eb"+
|
||||
"\u00ec\7w\2\2\u00ec\u00ed\7g\2\2\u00ed%\3\2\2\2\u00ee\u00ef\7d\2\2\u00ef"+
|
||||
"\u00f0\7t\2\2\u00f0\u00f1\7g\2\2\u00f1\u00f2\7c\2\2\u00f2\u00f3\7m\2\2"+
|
||||
"\u00f3\'\3\2\2\2\u00f4\u00f5\7t\2\2\u00f5\u00f6\7g\2\2\u00f6\u00f7\7v"+
|
||||
"\2\2\u00f7\u00f8\7w\2\2\u00f8\u00f9\7t\2\2\u00f9\u00fa\7p\2\2\u00fa)\3"+
|
||||
"\2\2\2\u00fb\u00fc\7p\2\2\u00fc\u00fd\7g\2\2\u00fd\u00fe\7y\2\2\u00fe"+
|
||||
"+\3\2\2\2\u00ff\u0100\7v\2\2\u0100\u0101\7t\2\2\u0101\u0102\7{\2\2\u0102"+
|
||||
"-\3\2\2\2\u0103\u0104\7e\2\2\u0104\u0105\7c\2\2\u0105\u0106\7v\2\2\u0106"+
|
||||
"\u0107\7e\2\2\u0107\u0108\7j\2\2\u0108/\3\2\2\2\u0109\u010a\7v\2\2\u010a"+
|
||||
"\u010b\7j\2\2\u010b\u010c\7t\2\2\u010c\u010d\7q\2\2\u010d\u010e\7y\2\2"+
|
||||
"\u010e\61\3\2\2\2\u010f\u0110\7#\2\2\u0110\63\3\2\2\2\u0111\u0112\7\u0080"+
|
||||
"\2\2\u0112\65\3\2\2\2\u0113\u0114\7,\2\2\u0114\67\3\2\2\2\u0115\u0116"+
|
||||
"\7\61\2\2\u01169\3\2\2\2\u0117\u0118\7\'\2\2\u0118;\3\2\2\2\u0119\u011a"+
|
||||
"\7-\2\2\u011a=\3\2\2\2\u011b\u011c\7/\2\2\u011c?\3\2\2\2\u011d\u011e\7"+
|
||||
">\2\2\u011e\u011f\7>\2\2\u011fA\3\2\2\2\u0120\u0121\7@\2\2\u0121\u0122"+
|
||||
"\7@\2\2\u0122C\3\2\2\2\u0123\u0124\7@\2\2\u0124\u0125\7@\2\2\u0125\u0126"+
|
||||
"\7@\2\2\u0126E\3\2\2\2\u0127\u0128\7>\2\2\u0128G\3\2\2\2\u0129\u012a\7"+
|
||||
">\2\2\u012a\u012b\7?\2\2\u012bI\3\2\2\2\u012c\u012d\7@\2\2\u012dK\3\2"+
|
||||
"\2\2\u012e\u012f\7@\2\2\u012f\u0130\7?\2\2\u0130M\3\2\2\2\u0131\u0132"+
|
||||
"\7?\2\2\u0132\u0133\7?\2\2\u0133O\3\2\2\2\u0134\u0135\7?\2\2\u0135\u0136"+
|
||||
"\7?\2\2\u0136\u0137\7?\2\2\u0137Q\3\2\2\2\u0138\u0139\7#\2\2\u0139\u013a"+
|
||||
"\7?\2\2\u013aS\3\2\2\2\u013b\u013c\7#\2\2\u013c\u013d\7?\2\2\u013d\u013e"+
|
||||
"\7?\2\2\u013eU\3\2\2\2\u013f\u0140\7(\2\2\u0140W\3\2\2\2\u0141\u0142\7"+
|
||||
"`\2\2\u0142Y\3\2\2\2\u0143\u0144\7~\2\2\u0144[\3\2\2\2\u0145\u0146\7("+
|
||||
"\2\2\u0146\u0147\7(\2\2\u0147]\3\2\2\2\u0148\u0149\7~\2\2\u0149\u014a"+
|
||||
"\7~\2\2\u014a_\3\2\2\2\u014b\u014c\7A\2\2\u014ca\3\2\2\2\u014d\u014e\7"+
|
||||
"<\2\2\u014ec\3\2\2\2\u014f\u0150\7-\2\2\u0150\u0151\7-\2\2\u0151e\3\2"+
|
||||
"\2\2\u0152\u0153\7/\2\2\u0153\u0154\7/\2\2\u0154g\3\2\2\2\u0155\u0156"+
|
||||
"\7?\2\2\u0156i\3\2\2\2\u0157\u0158\7-\2\2\u0158\u0159\7?\2\2\u0159k\3"+
|
||||
"\2\2\2\u015a\u015b\7/\2\2\u015b\u015c\7?\2\2\u015cm\3\2\2\2\u015d\u015e"+
|
||||
"\7,\2\2\u015e\u015f\7?\2\2\u015fo\3\2\2\2\u0160\u0161\7\61\2\2\u0161\u0162"+
|
||||
"\7?\2\2\u0162q\3\2\2\2\u0163\u0164\7\'\2\2\u0164\u0165\7?\2\2\u0165s\3"+
|
||||
"\2\2\2\u0166\u0167\7(\2\2\u0167\u0168\7?\2\2\u0168u\3\2\2\2\u0169\u016a"+
|
||||
"\7`\2\2\u016a\u016b\7?\2\2\u016bw\3\2\2\2\u016c\u016d\7~\2\2\u016d\u016e"+
|
||||
"\7?\2\2\u016ey\3\2\2\2\u016f\u0170\7>\2\2\u0170\u0171\7>\2\2\u0171\u0172"+
|
||||
"\7?\2\2\u0172{\3\2\2\2\u0173\u0174\7@\2\2\u0174\u0175\7@\2\2\u0175\u0176"+
|
||||
"\7?\2\2\u0176}\3\2\2\2\u0177\u0178\7@\2\2\u0178\u0179\7@\2\2\u0179\u017a"+
|
||||
"\7@\2\2\u017a\u017b\7?\2\2\u017b\177\3\2\2\2\u017c\u017e\7\62\2\2\u017d"+
|
||||
"\u017f\t\4\2\2\u017e\u017d\3\2\2\2\u017f\u0180\3\2\2\2\u0180\u017e\3\2"+
|
||||
"\2\2\u0180\u0181\3\2\2\2\u0181\u0183\3\2\2\2\u0182\u0184\t\5\2\2\u0183"+
|
||||
"\u0182\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0081\3\2\2\2\u0185\u0186\7\62"+
|
||||
"\2\2\u0186\u0188\t\6\2\2\u0187\u0189\t\7\2\2\u0188\u0187\3\2\2\2\u0189"+
|
||||
"\u018a\3\2\2\2\u018a\u0188\3\2\2\2\u018a\u018b\3\2\2\2\u018b\u018d\3\2"+
|
||||
"\2\2\u018c\u018e\t\5\2\2\u018d\u018c\3\2\2\2\u018d\u018e\3\2\2\2\u018e"+
|
||||
"\u0083\3\2\2\2\u018f\u0198\7\62\2\2\u0190\u0194\t\b\2\2\u0191\u0193\t"+
|
||||
"\t\2\2\u0192\u0191\3\2\2\2\u0193\u0196\3\2\2\2\u0194\u0192\3\2\2\2\u0194"+
|
||||
"\u0195\3\2\2\2\u0195\u0198\3\2\2\2\u0196\u0194\3\2\2\2\u0197\u018f\3\2"+
|
||||
"\2\2\u0197\u0190\3\2\2\2\u0198\u019a\3\2\2\2\u0199\u019b\t\n\2\2\u019a"+
|
||||
"\u0199\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u0085\3\2\2\2\u019c\u01a5\7\62"+
|
||||
"\2\2\u019d\u01a1\t\b\2\2\u019e\u01a0\t\t\2\2\u019f\u019e\3\2\2\2\u01a0"+
|
||||
"\u01a3\3\2\2\2\u01a1\u019f\3\2\2\2\u01a1\u01a2\3\2\2\2\u01a2\u01a5\3\2"+
|
||||
"\2\2\u01a3\u01a1\3\2\2\2\u01a4\u019c\3\2\2\2\u01a4\u019d\3\2\2\2\u01a5"+
|
||||
"\u01a6\3\2\2\2\u01a6\u01aa\5\24\n\2\u01a7\u01a9\t\t\2\2\u01a8\u01a7\3"+
|
||||
"\2\2\2\u01a9\u01ac\3\2\2\2\u01aa\u01a8\3\2\2\2\u01aa\u01ab\3\2\2\2\u01ab"+
|
||||
"\u01b6\3\2\2\2\u01ac\u01aa\3\2\2\2\u01ad\u01af\t\13\2\2\u01ae\u01b0\t"+
|
||||
"\f\2\2\u01af\u01ae\3\2\2\2\u01af\u01b0\3\2\2\2\u01b0\u01b2\3\2\2\2\u01b1"+
|
||||
"\u01b3\t\t\2\2\u01b2\u01b1\3\2\2\2\u01b3\u01b4\3\2\2\2\u01b4\u01b2\3\2"+
|
||||
"\2\2\u01b4\u01b5\3\2\2\2\u01b5\u01b7\3\2\2\2\u01b6\u01ad\3\2\2\2\u01b6"+
|
||||
"\u01b7\3\2\2\2\u01b7\u01b9\3\2\2\2\u01b8\u01ba\t\r\2\2\u01b9\u01b8\3\2"+
|
||||
"\2\2\u01b9\u01ba\3\2\2\2\u01ba\u0087\3\2\2\2\u01bb\u01c3\7$\2\2\u01bc"+
|
||||
"\u01bd\7^\2\2\u01bd\u01c2\7$\2\2\u01be\u01bf\7^\2\2\u01bf\u01c2\7^\2\2"+
|
||||
"\u01c0\u01c2\n\16\2\2\u01c1\u01bc\3\2\2\2\u01c1\u01be\3\2\2\2\u01c1\u01c0"+
|
||||
"\3\2\2\2\u01c2\u01c5\3\2\2\2\u01c3\u01c4\3\2\2\2\u01c3\u01c1\3\2\2\2\u01c4"+
|
||||
"\u01c6\3\2\2\2\u01c5\u01c3\3\2\2\2\u01c6\u01c7\7$\2\2\u01c7\u01c8\bD\4"+
|
||||
"\2\u01c8\u0089\3\2\2\2\u01c9\u01ca\7)\2\2\u01ca\u01cb\13\2\2\2\u01cb\u01cc"+
|
||||
"\7)\2\2\u01cc\u01cd\bE\5\2\u01cd\u008b\3\2\2\2\u01ce\u01cf\7v\2\2\u01cf"+
|
||||
"\u01d0\7t\2\2\u01d0\u01d1\7w\2\2\u01d1\u01d2\7g\2\2\u01d2\u008d\3\2\2"+
|
||||
"\2\u01d3\u01d4\7h\2\2\u01d4\u01d5\7c\2\2\u01d5\u01d6\7n\2\2\u01d6\u01d7"+
|
||||
"\7u\2\2\u01d7\u01d8\7g\2\2\u01d8\u008f\3\2\2\2\u01d9\u01da\7p\2\2\u01da"+
|
||||
"\u01db\7w\2\2\u01db\u01dc\7n\2\2\u01dc\u01dd\7n\2\2\u01dd\u0091\3\2\2"+
|
||||
"\2\u01de\u01e0\5\u0096K\2\u01df\u01e1\5\u0094J\2\u01e0\u01df\3\2\2\2\u01e0"+
|
||||
"\u01e1\3\2\2\2\u01e1\u01e2\3\2\2\2\u01e2\u01e3\6I\2\2\u01e3\u01e4\bI\6"+
|
||||
"\2\u01e4\u0093\3\2\2\2\u01e5\u01e7\7\"\2\2\u01e6\u01e5\3\2\2\2\u01e7\u01ea"+
|
||||
"\3\2\2\2\u01e8\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01eb\3\2\2\2\u01ea"+
|
||||
"\u01e8\3\2\2\2\u01eb\u01ef\7>\2\2\u01ec\u01ee\7\"\2\2\u01ed\u01ec\3\2"+
|
||||
"\2\2\u01ee\u01f1\3\2\2\2\u01ef\u01ed\3\2\2\2\u01ef\u01f0\3\2\2\2\u01f0"+
|
||||
"\u01f2\3\2\2\2\u01f1\u01ef\3\2\2\2\u01f2\u01f4\5\u0096K\2\u01f3\u01f5"+
|
||||
"\5\u0094J\2\u01f4\u01f3\3\2\2\2\u01f4\u01f5\3\2\2\2\u01f5\u01f9\3\2\2"+
|
||||
"\2\u01f6\u01f8\7\"\2\2\u01f7\u01f6\3\2\2\2\u01f8\u01fb\3\2\2\2\u01f9\u01f7"+
|
||||
"\3\2\2\2\u01f9\u01fa\3\2\2\2\u01fa\u020f\3\2\2\2\u01fb\u01f9\3\2\2\2\u01fc"+
|
||||
"\u0200\5\26\13\2\u01fd\u01ff\7\"\2\2\u01fe\u01fd\3\2\2\2\u01ff\u0202\3"+
|
||||
"\2\2\2\u0200\u01fe\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0203\3\2\2\2\u0202"+
|
||||
"\u0200\3\2\2\2\u0203\u0205\5\u0096K\2\u0204\u0206\5\u0094J\2\u0205\u0204"+
|
||||
"\3\2\2\2\u0205\u0206\3\2\2\2\u0206\u020a\3\2\2\2\u0207\u0209\7\"\2\2\u0208"+
|
||||
"\u0207\3\2\2\2\u0209\u020c\3\2\2\2\u020a\u0208\3\2\2\2\u020a\u020b\3\2"+
|
||||
"\2\2\u020b\u020e\3\2\2\2\u020c\u020a\3\2\2\2\u020d\u01fc\3\2\2\2\u020e"+
|
||||
"\u0211\3\2\2\2\u020f\u020d\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0212\3\2"+
|
||||
"\2\2\u0211\u020f\3\2\2\2\u0212\u0213\7@\2\2\u0213\u0095\3\2\2\2\u0214"+
|
||||
"\u0218\t\17\2\2\u0215\u0217\t\20\2\2\u0216\u0215\3\2\2\2\u0217\u021a\3"+
|
||||
"\2\2\2\u0218\u0216\3\2\2\2\u0218\u0219\3\2\2\2\u0219\u0097\3\2\2\2\u021a"+
|
||||
"\u0218\3\2\2\2\u021b\u0224\7\62\2\2\u021c\u0220\t\b\2\2\u021d\u021f\t"+
|
||||
"\t\2\2\u021e\u021d\3\2\2\2\u021f\u0222\3\2\2\2\u0220\u021e\3\2\2\2\u0220"+
|
||||
"\u0221\3\2\2\2\u0221\u0224\3\2\2\2\u0222\u0220\3\2\2\2\u0223\u021b\3\2"+
|
||||
"\2\2\u0223\u021c\3\2\2\2\u0224\u0225\3\2\2\2\u0225\u0226\bL\7\2\u0226"+
|
||||
"\u0099\3\2\2\2\u0227\u022b\t\17\2\2\u0228\u022a\t\20\2\2\u0229\u0228\3"+
|
||||
"\2\2\2\u022a\u022d\3\2\2\2\u022b\u0229\3\2\2\2\u022b\u022c\3\2\2\2\u022c"+
|
||||
"\u022e\3\2\2\2\u022d\u022b\3\2\2\2\u022e\u022f\bM\7\2\u022f\u009b\3\2"+
|
||||
"\2\2%\2\3\u009f\u00a9\u00b3\u00b8\u0180\u0183\u018a\u018d\u0194\u0197"+
|
||||
"\u019a\u01a1\u01a4\u01aa\u01af\u01b4\u01b6\u01b9\u01c1\u01c3\u01e0\u01e8"+
|
||||
"\u01ef\u01f4\u01f9\u0200\u0205\u020a\u020f\u0218\u0220\u0223\u022b\b\b"+
|
||||
"\2\2\4\3\2\3D\2\3E\3\3I\4\4\2\2";
|
||||
"I\tI\4J\tJ\4K\tK\3\2\6\2\u009a\n\2\r\2\16\2\u009b\3\2\3\2\3\3\3\3\3\3"+
|
||||
"\3\3\7\3\u00a4\n\3\f\3\16\3\u00a7\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00ae\n"+
|
||||
"\3\f\3\16\3\u00b1\13\3\3\3\3\3\5\3\u00b5\n\3\3\3\3\3\3\4\3\4\3\5\3\5\3"+
|
||||
"\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3"+
|
||||
"\r\3\r\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20"+
|
||||
"\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+
|
||||
"\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25"+
|
||||
"\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30"+
|
||||
"\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\35"+
|
||||
"\3\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3\"\3#\3#\3$\3"+
|
||||
"$\3$\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3(\3)\3)\3)\3*\3*\3*\3*\3+\3"+
|
||||
"+\3,\3,\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62\3\62\3\63"+
|
||||
"\3\63\3\63\3\64\3\64\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\38\3"+
|
||||
"8\38\39\39\39\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3=\3=\3>\3>\3>\3>\3?\3"+
|
||||
"?\3?\3?\3?\3@\3@\6@\u017b\n@\r@\16@\u017c\3@\5@\u0180\n@\3A\3A\3A\6A\u0185"+
|
||||
"\nA\rA\16A\u0186\3A\5A\u018a\nA\3B\3B\3B\7B\u018f\nB\fB\16B\u0192\13B"+
|
||||
"\5B\u0194\nB\3B\5B\u0197\nB\3C\3C\3C\7C\u019c\nC\fC\16C\u019f\13C\5C\u01a1"+
|
||||
"\nC\3C\3C\7C\u01a5\nC\fC\16C\u01a8\13C\3C\3C\5C\u01ac\nC\3C\6C\u01af\n"+
|
||||
"C\rC\16C\u01b0\5C\u01b3\nC\3C\5C\u01b6\nC\3D\3D\3D\3D\3D\3D\7D\u01be\n"+
|
||||
"D\fD\16D\u01c1\13D\3D\3D\3E\3E\3E\3E\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3G"+
|
||||
"\3H\3H\3H\3H\3H\3I\3I\7I\u01db\nI\fI\16I\u01de\13I\3J\3J\3J\7J\u01e3\n"+
|
||||
"J\fJ\16J\u01e6\13J\5J\u01e8\nJ\3J\3J\3K\3K\7K\u01ee\nK\fK\16K\u01f1\13"+
|
||||
"K\3K\3K\5\u00a5\u00af\u01bf\2L\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13"+
|
||||
"\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31\62"+
|
||||
"\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61"+
|
||||
"b\62d\63f\64h\65j\66l\67n8p9r:t;v<x=z>|?~@\u0080A\u0082B\u0084C\u0086"+
|
||||
"D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\4\2\3\21\5\2"+
|
||||
"\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2"+
|
||||
"\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2--//\4\2HHhh\4\2$$^^\5\2C\\aa"+
|
||||
"c|\6\2\62;C\\aac|\u020b\2\4\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2"+
|
||||
"\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26"+
|
||||
"\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2"+
|
||||
"\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2"+
|
||||
"\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2"+
|
||||
"\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2"+
|
||||
"\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R"+
|
||||
"\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3"+
|
||||
"\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2"+
|
||||
"\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2"+
|
||||
"x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2"+
|
||||
"\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2"+
|
||||
"\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\3\u0094"+
|
||||
"\3\2\2\2\3\u0096\3\2\2\2\4\u0099\3\2\2\2\6\u00b4\3\2\2\2\b\u00b8\3\2\2"+
|
||||
"\2\n\u00ba\3\2\2\2\f\u00bc\3\2\2\2\16\u00be\3\2\2\2\20\u00c0\3\2\2\2\22"+
|
||||
"\u00c2\3\2\2\2\24\u00c4\3\2\2\2\26\u00c8\3\2\2\2\30\u00ca\3\2\2\2\32\u00cc"+
|
||||
"\3\2\2\2\34\u00cf\3\2\2\2\36\u00d4\3\2\2\2 \u00da\3\2\2\2\"\u00dd\3\2"+
|
||||
"\2\2$\u00e1\3\2\2\2&\u00ea\3\2\2\2(\u00f0\3\2\2\2*\u00f7\3\2\2\2,\u00fb"+
|
||||
"\3\2\2\2.\u00ff\3\2\2\2\60\u0105\3\2\2\2\62\u010b\3\2\2\2\64\u010d\3\2"+
|
||||
"\2\2\66\u010f\3\2\2\28\u0111\3\2\2\2:\u0113\3\2\2\2<\u0115\3\2\2\2>\u0117"+
|
||||
"\3\2\2\2@\u0119\3\2\2\2B\u011c\3\2\2\2D\u011f\3\2\2\2F\u0123\3\2\2\2H"+
|
||||
"\u0125\3\2\2\2J\u0128\3\2\2\2L\u012a\3\2\2\2N\u012d\3\2\2\2P\u0130\3\2"+
|
||||
"\2\2R\u0134\3\2\2\2T\u0137\3\2\2\2V\u013b\3\2\2\2X\u013d\3\2\2\2Z\u013f"+
|
||||
"\3\2\2\2\\\u0141\3\2\2\2^\u0144\3\2\2\2`\u0147\3\2\2\2b\u0149\3\2\2\2"+
|
||||
"d\u014b\3\2\2\2f\u014e\3\2\2\2h\u0151\3\2\2\2j\u0153\3\2\2\2l\u0156\3"+
|
||||
"\2\2\2n\u0159\3\2\2\2p\u015c\3\2\2\2r\u015f\3\2\2\2t\u0162\3\2\2\2v\u0165"+
|
||||
"\3\2\2\2x\u0168\3\2\2\2z\u016b\3\2\2\2|\u016f\3\2\2\2~\u0173\3\2\2\2\u0080"+
|
||||
"\u0178\3\2\2\2\u0082\u0181\3\2\2\2\u0084\u0193\3\2\2\2\u0086\u01a0\3\2"+
|
||||
"\2\2\u0088\u01b7\3\2\2\2\u008a\u01c4\3\2\2\2\u008c\u01c8\3\2\2\2\u008e"+
|
||||
"\u01cd\3\2\2\2\u0090\u01d3\3\2\2\2\u0092\u01d8\3\2\2\2\u0094\u01e7\3\2"+
|
||||
"\2\2\u0096\u01eb\3\2\2\2\u0098\u009a\t\2\2\2\u0099\u0098\3\2\2\2\u009a"+
|
||||
"\u009b\3\2\2\2\u009b\u0099\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d\3\2"+
|
||||
"\2\2\u009d\u009e\b\2\2\2\u009e\5\3\2\2\2\u009f\u00a0\7\61\2\2\u00a0\u00a1"+
|
||||
"\7\61\2\2\u00a1\u00a5\3\2\2\2\u00a2\u00a4\13\2\2\2\u00a3\u00a2\3\2\2\2"+
|
||||
"\u00a4\u00a7\3\2\2\2\u00a5\u00a6\3\2\2\2\u00a5\u00a3\3\2\2\2\u00a6\u00a8"+
|
||||
"\3\2\2\2\u00a7\u00a5\3\2\2\2\u00a8\u00b5\t\3\2\2\u00a9\u00aa\7\61\2\2"+
|
||||
"\u00aa\u00ab\7,\2\2\u00ab\u00af\3\2\2\2\u00ac\u00ae\13\2\2\2\u00ad\u00ac"+
|
||||
"\3\2\2\2\u00ae\u00b1\3\2\2\2\u00af\u00b0\3\2\2\2\u00af\u00ad\3\2\2\2\u00b0"+
|
||||
"\u00b2\3\2\2\2\u00b1\u00af\3\2\2\2\u00b2\u00b3\7,\2\2\u00b3\u00b5\7\61"+
|
||||
"\2\2\u00b4\u009f\3\2\2\2\u00b4\u00a9\3\2\2\2\u00b5\u00b6\3\2\2\2\u00b6"+
|
||||
"\u00b7\b\3\2\2\u00b7\7\3\2\2\2\u00b8\u00b9\7}\2\2\u00b9\t\3\2\2\2\u00ba"+
|
||||
"\u00bb\7\177\2\2\u00bb\13\3\2\2\2\u00bc\u00bd\7]\2\2\u00bd\r\3\2\2\2\u00be"+
|
||||
"\u00bf\7_\2\2\u00bf\17\3\2\2\2\u00c0\u00c1\7*\2\2\u00c1\21\3\2\2\2\u00c2"+
|
||||
"\u00c3\7+\2\2\u00c3\23\3\2\2\2\u00c4\u00c5\7\60\2\2\u00c5\u00c6\3\2\2"+
|
||||
"\2\u00c6\u00c7\b\n\3\2\u00c7\25\3\2\2\2\u00c8\u00c9\7.\2\2\u00c9\27\3"+
|
||||
"\2\2\2\u00ca\u00cb\7=\2\2\u00cb\31\3\2\2\2\u00cc\u00cd\7k\2\2\u00cd\u00ce"+
|
||||
"\7h\2\2\u00ce\33\3\2\2\2\u00cf\u00d0\7g\2\2\u00d0\u00d1\7n\2\2\u00d1\u00d2"+
|
||||
"\7u\2\2\u00d2\u00d3\7g\2\2\u00d3\35\3\2\2\2\u00d4\u00d5\7y\2\2\u00d5\u00d6"+
|
||||
"\7j\2\2\u00d6\u00d7\7k\2\2\u00d7\u00d8\7n\2\2\u00d8\u00d9\7g\2\2\u00d9"+
|
||||
"\37\3\2\2\2\u00da\u00db\7f\2\2\u00db\u00dc\7q\2\2\u00dc!\3\2\2\2\u00dd"+
|
||||
"\u00de\7h\2\2\u00de\u00df\7q\2\2\u00df\u00e0\7t\2\2\u00e0#\3\2\2\2\u00e1"+
|
||||
"\u00e2\7e\2\2\u00e2\u00e3\7q\2\2\u00e3\u00e4\7p\2\2\u00e4\u00e5\7v\2\2"+
|
||||
"\u00e5\u00e6\7k\2\2\u00e6\u00e7\7p\2\2\u00e7\u00e8\7w\2\2\u00e8\u00e9"+
|
||||
"\7g\2\2\u00e9%\3\2\2\2\u00ea\u00eb\7d\2\2\u00eb\u00ec\7t\2\2\u00ec\u00ed"+
|
||||
"\7g\2\2\u00ed\u00ee\7c\2\2\u00ee\u00ef\7m\2\2\u00ef\'\3\2\2\2\u00f0\u00f1"+
|
||||
"\7t\2\2\u00f1\u00f2\7g\2\2\u00f2\u00f3\7v\2\2\u00f3\u00f4\7w\2\2\u00f4"+
|
||||
"\u00f5\7t\2\2\u00f5\u00f6\7p\2\2\u00f6)\3\2\2\2\u00f7\u00f8\7p\2\2\u00f8"+
|
||||
"\u00f9\7g\2\2\u00f9\u00fa\7y\2\2\u00fa+\3\2\2\2\u00fb\u00fc\7v\2\2\u00fc"+
|
||||
"\u00fd\7t\2\2\u00fd\u00fe\7{\2\2\u00fe-\3\2\2\2\u00ff\u0100\7e\2\2\u0100"+
|
||||
"\u0101\7c\2\2\u0101\u0102\7v\2\2\u0102\u0103\7e\2\2\u0103\u0104\7j\2\2"+
|
||||
"\u0104/\3\2\2\2\u0105\u0106\7v\2\2\u0106\u0107\7j\2\2\u0107\u0108\7t\2"+
|
||||
"\2\u0108\u0109\7q\2\2\u0109\u010a\7y\2\2\u010a\61\3\2\2\2\u010b\u010c"+
|
||||
"\7#\2\2\u010c\63\3\2\2\2\u010d\u010e\7\u0080\2\2\u010e\65\3\2\2\2\u010f"+
|
||||
"\u0110\7,\2\2\u0110\67\3\2\2\2\u0111\u0112\7\61\2\2\u01129\3\2\2\2\u0113"+
|
||||
"\u0114\7\'\2\2\u0114;\3\2\2\2\u0115\u0116\7-\2\2\u0116=\3\2\2\2\u0117"+
|
||||
"\u0118\7/\2\2\u0118?\3\2\2\2\u0119\u011a\7>\2\2\u011a\u011b\7>\2\2\u011b"+
|
||||
"A\3\2\2\2\u011c\u011d\7@\2\2\u011d\u011e\7@\2\2\u011eC\3\2\2\2\u011f\u0120"+
|
||||
"\7@\2\2\u0120\u0121\7@\2\2\u0121\u0122\7@\2\2\u0122E\3\2\2\2\u0123\u0124"+
|
||||
"\7>\2\2\u0124G\3\2\2\2\u0125\u0126\7>\2\2\u0126\u0127\7?\2\2\u0127I\3"+
|
||||
"\2\2\2\u0128\u0129\7@\2\2\u0129K\3\2\2\2\u012a\u012b\7@\2\2\u012b\u012c"+
|
||||
"\7?\2\2\u012cM\3\2\2\2\u012d\u012e\7?\2\2\u012e\u012f\7?\2\2\u012fO\3"+
|
||||
"\2\2\2\u0130\u0131\7?\2\2\u0131\u0132\7?\2\2\u0132\u0133\7?\2\2\u0133"+
|
||||
"Q\3\2\2\2\u0134\u0135\7#\2\2\u0135\u0136\7?\2\2\u0136S\3\2\2\2\u0137\u0138"+
|
||||
"\7#\2\2\u0138\u0139\7?\2\2\u0139\u013a\7?\2\2\u013aU\3\2\2\2\u013b\u013c"+
|
||||
"\7(\2\2\u013cW\3\2\2\2\u013d\u013e\7`\2\2\u013eY\3\2\2\2\u013f\u0140\7"+
|
||||
"~\2\2\u0140[\3\2\2\2\u0141\u0142\7(\2\2\u0142\u0143\7(\2\2\u0143]\3\2"+
|
||||
"\2\2\u0144\u0145\7~\2\2\u0145\u0146\7~\2\2\u0146_\3\2\2\2\u0147\u0148"+
|
||||
"\7A\2\2\u0148a\3\2\2\2\u0149\u014a\7<\2\2\u014ac\3\2\2\2\u014b\u014c\7"+
|
||||
"-\2\2\u014c\u014d\7-\2\2\u014de\3\2\2\2\u014e\u014f\7/\2\2\u014f\u0150"+
|
||||
"\7/\2\2\u0150g\3\2\2\2\u0151\u0152\7?\2\2\u0152i\3\2\2\2\u0153\u0154\7"+
|
||||
"-\2\2\u0154\u0155\7?\2\2\u0155k\3\2\2\2\u0156\u0157\7/\2\2\u0157\u0158"+
|
||||
"\7?\2\2\u0158m\3\2\2\2\u0159\u015a\7,\2\2\u015a\u015b\7?\2\2\u015bo\3"+
|
||||
"\2\2\2\u015c\u015d\7\61\2\2\u015d\u015e\7?\2\2\u015eq\3\2\2\2\u015f\u0160"+
|
||||
"\7\'\2\2\u0160\u0161\7?\2\2\u0161s\3\2\2\2\u0162\u0163\7(\2\2\u0163\u0164"+
|
||||
"\7?\2\2\u0164u\3\2\2\2\u0165\u0166\7`\2\2\u0166\u0167\7?\2\2\u0167w\3"+
|
||||
"\2\2\2\u0168\u0169\7~\2\2\u0169\u016a\7?\2\2\u016ay\3\2\2\2\u016b\u016c"+
|
||||
"\7>\2\2\u016c\u016d\7>\2\2\u016d\u016e\7?\2\2\u016e{\3\2\2\2\u016f\u0170"+
|
||||
"\7@\2\2\u0170\u0171\7@\2\2\u0171\u0172\7?\2\2\u0172}\3\2\2\2\u0173\u0174"+
|
||||
"\7@\2\2\u0174\u0175\7@\2\2\u0175\u0176\7@\2\2\u0176\u0177\7?\2\2\u0177"+
|
||||
"\177\3\2\2\2\u0178\u017a\7\62\2\2\u0179\u017b\t\4\2\2\u017a\u0179\3\2"+
|
||||
"\2\2\u017b\u017c\3\2\2\2\u017c\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d"+
|
||||
"\u017f\3\2\2\2\u017e\u0180\t\5\2\2\u017f\u017e\3\2\2\2\u017f\u0180\3\2"+
|
||||
"\2\2\u0180\u0081\3\2\2\2\u0181\u0182\7\62\2\2\u0182\u0184\t\6\2\2\u0183"+
|
||||
"\u0185\t\7\2\2\u0184\u0183\3\2\2\2\u0185\u0186\3\2\2\2\u0186\u0184\3\2"+
|
||||
"\2\2\u0186\u0187\3\2\2\2\u0187\u0189\3\2\2\2\u0188\u018a\t\5\2\2\u0189"+
|
||||
"\u0188\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u0083\3\2\2\2\u018b\u0194\7\62"+
|
||||
"\2\2\u018c\u0190\t\b\2\2\u018d\u018f\t\t\2\2\u018e\u018d\3\2\2\2\u018f"+
|
||||
"\u0192\3\2\2\2\u0190\u018e\3\2\2\2\u0190\u0191\3\2\2\2\u0191\u0194\3\2"+
|
||||
"\2\2\u0192\u0190\3\2\2\2\u0193\u018b\3\2\2\2\u0193\u018c\3\2\2\2\u0194"+
|
||||
"\u0196\3\2\2\2\u0195\u0197\t\n\2\2\u0196\u0195\3\2\2\2\u0196\u0197\3\2"+
|
||||
"\2\2\u0197\u0085\3\2\2\2\u0198\u01a1\7\62\2\2\u0199\u019d\t\b\2\2\u019a"+
|
||||
"\u019c\t\t\2\2\u019b\u019a\3\2\2\2\u019c\u019f\3\2\2\2\u019d\u019b\3\2"+
|
||||
"\2\2\u019d\u019e\3\2\2\2\u019e\u01a1\3\2\2\2\u019f\u019d\3\2\2\2\u01a0"+
|
||||
"\u0198\3\2\2\2\u01a0\u0199\3\2\2\2\u01a1\u01a2\3\2\2\2\u01a2\u01a6\5\24"+
|
||||
"\n\2\u01a3\u01a5\t\t\2\2\u01a4\u01a3\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6"+
|
||||
"\u01a4\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01b2\3\2\2\2\u01a8\u01a6\3\2"+
|
||||
"\2\2\u01a9\u01ab\t\13\2\2\u01aa\u01ac\t\f\2\2\u01ab\u01aa\3\2\2\2\u01ab"+
|
||||
"\u01ac\3\2\2\2\u01ac\u01ae\3\2\2\2\u01ad\u01af\t\t\2\2\u01ae\u01ad\3\2"+
|
||||
"\2\2\u01af\u01b0\3\2\2\2\u01b0\u01ae\3\2\2\2\u01b0\u01b1\3\2\2\2\u01b1"+
|
||||
"\u01b3\3\2\2\2\u01b2\u01a9\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b5\3\2"+
|
||||
"\2\2\u01b4\u01b6\t\r\2\2\u01b5\u01b4\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6"+
|
||||
"\u0087\3\2\2\2\u01b7\u01bf\7$\2\2\u01b8\u01b9\7^\2\2\u01b9\u01be\7$\2"+
|
||||
"\2\u01ba\u01bb\7^\2\2\u01bb\u01be\7^\2\2\u01bc\u01be\n\16\2\2\u01bd\u01b8"+
|
||||
"\3\2\2\2\u01bd\u01ba\3\2\2\2\u01bd\u01bc\3\2\2\2\u01be\u01c1\3\2\2\2\u01bf"+
|
||||
"\u01c0\3\2\2\2\u01bf\u01bd\3\2\2\2\u01c0\u01c2\3\2\2\2\u01c1\u01bf\3\2"+
|
||||
"\2\2\u01c2\u01c3\7$\2\2\u01c3\u0089\3\2\2\2\u01c4\u01c5\7)\2\2\u01c5\u01c6"+
|
||||
"\13\2\2\2\u01c6\u01c7\7)\2\2\u01c7\u008b\3\2\2\2\u01c8\u01c9\7v\2\2\u01c9"+
|
||||
"\u01ca\7t\2\2\u01ca\u01cb\7w\2\2\u01cb\u01cc\7g\2\2\u01cc\u008d\3\2\2"+
|
||||
"\2\u01cd\u01ce\7h\2\2\u01ce\u01cf\7c\2\2\u01cf\u01d0\7n\2\2\u01d0\u01d1"+
|
||||
"\7u\2\2\u01d1\u01d2\7g\2\2\u01d2\u008f\3\2\2\2\u01d3\u01d4\7p\2\2\u01d4"+
|
||||
"\u01d5\7w\2\2\u01d5\u01d6\7n\2\2\u01d6\u01d7\7n\2\2\u01d7\u0091\3\2\2"+
|
||||
"\2\u01d8\u01dc\t\17\2\2\u01d9\u01db\t\20\2\2\u01da\u01d9\3\2\2\2\u01db"+
|
||||
"\u01de\3\2\2\2\u01dc\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u0093\3\2"+
|
||||
"\2\2\u01de\u01dc\3\2\2\2\u01df\u01e8\7\62\2\2\u01e0\u01e4\t\b\2\2\u01e1"+
|
||||
"\u01e3\t\t\2\2\u01e2\u01e1\3\2\2\2\u01e3\u01e6\3\2\2\2\u01e4\u01e2\3\2"+
|
||||
"\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e8\3\2\2\2\u01e6\u01e4\3\2\2\2\u01e7"+
|
||||
"\u01df\3\2\2\2\u01e7\u01e0\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01ea\bJ"+
|
||||
"\4\2\u01ea\u0095\3\2\2\2\u01eb\u01ef\t\17\2\2\u01ec\u01ee\t\20\2\2\u01ed"+
|
||||
"\u01ec\3\2\2\2\u01ee\u01f1\3\2\2\2\u01ef\u01ed\3\2\2\2\u01ef\u01f0\3\2"+
|
||||
"\2\2\u01f0\u01f2\3\2\2\2\u01f1\u01ef\3\2\2\2\u01f2\u01f3\bK\4\2\u01f3"+
|
||||
"\u0097\3\2\2\2\34\2\3\u009b\u00a5\u00af\u00b4\u017c\u017f\u0186\u0189"+
|
||||
"\u0190\u0193\u0196\u019d\u01a0\u01a6\u01ab\u01b0\u01b2\u01b5\u01bd\u01bf"+
|
||||
"\u01dc\u01e4\u01e7\u01ef\5\b\2\2\4\3\2\4\2\2";
|
||||
public static final ATN _ATN =
|
||||
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
|
||||
static {
|
||||
|
|
File diff suppressed because it is too large
|
@@ -165,6 +165,20 @@ class PainlessParserBaseVisitor<T> extends AbstractParseTreeVisitor<T> implement
|
|||
* {@link #visitChildren} on {@code ctx}.</p>
|
||||
*/
|
||||
@Override public T visitTrap(PainlessParser.TrapContext ctx) { return visitChildren(ctx); }
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* <p>The default implementation returns the result of calling
|
||||
* {@link #visitChildren} on {@code ctx}.</p>
|
||||
*/
|
||||
@Override public T visitIdentifier(PainlessParser.IdentifierContext ctx) { return visitChildren(ctx); }
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* <p>The default implementation returns the result of calling
|
||||
* {@link #visitChildren} on {@code ctx}.</p>
|
||||
*/
|
||||
@Override public T visitGeneric(PainlessParser.GenericContext ctx) { return visitChildren(ctx); }
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
|
@@ -312,13 +326,6 @@ class PainlessParserBaseVisitor<T> extends AbstractParseTreeVisitor<T> implement
|
|||
* {@link #visitChildren} on {@code ctx}.</p>
|
||||
*/
|
||||
@Override public T visitExtdot(PainlessParser.ExtdotContext ctx) { return visitChildren(ctx); }
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* <p>The default implementation returns the result of calling
|
||||
* {@link #visitChildren} on {@code ctx}.</p>
|
||||
*/
|
||||
@Override public T visitExttype(PainlessParser.ExttypeContext ctx) { return visitChildren(ctx); }
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
|
|
|
@@ -155,6 +155,18 @@ interface PainlessParserVisitor<T> extends ParseTreeVisitor<T> {
|
|||
* @return the visitor result
|
||||
*/
|
||||
T visitTrap(PainlessParser.TrapContext ctx);
|
||||
/**
|
||||
* Visit a parse tree produced by {@link PainlessParser#identifier}.
|
||||
* @param ctx the parse tree
|
||||
* @return the visitor result
|
||||
*/
|
||||
T visitIdentifier(PainlessParser.IdentifierContext ctx);
|
||||
/**
|
||||
* Visit a parse tree produced by {@link PainlessParser#generic}.
|
||||
* @param ctx the parse tree
|
||||
* @return the visitor result
|
||||
*/
|
||||
T visitGeneric(PainlessParser.GenericContext ctx);
|
||||
/**
|
||||
* Visit a parse tree produced by the {@code comp}
|
||||
* labeled alternative in {@link PainlessParser#expression}.
|
||||
|
@@ -297,12 +309,6 @@ interface PainlessParserVisitor<T> extends ParseTreeVisitor<T> {
|
|||
* @return the visitor result
|
||||
*/
|
||||
T visitExtdot(PainlessParser.ExtdotContext ctx);
|
||||
/**
|
||||
* Visit a parse tree produced by {@link PainlessParser#exttype}.
|
||||
* @param ctx the parse tree
|
||||
* @return the visitor result
|
||||
*/
|
||||
T visitExttype(PainlessParser.ExttypeContext ctx);
|
||||
/**
|
||||
* Visit a parse tree produced by {@link PainlessParser#extcall}.
|
||||
* @param ctx the parse tree
|
||||
|
|
|
@@ -49,10 +49,11 @@ import org.elasticsearch.painless.PainlessParser.ExtnewContext;
|
|||
import org.elasticsearch.painless.PainlessParser.ExtprecContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtstartContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtstringContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExttypeContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtvarContext;
|
||||
import org.elasticsearch.painless.PainlessParser.FalseContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ForContext;
|
||||
import org.elasticsearch.painless.PainlessParser.GenericContext;
|
||||
import org.elasticsearch.painless.PainlessParser.IdentifierContext;
|
||||
import org.elasticsearch.painless.PainlessParser.IfContext;
|
||||
import org.elasticsearch.painless.PainlessParser.IncrementContext;
|
||||
import org.elasticsearch.painless.PainlessParser.InitializerContext;
|
||||
|
@@ -151,19 +152,22 @@ class Writer extends PainlessParserBaseVisitor<Void> {
|
|||
private void writeExecute() {
|
||||
final Label fals = new Label();
|
||||
final Label end = new Label();
|
||||
execute.visitVarInsn(Opcodes.ALOAD, metadata.inputValueSlot);
|
||||
execute.push("#score");
|
||||
execute.invokeInterface(MAP_TYPE, MAP_GET);
|
||||
execute.dup();
|
||||
execute.ifNull(fals);
|
||||
execute.checkCast(SCORE_ACCESSOR_TYPE);
|
||||
execute.invokeVirtual(SCORE_ACCESSOR_TYPE, SCORE_ACCESSOR_FLOAT);
|
||||
execute.goTo(end);
|
||||
execute.mark(fals);
|
||||
execute.pop();
|
||||
execute.push(0F);
|
||||
execute.mark(end);
|
||||
execute.visitVarInsn(Opcodes.FSTORE, metadata.scoreValueSlot);
|
||||
|
||||
if (metadata.scoreValueUsed) {
|
||||
execute.visitVarInsn(Opcodes.ALOAD, metadata.inputValueSlot);
|
||||
execute.push("#score");
|
||||
execute.invokeInterface(MAP_TYPE, MAP_GET);
|
||||
execute.dup();
|
||||
execute.ifNull(fals);
|
||||
execute.checkCast(SCORE_ACCESSOR_TYPE);
|
||||
execute.invokeVirtual(SCORE_ACCESSOR_TYPE, SCORE_ACCESSOR_FLOAT);
|
||||
execute.goTo(end);
|
||||
execute.mark(fals);
|
||||
execute.pop();
|
||||
execute.push(0F);
|
||||
execute.mark(end);
|
||||
execute.visitVarInsn(Opcodes.FSTORE, metadata.scoreValueSlot);
|
||||
}
|
||||
|
||||
execute.push(settings.getMaxLoopCounter());
|
||||
execute.visitVarInsn(Opcodes.ISTORE, metadata.loopCounterSlot);
|
||||
|
@@ -328,6 +332,16 @@ class Writer extends PainlessParserBaseVisitor<Void> {
|
|||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void visitIdentifier(IdentifierContext ctx) {
|
||||
throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void visitGeneric(GenericContext ctx) {
|
||||
throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void visitPrecedence(final PrecedenceContext ctx) {
|
||||
throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state.");
|
||||
|
@@ -474,13 +488,6 @@ class Writer extends PainlessParserBaseVisitor<Void> {
|
|||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void visitExttype(final ExttypeContext ctx) {
|
||||
external.processExttype(ctx);
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void visitExtcall(final ExtcallContext ctx) {
|
||||
external.processExtcall(ctx);
|
||||
|
|
|
@@ -38,7 +38,6 @@ import org.elasticsearch.painless.PainlessParser.ExtnewContext;
|
|||
import org.elasticsearch.painless.PainlessParser.ExtprecContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtstartContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtstringContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExttypeContext;
|
||||
import org.elasticsearch.painless.PainlessParser.ExtvarContext;
|
||||
import org.objectweb.asm.Opcodes;
|
||||
import org.objectweb.asm.commons.GeneratorAdapter;
|
||||
|
@@ -114,7 +113,6 @@ class WriterExternal {
|
|||
|
||||
final ExtprecContext precctx = ctx.extprec();
|
||||
final ExtcastContext castctx = ctx.extcast();
|
||||
final ExttypeContext typectx = ctx.exttype();
|
||||
final ExtvarContext varctx = ctx.extvar();
|
||||
final ExtnewContext newctx = ctx.extnew();
|
||||
final ExtstringContext stringctx = ctx.extstring();
|
||||
|
@@ -123,8 +121,6 @@ class WriterExternal {
|
|||
writer.visit(precctx);
|
||||
} else if (castctx != null) {
|
||||
writer.visit(castctx);
|
||||
} else if (typectx != null) {
|
||||
writer.visit(typectx);
|
||||
} else if (varctx != null) {
|
||||
writer.visit(varctx);
|
||||
} else if (newctx != null) {
|
||||
|
@@ -139,7 +135,6 @@ class WriterExternal {
|
|||
void processExtprec(final ExtprecContext ctx) {
|
||||
final ExtprecContext precctx = ctx.extprec();
|
||||
final ExtcastContext castctx = ctx.extcast();
|
||||
final ExttypeContext typectx = ctx.exttype();
|
||||
final ExtvarContext varctx = ctx.extvar();
|
||||
final ExtnewContext newctx = ctx.extnew();
|
||||
final ExtstringContext stringctx = ctx.extstring();
|
||||
|
@@ -148,8 +143,6 @@ class WriterExternal {
|
|||
writer.visit(precctx);
|
||||
} else if (castctx != null) {
|
||||
writer.visit(castctx);
|
||||
} else if (typectx != null) {
|
||||
writer.visit(typectx);
|
||||
} else if (varctx != null) {
|
||||
writer.visit(varctx);
|
||||
} else if (newctx != null) {
|
||||
|
@@ -175,7 +168,6 @@ class WriterExternal {
|
|||
|
||||
final ExtprecContext precctx = ctx.extprec();
|
||||
final ExtcastContext castctx = ctx.extcast();
|
||||
final ExttypeContext typectx = ctx.exttype();
|
||||
final ExtvarContext varctx = ctx.extvar();
|
||||
final ExtnewContext newctx = ctx.extnew();
|
||||
final ExtstringContext stringctx = ctx.extstring();
|
||||
|
@@ -184,8 +176,6 @@ class WriterExternal {
|
|||
writer.visit(precctx);
|
||||
} else if (castctx != null) {
|
||||
writer.visit(castctx);
|
||||
} else if (typectx != null) {
|
||||
writer.visit(typectx);
|
||||
} else if (varctx != null) {
|
||||
writer.visit(varctx);
|
||||
} else if (newctx != null) {
|
||||
|
@@ -226,10 +216,6 @@ class WriterExternal {
|
|||
}
|
||||
}
|
||||
|
||||
void processExttype(final ExttypeContext ctx) {
|
||||
writer.visit(ctx.extdot());
|
||||
}
|
||||
|
||||
void processExtcall(final ExtcallContext ctx) {
|
||||
writeCallExternal(ctx);
|
||||
|
||||
|
@@ -273,12 +259,9 @@ class WriterExternal {
|
|||
writeNewExternal(ctx);
|
||||
|
||||
final ExtdotContext dotctx = ctx.extdot();
|
||||
final ExtbraceContext bracectx = ctx.extbrace();
|
||||
|
||||
if (dotctx != null) {
|
||||
writer.visit(dotctx);
|
||||
} else if (bracectx != null) {
|
||||
writer.visit(bracectx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -301,6 +284,10 @@ class WriterExternal {
|
|||
final ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source);
|
||||
final ExternalMetadata parentemd = metadata.getExternalMetadata(sourceenmd.parent);
|
||||
|
||||
if (sourceenmd.target == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
final boolean length = "#length".equals(sourceenmd.target);
|
||||
final boolean array = "#brace".equals(sourceenmd.target);
|
||||
final boolean name = sourceenmd.target instanceof String && !length && !array;
|
||||
|
@@ -444,12 +431,15 @@ class WriterExternal {
|
|||
}
|
||||
}
|
||||
|
||||
private void writeLoadStoreVariable(final ParserRuleContext source, final boolean store,
|
||||
final Type type, final int slot) {
|
||||
private void writeLoadStoreVariable(final ParserRuleContext source, final boolean store, final Type type, int slot) {
|
||||
if (type.sort == Sort.VOID) {
|
||||
throw new IllegalStateException(WriterUtility.error(source) + "Cannot load/store void type.");
|
||||
}
|
||||
|
||||
if (!metadata.scoreValueUsed && slot > metadata.scoreValueSlot) {
|
||||
--slot;
|
||||
}
|
||||
|
||||
if (store) {
|
||||
execute.visitVarInsn(type.type.getOpcode(Opcodes.ISTORE), slot);
|
||||
} else {
|
||||
|
|
|
@@ -327,7 +327,11 @@ class WriterStatement {
|
|||
final ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx);
|
||||
final org.objectweb.asm.Type type = declvaremd.to.type;
|
||||
final Sort sort = declvaremd.to.sort;
|
||||
final int slot = (int)declvaremd.postConst;
|
||||
int slot = (int)declvaremd.postConst;
|
||||
|
||||
if (!metadata.scoreValueUsed && slot > metadata.scoreValueSlot) {
|
||||
--slot;
|
||||
}
|
||||
|
||||
final ExpressionContext exprctx = ctx.expression();
|
||||
final boolean initialize = exprctx == null;
|
||||
|
|
|
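The hunks above in WriterExternal and WriterStatement apply the same slot adjustment. As a minimal standalone sketch of that idea (the class and method names here are invented for illustration, not part of the change): when a script never reads the score, the local-variable slot reserved for it is never allocated, so every local that would have come after it shifts down by one.

class SlotCompactionSketch {
    // Mirrors the `if (!metadata.scoreValueUsed && slot > metadata.scoreValueSlot) --slot;` pattern above.
    static int adjustSlot(int slot, boolean scoreValueUsed, int scoreValueSlot) {
        if (!scoreValueUsed && slot > scoreValueSlot) {
            --slot; // the unused score slot disappears, so later locals move down by one
        }
        return slot;
    }

    public static void main(String[] args) {
        System.out.println(adjustSlot(3, false, 2)); // 2: a local after the unused score slot shifts down
        System.out.println(adjustSlot(3, true, 2));  // 3: the score slot is in use, nothing shifts
        System.out.println(adjustSlot(1, false, 2)); // 1: locals before the score slot are unaffected
    }
}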
@@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.painless;
|
||||
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
||||
import java.util.Collection;
|
||||
|
||||
public class ScoreTests extends ESSingleNodeTestCase {
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return pluginList(PainlessPlugin.class);
|
||||
}
|
||||
|
||||
public void testScore() {
|
||||
createIndex("test", Settings.EMPTY, "type", "t", "type=text");
|
||||
ensureGreen("test");
|
||||
|
||||
client().prepareIndex("test", "type", "1").setSource("t", "a").get();
|
||||
client().prepareIndex("test", "type", "2").setSource("t", "a a b").get();
|
||||
client().prepareIndex("test", "type", "3").setSource("t", "a a a b c").get();
|
||||
client().prepareIndex("test", "type", "4").setSource("t", "a b c d").get();
|
||||
client().prepareIndex("test", "type", "5").setSource("t", "a a b c d e").get();
|
||||
client().admin().indices().prepareRefresh("test").get();
|
||||
|
||||
final Script script = new Script("_score + 1", ScriptService.ScriptType.INLINE, "painless", null);
|
||||
|
||||
final SearchResponse sr = client().prepareSearch("test").setQuery(
|
||||
QueryBuilders.functionScoreQuery(QueryBuilders.matchQuery("t", "a"),
|
||||
ScoreFunctionBuilders.scriptFunction(script))).get();
|
||||
final SearchHit[] hits = sr.getHits().getHits();
|
||||
|
||||
for (final SearchHit hit : hits) {
|
||||
assertTrue(hit.score() > 0.9999F && hit.score() < 2.0001F);
|
||||
}
|
||||
|
||||
assertEquals("1", hits[0].getId());
|
||||
assertEquals("3", hits[1].getId());
|
||||
assertEquals("2", hits[2].getId());
|
||||
assertEquals("5", hits[3].getId());
|
||||
assertEquals("4", hits[4].getId());
|
||||
}
|
||||
}
|
|
@@ -113,7 +113,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
|
|||
fail("should have hit ParseException");
|
||||
} catch (RuntimeException expected) {
|
||||
assertTrue(expected.getMessage().contains(
|
||||
"unexpected token ['PainlessError'] was expecting one of [TYPE]."));
|
||||
"Invalid type [PainlessError]."));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -70,8 +70,8 @@ import static org.elasticsearch.rest.RestStatus.CONFLICT;
|
|||
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
|
||||
|
||||
/**
|
||||
* Abstract base for scrolling across a search and executing bulk actions on all
|
||||
* results. All package private methods are package private so their tests can use them.
|
||||
* Abstract base for scrolling across a search and executing bulk actions on all results. All package private methods are package private so
|
||||
* their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block.
|
||||
*/
|
||||
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>, Response> {
|
||||
/**
|
||||
|
@@ -173,52 +173,62 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
total = min(total, mainRequest.getSize());
|
||||
}
|
||||
task.setTotal(total);
|
||||
task.countThrottle(delay);
|
||||
threadPool.schedule(delay, ThreadPool.Names.GENERIC, threadPool.getThreadContext().preserveContext(new AbstractRunnable() {
|
||||
AbstractRunnable prepareBulkRequestRunnable = new AbstractRunnable() {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
if (task.isCancelled()) {
|
||||
finishHim(null);
|
||||
return;
|
||||
}
|
||||
lastBatchStartTime.set(System.nanoTime());
|
||||
SearchHit[] docs = searchResponse.getHits().getHits();
|
||||
logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
|
||||
if (docs.length == 0) {
|
||||
startNormalTermination(emptyList(), emptyList(), false);
|
||||
return;
|
||||
}
|
||||
task.countBatch();
|
||||
List<SearchHit> docsIterable = Arrays.asList(docs);
|
||||
if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
|
||||
// Truncate the docs if we have more than the request size
|
||||
long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
|
||||
if (remaining < docs.length) {
|
||||
docsIterable = docsIterable.subList(0, (int) remaining);
|
||||
}
|
||||
}
|
||||
BulkRequest request = buildBulk(docsIterable);
|
||||
if (request.requests().isEmpty()) {
|
||||
/*
|
||||
* If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
|
||||
*/
|
||||
startNextScroll(0);
|
||||
return;
|
||||
}
|
||||
request.timeout(mainRequest.getTimeout());
|
||||
request.consistencyLevel(mainRequest.getConsistency());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
|
||||
new ByteSizeValue(request.estimatedSizeInBytes()));
|
||||
}
|
||||
sendBulkRequest(request);
|
||||
prepareBulkRequest(searchResponse);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
finishHim(t);
|
||||
}
|
||||
}));
|
||||
};
|
||||
prepareBulkRequestRunnable = (AbstractRunnable) threadPool.getThreadContext().preserveContext(prepareBulkRequestRunnable);
|
||||
task.delayPrepareBulkRequest(threadPool, delay, prepareBulkRequestRunnable);
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare the bulk request. Called on the generic thread pool after some preflight checks have been done on the SearchResponse and any
|
||||
* delay has been slept. Uses the generic thread pool because reindex is rare enough not to need its own thread pool and because the
|
||||
* thread may be blocked by the user script.
|
||||
*/
|
||||
void prepareBulkRequest(SearchResponse searchResponse) {
|
||||
if (task.isCancelled()) {
|
||||
finishHim(null);
|
||||
return;
|
||||
}
|
||||
lastBatchStartTime.set(System.nanoTime());
|
||||
SearchHit[] docs = searchResponse.getHits().getHits();
|
||||
logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
|
||||
if (docs.length == 0) {
|
||||
startNormalTermination(emptyList(), emptyList(), false);
|
||||
return;
|
||||
}
|
||||
task.countBatch();
|
||||
List<SearchHit> docsIterable = Arrays.asList(docs);
|
||||
if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
|
||||
// Truncate the docs if we have more than the request size
|
||||
long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
|
||||
if (remaining < docs.length) {
|
||||
docsIterable = docsIterable.subList(0, (int) remaining);
|
||||
}
|
||||
}
|
||||
BulkRequest request = buildBulk(docsIterable);
|
||||
if (request.requests().isEmpty()) {
|
||||
/*
|
||||
* If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
|
||||
*/
|
||||
startNextScroll(0);
|
||||
return;
|
||||
}
|
||||
request.timeout(mainRequest.getTimeout());
|
||||
request.consistencyLevel(mainRequest.getConsistency());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(),
|
||||
new ByteSizeValue(request.estimatedSizeInBytes()));
|
||||
}
|
||||
sendBulkRequest(request);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -329,13 +339,13 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
* How many nanoseconds should a batch of lastBatchSize have taken if it were perfectly throttled? Package private for testing.
|
||||
*/
|
||||
float perfectlyThrottledBatchTime(int lastBatchSize) {
|
||||
if (mainRequest.getRequestsPerSecond() == 0) {
|
||||
if (task.getRequestsPerSecond() == 0) {
|
||||
return 0;
|
||||
}
|
||||
// requests
|
||||
// ------------------- == seconds
|
||||
// request per seconds
|
||||
float targetBatchTimeInSeconds = lastBatchSize / mainRequest.getRequestsPerSecond();
|
||||
float targetBatchTimeInSeconds = lastBatchSize / task.getRequestsPerSecond();
|
||||
// nanoseconds per seconds * seconds == nanoseconds
|
||||
return TimeUnit.SECONDS.toNanos(1) * targetBatchTimeInSeconds;
|
||||
}
|
||||
|
|
|
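To make the comment arithmetic above concrete, here is a small standalone sketch of perfectlyThrottledBatchTime with the throttle passed in directly (the wrapper class and hard-coded numbers are assumptions for illustration): a batch of 100 requests at 1 request per second should have taken 100 seconds, i.e. 1.0E11 nanoseconds.

import java.util.concurrent.TimeUnit;

class PerfectBatchTimeSketch {
    // requests / (requests per second) == seconds; nanoseconds per second * seconds == nanoseconds
    static float perfectlyThrottledBatchTime(int lastBatchSize, float requestsPerSecond) {
        if (requestsPerSecond == 0) {
            return 0; // unthrottled: there is no target batch time
        }
        float targetBatchTimeInSeconds = lastBatchSize / requestsPerSecond;
        return TimeUnit.SECONDS.toNanos(1) * targetBatchTimeInSeconds;
    }

    public static void main(String[] args) {
        System.out.println(perfectlyThrottledBatchTime(100, 1f)); // 1.0E11 nanoseconds, i.e. 100 seconds
        System.out.println(perfectlyThrottledBatchTime(100, 0f)); // 0.0: "unlimited" means no throttling
    }
}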
@@ -43,6 +43,24 @@ public abstract class AbstractBaseReindexRestHandler<
|
|||
Response extends BulkIndexByScrollResponse,
|
||||
TA extends TransportAction<Request, Response>
|
||||
> extends BaseRestHandler {
|
||||
|
||||
/**
|
||||
* @return requests_per_second from the request as a float if it was on the request, null otherwise
|
||||
*/
|
||||
public static Float parseRequestsPerSecond(RestRequest request) {
|
||||
String requestsPerSecond = request.param("requests_per_second");
|
||||
if (requestsPerSecond == null) {
|
||||
return null;
|
||||
}
|
||||
if ("".equals(requestsPerSecond)) {
|
||||
throw new IllegalArgumentException("requests_per_second cannot be an empty string");
|
||||
}
|
||||
if ("unlimited".equals(requestsPerSecond)) {
|
||||
return 0f;
|
||||
}
|
||||
return Float.parseFloat(requestsPerSecond);
|
||||
}
|
||||
|
||||
protected final IndicesQueriesRegistry indicesQueriesRegistry;
|
||||
protected final AggregatorParsers aggParsers;
|
||||
protected final Suggesters suggesters;
|
||||
|
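The parsing rules introduced above are easy to miss in the diff, so here is a standalone sketch with the RestRequest plumbing replaced by a plain String (the wrapper class is an assumption for illustration): an absent parameter keeps the existing default, "unlimited" maps to 0, an empty string is rejected, and anything else is parsed as a float.

class RequestsPerSecondParsingSketch {
    static Float parse(String raw) {
        if (raw == null) {
            return null; // parameter absent: the caller keeps the request's default
        }
        if (raw.isEmpty()) {
            throw new IllegalArgumentException("requests_per_second cannot be an empty string");
        }
        if ("unlimited".equals(raw)) {
            return 0f; // 0 is the internal encoding for "no throttle"
        }
        return Float.parseFloat(raw);
    }

    public static void main(String[] args) {
        System.out.println(parse(null));        // null
        System.out.println(parse("unlimited")); // 0.0
        System.out.println(parse("12.5"));      // 12.5
    }
}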
@@ -61,7 +79,11 @@ public abstract class AbstractBaseReindexRestHandler<
|
|||
}
|
||||
|
||||
protected void execute(RestRequest request, Request internalRequest, RestChannel channel) throws IOException {
|
||||
internalRequest.setRequestsPerSecond(request.paramAsFloat("requests_per_second", internalRequest.getRequestsPerSecond()));
|
||||
Float requestsPerSecond = parseRequestsPerSecond(request);
|
||||
if (requestsPerSecond != null) {
|
||||
internalRequest.setRequestsPerSecond(requestsPerSecond);
|
||||
}
|
||||
|
||||
if (request.paramAsBoolean("wait_for_completion", true)) {
|
||||
action.execute(internalRequest, new BulkIndexByScrollResponseContentListener<Response>(channel));
|
||||
return;
|
||||
|
|
|
@@ -276,7 +276,7 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
|
|||
|
||||
@Override
|
||||
public Task createTask(long id, String type, String action) {
|
||||
return new BulkByScrollTask(id, type, action, getDescription());
|
||||
return new BulkByScrollTask(id, type, action, getDescription(), requestsPerSecond);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -23,14 +23,22 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.tasks.CancellableTask;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static java.lang.Math.round;
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
|
||||
|
||||
/**
|
||||
|
@@ -50,15 +58,42 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
private final AtomicLong versionConflicts = new AtomicLong(0);
|
||||
private final AtomicLong retries = new AtomicLong(0);
|
||||
private final AtomicLong throttledNanos = new AtomicLong();
|
||||
/**
|
||||
* The number of requests per second to which to throttle the request that this task represents. The other variables are all AtomicXXX
|
||||
* style variables but there isn't an AtomicFloat so we just use a volatile.
|
||||
*/
|
||||
private volatile float requestsPerSecond;
|
||||
/**
|
||||
* Reference to the last delayed prepareBulkRequest call. Used during rethrottling and canceling to reschedule the request.
|
||||
*/
|
||||
private final AtomicReference<DelayedPrepareBulkRequest> delayedPrepareBulkRequestReference = new AtomicReference<>();
|
||||
|
||||
public BulkByScrollTask(long id, String type, String action, String description) {
|
||||
public BulkByScrollTask(long id, String type, String action, String description, float requestsPerSecond) {
|
||||
super(id, type, action, description);
|
||||
setRequestsPerSecond(requestsPerSecond);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onCancelled() {
|
||||
// Drop the throttle to 0, immediately rescheduling all outstanding tasks so the task will wake up and cancel itself.
|
||||
rethrottle(0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Status getStatus() {
|
||||
return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
|
||||
retries.get(), timeValueNanos(throttledNanos.get()), getReasonCancelled());
|
||||
retries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), getReasonCancelled(), throttledUntil());
|
||||
}
|
||||
|
||||
private TimeValue throttledUntil() {
|
||||
DelayedPrepareBulkRequest delayed = delayedPrepareBulkRequestReference.get();
|
||||
if (delayed == null) {
|
||||
return timeValueNanos(0);
|
||||
}
|
||||
if (delayed.future == null) {
|
||||
return timeValueNanos(0);
|
||||
}
|
||||
return timeValueNanos(delayed.future.getDelay(TimeUnit.NANOSECONDS));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -70,6 +105,7 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
|
||||
public static class Status implements Task.Status {
|
||||
public static final String NAME = "bulk-by-scroll";
|
||||
|
||||
private final long total;
|
||||
private final long updated;
|
||||
private final long created;
|
||||
|
@@ -79,10 +115,12 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
private final long noops;
|
||||
private final long retries;
|
||||
private final TimeValue throttled;
|
||||
private final float requestsPerSecond;
|
||||
private final String reasonCancelled;
|
||||
private final TimeValue throttledUntil;
|
||||
|
||||
public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
|
||||
TimeValue throttled, @Nullable String reasonCancelled) {
|
||||
TimeValue throttled, float requestsPerSecond, @Nullable String reasonCancelled, TimeValue throttledUntil) {
|
||||
this.total = checkPositive(total, "total");
|
||||
this.updated = checkPositive(updated, "updated");
|
||||
this.created = checkPositive(created, "created");
|
||||
|
@@ -92,7 +130,9 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
this.noops = checkPositive(noops, "noops");
|
||||
this.retries = checkPositive(retries, "retries");
|
||||
this.throttled = throttled;
|
||||
this.requestsPerSecond = requestsPerSecond;
|
||||
this.reasonCancelled = reasonCancelled;
|
||||
this.throttledUntil = throttledUntil;
|
||||
}
|
||||
|
||||
public Status(StreamInput in) throws IOException {
|
||||
|
@@ -105,7 +145,9 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
noops = in.readVLong();
|
||||
retries = in.readVLong();
|
||||
throttled = TimeValue.readTimeValue(in);
|
||||
requestsPerSecond = in.readFloat();
|
||||
reasonCancelled = in.readOptionalString();
|
||||
throttledUntil = TimeValue.readTimeValue(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -119,7 +161,9 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
out.writeVLong(noops);
|
||||
out.writeVLong(retries);
|
||||
throttled.writeTo(out);
|
||||
out.writeFloat(requestsPerSecond);
|
||||
out.writeOptionalString(reasonCancelled);
|
||||
throttledUntil.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -144,9 +188,11 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
builder.field("noops", noops);
|
||||
builder.field("retries", retries);
|
||||
builder.timeValueField("throttled_millis", "throttled", throttled);
|
||||
builder.field("requests_per_second", requestsPerSecond == 0 ? "unlimited" : requestsPerSecond);
|
||||
if (reasonCancelled != null) {
|
||||
builder.field("canceled", reasonCancelled);
|
||||
}
|
||||
builder.timeValueField("throttled_until_millis", "throttled_until", throttledUntil);
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@@ -173,6 +219,7 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
if (reasonCancelled != null) {
|
||||
builder.append(",canceled=").append(reasonCancelled);
|
||||
}
|
||||
builder.append(",throttledUntil=").append(throttledUntil);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -238,12 +285,19 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
}
|
||||
|
||||
/**
|
||||
* The total time this request has throttled itself.
|
||||
* The total time this request has throttled itself not including the current throttle time if it is currently sleeping.
|
||||
*/
|
||||
public TimeValue getThrottled() {
|
||||
return throttled;
|
||||
}
|
||||
|
||||
/**
|
||||
* The number of requests per second to which to throttle the request. 0 means unlimited.
|
||||
*/
|
||||
public float getRequestsPerSecond() {
|
||||
return requestsPerSecond;
|
||||
}
|
||||
|
||||
/**
|
||||
* The reason that the request was canceled or null if it hasn't been.
|
||||
*/
|
||||
|
@@ -251,6 +305,13 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
return reasonCancelled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remaining delay of any current throttle sleep or 0 if not sleeping.
|
||||
*/
|
||||
public TimeValue getThrottledUntil() {
|
||||
return throttledUntil;
|
||||
}
|
||||
|
||||
private int checkPositive(int value, String name) {
|
||||
if (value < 0) {
|
||||
throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]");
|
||||
|
@@ -298,10 +359,114 @@ public class BulkByScrollTask extends CancellableTask {
|
|||
retries.incrementAndGet();
|
||||
}
|
||||
|
||||
public void countThrottle(TimeValue delay) {
|
||||
long nanos = delay.nanos();
|
||||
if (nanos > 0) {
|
||||
throttledNanos.addAndGet(nanos);
|
||||
float getRequestsPerSecond() {
|
||||
return requestsPerSecond;
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule prepareBulkRequestRunnable to run after some delay. This is where throttling plugs into reindexing so the request can be
|
||||
* rescheduled over and over again.
|
||||
*/
|
||||
void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue delay, AbstractRunnable prepareBulkRequestRunnable) {
|
||||
// Synchronize so we are less likely to schedule the same request twice.
|
||||
synchronized (delayedPrepareBulkRequestReference) {
|
||||
AbstractRunnable oneTime = new AbstractRunnable() {
|
||||
private final AtomicBoolean hasRun = new AtomicBoolean(false);
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
// Paranoia to prevent furiously rethrottling from running the command multiple times. Without this we totally can.
|
||||
if (hasRun.compareAndSet(false, true)) {
|
||||
prepareBulkRequestRunnable.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
prepareBulkRequestRunnable.onFailure(t);
|
||||
}
|
||||
};
|
||||
delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), delay, oneTime));
|
||||
}
|
||||
}
|
||||
|
||||
private void setRequestsPerSecond(float requestsPerSecond) {
|
||||
if (requestsPerSecond == -1) {
|
||||
requestsPerSecond = 0;
|
||||
}
|
||||
this.requestsPerSecond = requestsPerSecond;
|
||||
}
|
||||
|
||||
void rethrottle(float newRequestsPerSecond) {
|
||||
synchronized (delayedPrepareBulkRequestReference) {
|
||||
setRequestsPerSecond(newRequestsPerSecond);
|
||||
|
||||
DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get();
|
||||
if (delayedPrepareBulkRequest == null) {
|
||||
// No request has been queued yet so nothing to reschedule.
|
||||
return;
|
||||
}
|
||||
|
||||
this.delayedPrepareBulkRequestReference.set(delayedPrepareBulkRequest.rethrottle(newRequestsPerSecond));
|
||||
}
|
||||
}
|
||||
|
||||
class DelayedPrepareBulkRequest {
|
||||
private final ThreadPool threadPool;
|
||||
private final AbstractRunnable command;
|
||||
private final float requestsPerSecond;
|
||||
private final ScheduledFuture<?> future;
|
||||
|
||||
DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) {
|
||||
this.threadPool = threadPool;
|
||||
this.requestsPerSecond = requestsPerSecond;
|
||||
this.command = command;
|
||||
this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
throttledNanos.addAndGet(delay.nanos());
|
||||
command.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
command.onFailure(t);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) {
|
||||
if (newRequestsPerSecond != 0 && newRequestsPerSecond < requestsPerSecond) {
|
||||
/*
|
||||
* The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay
|
||||
* prepareBulkRequest. We can't just reschedule the request further out in the future because the bulk context might time out.
|
||||
*/
|
||||
return this;
|
||||
}
|
||||
|
||||
long remainingDelay = future.getDelay(TimeUnit.NANOSECONDS);
|
||||
// Actually reschedule the task
|
||||
if (false == FutureUtils.cancel(future)) {
|
||||
// Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here.
|
||||
return this;
|
||||
}
|
||||
|
||||
/*
|
||||
* Strangely enough getting here doesn't mean that you actually cancelled the request, just that you probably did. If you stress
|
||||
* test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that.
|
||||
*/
|
||||
TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond);
|
||||
return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command);
|
||||
}
|
||||
|
||||
/**
|
||||
* Scale back remaining delay to fit the new delay.
|
||||
*/
|
||||
TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) {
|
||||
if (remainingDelay < 0 || newRequestsPerSecond == 0) {
|
||||
return timeValueNanos(0);
|
||||
}
|
||||
return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
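The rescheduling math in DelayedPrepareBulkRequest.newDelay above scales whatever sleep remains by the ratio of the old throttle to the new one. A small standalone sketch (the wrapper class is an assumption, and the multiplication is widened to double here to keep the arithmetic exact for large nanosecond values): speeding up from 2 to 4 requests per second halves a remaining 30 second sleep, while rethrottling to unlimited cancels it.

import java.util.concurrent.TimeUnit;

class RethrottleDelaySketch {
    static long newDelayNanos(long remainingDelayNanos, float requestsPerSecond, float newRequestsPerSecond) {
        if (remainingDelayNanos < 0 || newRequestsPerSecond == 0) {
            return 0; // unlimited, or the sleep is already due: wake up immediately
        }
        return Math.round(remainingDelayNanos * (double) requestsPerSecond / newRequestsPerSecond);
    }

    public static void main(String[] args) {
        long thirtySeconds = TimeUnit.SECONDS.toNanos(30);
        // 30s remaining at 2 requests/second, rethrottled to 4 requests/second: 15s remain.
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(newDelayNanos(thirtySeconds, 2f, 4f))); // 15
        // Rethrottled to "unlimited" (0): the remaining sleep is dropped entirely.
        System.out.println(newDelayNanos(thirtySeconds, 2f, 0f)); // 0
    }
}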
@@ -39,11 +39,13 @@ public class ReindexPlugin extends Plugin {
|
|||
public void onModule(ActionModule actionModule) {
|
||||
actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class);
|
||||
actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class);
|
||||
actionModule.registerAction(RethrottleAction.INSTANCE, TransportRethrottleAction.class);
|
||||
}
|
||||
|
||||
public void onModule(NetworkModule networkModule) {
|
||||
networkModule.registerRestHandler(RestReindexAction.class);
|
||||
networkModule.registerRestHandler(RestUpdateByQueryAction.class);
|
||||
networkModule.registerRestHandler(RestRethrottleAction.class);
|
||||
networkModule.registerTaskStatus(BulkByScrollTask.Status.NAME, BulkByScrollTask.Status::new);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.action.support.RestToXContentListener;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.POST;
|
||||
|
||||
public class RestRethrottleAction extends BaseRestHandler {
|
||||
private final TransportRethrottleAction action;
|
||||
|
||||
@Inject
|
||||
public RestRethrottleAction(Settings settings, RestController controller, Client client, TransportRethrottleAction action) {
|
||||
super(settings, client);
|
||||
this.action = action;
|
||||
controller.registerHandler(POST, "/_update_by_query/{taskId}/_rethrottle", this);
|
||||
controller.registerHandler(POST, "/_reindex/{taskId}/_rethrottle", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
|
||||
RethrottleRequest internalRequest = new RethrottleRequest();
|
||||
internalRequest.setTaskId(new TaskId(request.param("taskId")));
|
||||
Float requestsPerSecond = AbstractBaseReindexRestHandler.parseRequestsPerSecond(request);
|
||||
if (requestsPerSecond == null) {
|
||||
throw new IllegalArgumentException("requests_per_second is a required parameter");
|
||||
}
|
||||
internalRequest.setRequestsPerSecond(requestsPerSecond);
|
||||
action.execute(internalRequest, new RestToXContentListener<>(channel));
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class RethrottleAction extends Action<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> {
|
||||
public static final RethrottleAction INSTANCE = new RethrottleAction();
|
||||
public static final String NAME = "cluster:admin/reindex/rethrottle";
|
||||
|
||||
private RethrottleAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RethrottleRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RethrottleRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ListTasksResponse newResponse() {
|
||||
return new ListTasksResponse();
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A request to change throttling on a task.
|
||||
*/
|
||||
public class RethrottleRequest extends BaseTasksRequest<RethrottleRequest> {
|
||||
/**
|
||||
* The throttle to apply to all matching requests in sub-requests per second. 0 means set no throttle and that is the default.
|
||||
* Throttling is done between batches, as we start the next scroll requests. That way we can increase the scroll's timeout to make sure
|
||||
* that it contains any time that we might wait.
|
||||
*/
|
||||
private float requestsPerSecond = 0;
|
||||
|
||||
/**
|
||||
* The throttle to apply to all matching requests in sub-requests per second. 0 means set no throttle and that is the default.
|
||||
*/
|
||||
public float getRequestsPerSecond() {
|
||||
return requestsPerSecond;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the throttle to apply to all matching requests in sub-requests per second. 0 means set no throttle and that is the default.
|
||||
*/
|
||||
public RethrottleRequest setRequestsPerSecond(float requestsPerSecond) {
|
||||
this.requestsPerSecond = requestsPerSecond;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
ActionRequestValidationException validationException = super.validate();
|
||||
for (String action : getActions()) {
|
||||
switch (action) {
|
||||
case ReindexAction.NAME:
|
||||
case UpdateByQueryAction.NAME:
|
||||
continue;
|
||||
default:
|
||||
validationException = addValidationError(
|
||||
"Can only change the throttling on reindex or update-by-query. Not on [" + action + "]", validationException);
|
||||
}
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
}
|
|
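A hypothetical caller-side illustration of the validate() rule above; setActions is assumed to come from the inherited BaseTasksRequest, and the action name used is just a stand-in for anything that is not reindex or update-by-query.

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.index.reindex.RethrottleRequest;

class RethrottleValidationSketch {
    public static void main(String[] args) {
        RethrottleRequest request = new RethrottleRequest().setRequestsPerSecond(5f);
        request.setActions("indices:data/write/bulk"); // stand-in for a non-reindex action
        ActionRequestValidationException e = request.validate();
        System.out.println(e != null); // true: only reindex and update-by-query may be rethrottled
    }
}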
@@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Java API support for changing the throttle on reindex tasks while they are running.
|
||||
*/
|
||||
public class RethrottleRequestBuilder extends TasksRequestBuilder<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> {
|
||||
public RethrottleRequestBuilder(ElasticsearchClient client,
|
||||
Action<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> action) {
|
||||
super(client, action, new RethrottleRequest());
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the throttle to apply to all matching requests in sub-requests per second. 0 means set no throttle and that is the default.
|
||||
*/
|
||||
public RethrottleRequestBuilder setRequestsPerSecond(float requestsPerSecond) {
|
||||
request.setRequestsPerSecond(requestsPerSecond);
|
||||
return this;
|
||||
}
|
||||
}
|
|
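To show how the pieces above fit together from the Java API side, a hypothetical usage sketch: the ElasticsearchClient instance and the TaskId of a running reindex task are assumed, and execute() is the generic client action call rather than anything introduced by this change. The RethrottleRequestBuilder above can be obtained instead via RethrottleAction.INSTANCE.newRequestBuilder(client).

import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.reindex.RethrottleAction;
import org.elasticsearch.index.reindex.RethrottleRequest;
import org.elasticsearch.tasks.TaskId;

class RethrottleUsageSketch {
    // Slow a running reindex task down to 1.5 requests per second and return its updated task info.
    static ListTasksResponse rethrottle(ElasticsearchClient client, TaskId reindexTaskId) {
        RethrottleRequest request = new RethrottleRequest().setRequestsPerSecond(1.5f);
        request.setTaskId(reindexTaskId); // same setter RestRethrottleAction uses above
        return client.execute(RethrottleAction.INSTANCE, request).actionGet();
    }
}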
@@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;

public class TransportRethrottleAction extends TransportTasksAction<BulkByScrollTask, RethrottleRequest, ListTasksResponse, TaskInfo> {
    @Inject
    public TransportRethrottleAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
            TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, RethrottleAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
                indexNameExpressionResolver, RethrottleRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
    }

    @Override
    protected TaskInfo taskOperation(RethrottleRequest request, BulkByScrollTask task) {
        // Apply the new throttle and fetch status of the task. The user might not want that status but they likely do and it is cheap.
        task.rethrottle(request.getRequestsPerSecond());
        return task.taskInfo(clusterService.localNode(), true);
    }

    @Override
    protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    @Override
    protected ListTasksResponse newResponse(RethrottleRequest request, List<TaskInfo> tasks,
            List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions) {
        return new ListTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
    }

    @Override
    protected boolean accumulateExceptions() {
        return true;
    }
}

@ -35,7 +35,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionTestCase<
    @Before
    public void setupForTest() {
        threadPool = new ThreadPool(getTestName());
        task = new BulkByScrollTask(1, "test", "test", "test");
        task = new BulkByScrollTask(1, "test", "test", "test", 0);
    }

    @After

@ -88,6 +88,7 @@ import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff;
import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.contains;

@ -263,8 +264,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        }
        assertThat(client.scrollsCleared, contains(scrollId));

        // While we're mocking the threadPool lets also check that we incremented the throttle counter
        assertEquals(expectedDelay, task.getStatus().getThrottled());
        // When the task is rejected we don't increment the throttled timer
        assertEquals(timeValueMillis(0), task.getStatus().getThrottled());
    }

    /**

@ -362,7 +363,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        assertThat((double) action.perfectlyThrottledBatchTime(randomInt()), closeTo(0f, 0f));

        int total = between(0, 1000000);
        mainRequest.setRequestsPerSecond(1);
        task.rethrottle(1);
        assertThat((double) action.perfectlyThrottledBatchTime(total),
                closeTo(TimeUnit.SECONDS.toNanos(total), TimeUnit.SECONDS.toNanos(1)));
    }

@ -373,11 +374,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
         * delay for throttling.
         */
        AtomicReference<TimeValue> capturedDelay = new AtomicReference<>();
        AtomicReference<Runnable> capturedCommand = new AtomicReference<>();
        threadPool.shutdown();
        threadPool = new ThreadPool(getTestName()) {
            @Override
            public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
                capturedDelay.set(delay);
                capturedCommand.set(command);
                return null;
            }
        };

@ -386,7 +389,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        action.setScroll(scrollId());

        // We'd like to get about 1 request a second
        mainRequest.setRequestsPerSecond(1f);
        task.rethrottle(1f);
        // Make the last scroll look nearly instant
        action.setLastBatchStartTime(System.nanoTime());
        // The last batch had 100 documents

@ -403,6 +406,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {

        // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it.
        assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L)));

        // Running the command ought to increment the delay counter on the task.
        capturedCommand.get().run();
        assertEquals(capturedDelay.get(), task.getStatus().getThrottled());
    }

    private long retryTestCase(boolean failWithRejection) throws Exception {

@ -539,9 +546,17 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        threadPool = new ThreadPool(getTestName()) {
            @Override
            public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
                taskManager.cancel(task, reason, (Set<String> s) -> {});
                command.run();
                return null;
                /*
                 * This is called twice:
                 * 1. To schedule the throttling. When that happens we immediately cancel the task.
                 * 2. After the task is canceled.
                 * Both times we delegate to the standard behavior so the task is scheduled as expected so it can be cancelled and all
                 * that good stuff.
                 */
                if (delay.nanos() > 0) {
                    generic().execute(() -> taskManager.cancel(task, reason, (Set<String> s) -> {}));
                }
                return super.schedule(delay, name, command);
            }
        };

@ -554,10 +569,11 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        long total = randomIntBetween(0, Integer.MAX_VALUE);
        InternalSearchHits hits = new InternalSearchHits(null, total, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        action.onScrollResponse(timeValueSeconds(0), new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        // Use a long delay here so the test will time out if the cancellation doesn't reschedule the throttled task
        action.onScrollResponse(timeValueMinutes(10), new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));

        // Now that we've got our cancel we'll just verify that it all came through allright
        assertEquals(reason, listener.get().getReasonCancelled());
        // Now that we've got our cancel we'll just verify that it all came through all right
        assertEquals(reason, listener.get(10, TimeUnit.SECONDS).getReasonCancelled());
        if (previousScrollSet) {
            // Canceled tasks always start to clear the scroll before they die.
            assertThat(client.scrollsCleared, contains(scrollId));

@ -20,17 +20,31 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.Before;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class BulkByScrollTaskTests extends ESTestCase {
    private BulkByScrollTask task;

    @Before
    public void createTask() {
        task = new BulkByScrollTask(1, "test_type", "test_action", "test");
        task = new BulkByScrollTask(1, "test_type", "test_action", "test", 0);
    }

    public void testBasicData() {

@ -104,14 +118,89 @@ public class BulkByScrollTaskTests extends ESTestCase {
    }

    public void testStatusHatesNegatives() {
        expectThrows(IllegalArgumentException.class, status(-1, 0, 0, 0, 0, 0, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, -1, 0, 0, 0, 0, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, -1, 0, 0, 0, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, 0, -1, 0, 0, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, -1, 0, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, -1, 0, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, 0, -1, 0));
        expectThrows(IllegalArgumentException.class, status(0, 0, 0, 0, 0, 0, 0, -1));
    }

    /**
     * Build a task status with only some values. Used for testing negative values.
     */
    private ThrowingRunnable status(long total, long updated, long created, long deleted, int batches, long versionConflicts,
            long noops, long retries) {
        TimeValue throttle = parseTimeValue(randomPositiveTimeValue(), "test");
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(-1, 0, 0, 0, 0, 0, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, -1, 0, 0, 0, 0, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, -1, 0, 0, 0, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, -1, 0, 0, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, -1, 0, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, -1, 0, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, -1, 0, throttle, null));
        expectThrows(IllegalArgumentException.class, () -> new BulkByScrollTask.Status(0, 0, 0, 0, 0, 0, 0, -1, throttle, null));
        TimeValue throttledUntil = parseTimeValue(randomPositiveTimeValue(), "test");

        return () -> new BulkByScrollTask.Status(-1, 0, 0, 0, 0, 0, 0, 0, throttle, 0f, null, throttledUntil);
    }

    /**
     * Furiously rethrottles a delayed request to make sure that we never run it twice.
     */
    public void testDelayAndRethrottle() throws IOException, InterruptedException {
        List<Throwable> errors = new CopyOnWriteArrayList<>();
        AtomicBoolean done = new AtomicBoolean();
        int threads = between(1, 10);

        /*
         * We never end up waiting this long because the test rethrottles over and over again, ratcheting down the delay a random amount
         * each time.
         */
        float originalRequestsPerSecond = (float) randomDoubleBetween(0, 10000, true);
        task.rethrottle(originalRequestsPerSecond);
        TimeValue maxDelay = timeValueSeconds(between(1, 5));
        assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L));
        ThreadPool threadPool = new ThreadPool(getTestName()) {
            @Override
            public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
                assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos())));
                return super.schedule(delay, name, command);
            }
        };
        try {
            task.delayPrepareBulkRequest(threadPool, maxDelay, new AbstractRunnable() {
                @Override
                protected void doRun() throws Exception {
                    boolean oldValue = done.getAndSet(true);
                    if (oldValue) {
                        throw new RuntimeException("Ran twice oh no!");
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    errors.add(t);
                }
            });

            // Rethrottle on a random number of threads, one of which is this thread.
            Runnable test = () -> {
                try {
                    int rethrottles = 0;
                    while (false == done.get()) {
                        float requestsPerSecond = (float) randomDoubleBetween(0, originalRequestsPerSecond * 2, true);
                        task.rethrottle(requestsPerSecond);
                        rethrottles += 1;
                    }
                    logger.info("Rethrottled [{}] times", rethrottles);
                } catch (Exception e) {
                    errors.add(e);
                }
            };
            for (int i = 1; i < threads; i++) {
                threadPool.generic().execute(test);
            }
            test.run();
        } finally {
            // Other threads should finish up quickly as they are checking the same AtomicBoolean.
            threadPool.shutdown();
            threadPool.awaitTermination(10, TimeUnit.SECONDS);
        }
        assertThat(errors, empty());
    }
}

@ -51,7 +51,7 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;

/**
 * Utilities for testing reindex and update-by-query cancelation. This whole class isn't thread safe. Luckily we run out tests in separate
 * Utilities for testing reindex and update-by-query cancellation. This whole class isn't thread safe. Luckily we run out tests in separate
 * jvms.
 */
public class CancelTestUtils {

@ -38,6 +38,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.List;

import static java.lang.Math.abs;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;

@ -122,7 +123,8 @@ public class RoundTripTests extends ESTestCase {
    private BulkByScrollTask.Status randomStatus() {
        return new BulkByScrollTask.Status(randomPositiveLong(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                randomPositiveInt(), randomPositiveLong(), randomPositiveLong(), randomPositiveLong(),
                parseTimeValue(randomPositiveTimeValue(), "test"), random().nextBoolean() ? null : randomSimpleString(random()));
                parseTimeValue(randomPositiveTimeValue(), "test"), abs(random().nextFloat()),
                random().nextBoolean() ? null : randomSimpleString(random()), parseTimeValue(randomPositiveTimeValue(), "test"));
    }

    private List<Failure> randomIndexingFailures() {

@ -198,6 +200,8 @@ public class RoundTripTests extends ESTestCase {
        assertEquals(expected.getNoops(), actual.getNoops());
        assertEquals(expected.getRetries(), actual.getRetries());
        assertEquals(expected.getThrottled(), actual.getThrottled());
        assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f);
        assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled());
        assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil());
    }
}

@ -93,6 +93,7 @@
      tasks.list:
        wait_for_completion: true
        task_id: $task
  - is_false: node_failures

---
"Response format for version conflict":

@ -148,3 +148,15 @@
          dest:
            index: dest
            timestamp: "123"

---
"requests_per_second cannot be an empty string":
  - do:
      catch: /requests_per_second cannot be an empty string/
      reindex:
        requests_per_second: ""
        body:
          source:
            from: 1
          dest:
            index: dest

@ -51,3 +51,177 @@
  - lt: {throttled_millis: 4000}
  - gte: { took: 1000 }
  - is_false: task

---
"Rethrottle":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: source
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 2
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 3
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        body:
          source:
            index: source
            size: 1
          dest:
            index: dest
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: unlimited
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

---
"Rethrottle to -1 which also means unlimited":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: source
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 2
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 3
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        body:
          source:
            index: source
            size: 1
          dest:
            index: dest
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: -1
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

---
"Rethrottle but not unlimited":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: source
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 2
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 3
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      reindex:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        body:
          source:
            index: source
            size: 1
          dest:
            index: dest
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: 1
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

@ -54,6 +54,7 @@
      tasks.list:
        wait_for_completion: true
        task_id: $task
  - is_false: node_failures

---
"Response for version conflict":

@ -25,3 +25,17 @@
      update_by_query:
        index: test
        size: -4

---
"requests_per_second cannot be an empty string":
  - do:
      index:
        index: test
        type: test
        id: 1
        body: { "text": "test" }
  - do:
      catch: /requests_per_second cannot be an empty string/
      update_by_query:
        index: test
        requests_per_second: ''

@ -37,3 +37,153 @@
  - match: {updated: 3}
  - gt: {throttled_millis: 1000}
  - lt: {throttled_millis: 4000}

---
"Rethrottle":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update_by_query:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        index: test
        scroll_size: 1
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: unlimited
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

---
"Rethrottle to -1 which also means unlimited":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update_by_query:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        index: test
        scroll_size: 1
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: -1
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

---
"Rethrottle but not unlimited":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
  - do:
      cluster.health:
        wait_for_status: yellow
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      index:
        index: test
        type: foo
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  - do:
      update_by_query:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        index: test
        scroll_size: 1
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex.rethrottle:
        requests_per_second: 1
        task_id: $task

  - do:
      tasks.list:
        wait_for_completion: true
        task_id: $task

@ -46,20 +46,6 @@ The specification contains:
The `methods` and `url.paths` elements list all possible HTTP methods and URLs for the endpoint;
it is the responsibility of the developer to use this information for a sensible API on the target platform.

# Utilities

The repository contains some utilities in the `utils` directory:

* The `thor api:generate:spec` will generate the basic JSON specification from Java source code
* The `thor api:generate:code` generates Ruby source code and tests from the specs, and can be extended
  to generate assets in another programming language

Run `bundle install` and then `thor list` in the _utils_ folder.

The full command to generate the api spec is:

    thor api:spec:generate --output=myfolder --elasticsearch=/path/to/es

## License

This software is licensed under the Apache License, version 2 ("ALv2").

@ -0,0 +1,24 @@
{
  "reindex.rethrottle": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html",
    "methods": ["POST"],
    "url": {
      "path": "/_reindex/{task_id}/_rethrottle",
      "paths": ["/_reindex/{task_id}/_rethrottle", "/_update_by_query/{task_id}/_rethrottle"],
      "parts": {
        "task_id": {
          "type": "string",
          "description": "The task id to rethrottle"
        }
      },
      "params": {
        "requests_per_second": {
          "type": "float",
          "default": 0,
          "description": "The throttle to set on this request in sub-requests per second. 0 means set no throttle. As does \"unlimited\". Otherwise it must be a float."
        }
      }
    },
    "body": null
  }
}

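A quick usage sketch of the endpoint described by this spec (illustrative only; it mirrors the rethrottle steps in the YAML tests earlier in this diff, with $task standing for a previously captured task id):

  - do:
      reindex.rethrottle:
        requests_per_second: unlimited
        task_id: $task
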
@ -7,7 +7,7 @@
      "paths": ["/_tasks/_cancel", "/_tasks/{task_id}/_cancel"],
      "parts": {
        "task_id": {
          "type": "number",
          "type": "string",
          "description": "Cancel the task with specified id"
        }
      },

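For illustration, the string form this change allows is a node id plus task number, matching the /.+:\d+/ pattern asserted in the tests above (the id below is made up):

  - do:
      tasks.cancel:
        task_id: oTUltX4IQMOUUVeiohTt8A:12345
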
@ -35,7 +35,14 @@
      "wait_for_completion": {
        "type": "boolean",
        "description": "Wait for the matching tasks to complete (default: false)"
      },
      "group_by": {
        "type" : "enum",
        "description": "Group tasks by nodes or parent/child relationships",
        "options" : ["nodes", "parents"],
        "default" : "nodes"
      }

    }
  },
  "body": null

@ -11,3 +11,9 @@

  - is_true: nodes
  - is_true: nodes.$master.roles

  - do:
      tasks.list:
        group_by: parents

  - is_true: tasks