Merge branch 'master' into index-lifecycle

This commit is contained in:
Tal Levy 2018-05-16 15:46:44 -07:00
commit 4e757fff21
101 changed files with 4923 additions and 1334 deletions

View File

@ -21,6 +21,8 @@ package org.elasticsearch.client;
import org.apache.http.Header;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
@ -63,4 +65,26 @@ public final class ClusterClient {
restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
}
/**
* Get current tasks using the Task Management API
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
*/
public ListTasksResponse listTasks(ListTasksRequest request, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent,
emptySet(), headers);
}
/**
* Asynchronously get current tasks using the Task Management API
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
*/
public void listTasksAsync(ListTasksRequest request, ActionListener<ListTasksResponse> listener, Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent,
listener, emptySet(), headers);
}
}
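For illustration, here is a minimal usage sketch of the two new methods, assuming an existing RestHighLevelClient named client (the variable names are made up):

// List all tasks currently running in the cluster and block until the response arrives.
// Note that listTasks(...) declares IOException, so the calling code needs to handle it.
ListTasksRequest listRequest = new ListTasksRequest();
ListTasksResponse listResponse = client.cluster().listTasks(listRequest);

// Or asynchronously, supplying an ActionListener that receives the parsed response.
client.cluster().listTasksAsync(listRequest, new ActionListener<ListTasksResponse>() {
    @Override
    public void onResponse(ListTasksResponse response) {
        // inspect response.getTasks() here
    }

    @Override
    public void onFailure(Exception e) {
        // handle the failure
    }
});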

View File

@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@ -45,8 +46,8 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
@ -83,6 +84,7 @@ import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.mustache.SearchTemplateRequest;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.tasks.TaskId;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@ -606,6 +608,22 @@ final class RequestConverters {
return request;
}
static Request listTasks(ListTasksRequest listTaskRequest) {
if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
throw new IllegalArgumentException("TaskId cannot be used for list tasks request");
}
Request request = new Request(HttpGet.METHOD_NAME, "/_tasks");
Params params = new Params(request);
params.withTimeout(listTaskRequest.getTimeout())
.withDetailed(listTaskRequest.getDetailed())
.withWaitForCompletion(listTaskRequest.getWaitForCompletion())
.withParentTaskId(listTaskRequest.getParentTaskId())
.withNodes(listTaskRequest.getNodes())
.withActions(listTaskRequest.getActions())
.putParam("group_by", "none");
return request;
}
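As a rough illustration of what the converter above produces (the example values are invented; the parameter names come from the Params helpers below):

ListTasksRequest example = new ListTasksRequest();
example.setDetailed(true);
example.setActions("cluster:*");
example.setNodes("nodeId1", "nodeId2");
// listTasks(example) builds approximately:
//   GET /_tasks?detailed=true&actions=cluster:*&nodes=nodeId1,nodeId2&group_by=none
// group_by=none is always added, presumably so the response comes back as a flat
// task list that ListTasksResponse.fromXContent can parse.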
static Request rollover(RolloverRequest rolloverRequest) throws IOException {
String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
.addPathPart(rolloverRequest.getNewIndexName()).build();
@ -932,6 +950,41 @@ final class RequestConverters {
return this;
}
Params withDetailed(boolean detailed) {
if (detailed) {
return putParam("detailed", Boolean.TRUE.toString());
}
return this;
}
Params withWaitForCompletion(boolean waitForCompletion) {
if (waitForCompletion) {
return putParam("wait_for_completion", Boolean.TRUE.toString());
}
return this;
}
Params withNodes(String[] nodes) {
if (nodes != null && nodes.length > 0) {
return putParam("nodes", String.join(",", nodes));
}
return this;
}
Params withActions(String[] actions) {
if (actions != null && actions.length > 0) {
return putParam("actions", String.join(",", actions));
}
return this;
}
Params withParentTaskId(TaskId parentTaskId) {
if (parentTaskId != null && parentTaskId.isSet()) {
return putParam("parent_task_id", parentTaskId.toString());
}
return this;
}
Params withVerify(boolean verify) {
if (verify) {
return putParam("verify", Boolean.TRUE.toString());

View File

@ -20,6 +20,9 @@
package org.elasticsearch.client;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
@ -29,13 +32,16 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.TaskInfo;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.emptyList;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -105,4 +111,29 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
assertThat(exception.getMessage(), equalTo(
"Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]"));
}
public void testListTasks() throws IOException {
ListTasksRequest request = new ListTasksRequest();
ListTasksResponse response = execute(request, highLevelClient().cluster()::listTasks, highLevelClient().cluster()::listTasksAsync);
assertThat(response, notNullValue());
assertThat(response.getNodeFailures(), equalTo(emptyList()));
assertThat(response.getTaskFailures(), equalTo(emptyList()));
// It's possible that there are other tasks running besides 'cluster:monitor/tasks/lists' and its 'cluster:monitor/tasks/lists[n]' children
assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
boolean listTasksFound = false;
for (TaskGroup taskGroup : response.getTaskGroups()) {
TaskInfo parent = taskGroup.getTaskInfo();
if ("cluster:monitor/tasks/lists".equals(parent.getAction())) {
assertThat(taskGroup.getChildTasks().size(), equalTo(1));
TaskGroup childGroup = taskGroup.getChildTasks().iterator().next();
assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true));
TaskInfo child = childGroup.getTaskInfo();
assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]"));
assertThat(child.getParentTaskId(), equalTo(parent.getTaskId()));
listTasksFound = true;
}
}
assertTrue("List tasks were not found", listTasksFound);
}
}

View File

@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@ -111,6 +112,7 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;
@ -142,6 +144,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXC
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
public class RequestConvertersTests extends ESTestCase {
@ -188,8 +191,7 @@ public class RequestConvertersTests extends ESTestCase {
int numberOfRequests = randomIntBetween(0, 32);
for (int i = 0; i < numberOfRequests; i++) {
MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4));
if (randomBoolean()) {
item.routing(randomAlphaOfLength(4));
}
@ -268,7 +270,7 @@ public class RequestConvertersTests extends ESTestCase {
public void testIndicesExistEmptyIndices() {
expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest()));
expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null)));
}
private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
@ -422,7 +424,8 @@ public class RequestConvertersTests extends ESTestCase {
setRandomLocal(getSettingsRequest, expectedParams);
if (randomBoolean()) {
// the request object will not have include_defaults present unless it is set to
// true
getSettingsRequest.includeDefaults(randomBoolean());
if (getSettingsRequest.includeDefaults()) {
expectedParams.put("include_defaults", Boolean.toString(true));
@ -966,22 +969,21 @@ public class RequestConvertersTests extends ESTestCase {
bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE));
bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
assertEquals(
"Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]",
exception.getMessage());
}
{
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
bulkRequest.add(new UpdateRequest("index", "type", "2")
.doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
.upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
assertEquals(
"Mismatching content-type found for request with content-type [SMILE], " + "previous requests have content-type [JSON]",
exception.getMessage());
}
{
XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML);
@ -1022,9 +1024,10 @@ public class RequestConvertersTests extends ESTestCase {
setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
// rarely skip setting the search source completely
if (frequently()) {
// frequently set the search source to have some content, otherwise leave it
// empty but still set it
if (frequently()) {
if (randomBoolean()) {
searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
@ -1094,7 +1097,8 @@ public class RequestConvertersTests extends ESTestCase {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
for (int i = 0; i < numberOfSearchRequests; i++) {
SearchRequest searchRequest = randomSearchRequest(() -> {
// No need to return a very complex SearchSourceBuilder here, that is tested
// elsewhere
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.from(randomInt(10));
searchSourceBuilder.size(randomIntBetween(20, 100));
@ -1102,14 +1106,13 @@ public class RequestConvertersTests extends ESTestCase {
});
// scroll is not supported in the current msearch api, so unset it:
searchRequest.scroll((Scroll) null);
// only expand_wildcards, ignore_unavailable and allow_no_indices can be
// specified from msearch api, so unset other options:
IndicesOptions randomlyGenerated = searchRequest.indicesOptions();
IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions();
searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(),
randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(),
msearchDefault.allowAliasesToMultipleIndices(), msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases()));
multiSearchRequest.add(searchRequest);
}
@ -1134,8 +1137,8 @@ public class RequestConvertersTests extends ESTestCase {
requests.add(searchRequest);
};
MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())),
REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null,
xContentRegistry(), true);
assertEquals(requests, multiSearchRequest.requests());
}
@ -1230,7 +1233,7 @@ public class RequestConvertersTests extends ESTestCase {
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
getAliasesRequest.indices(indices);
// the HEAD endpoint requires at least an alias or an index
boolean hasIndices = indices != null && indices.length > 0;
String[] aliases;
if (hasIndices) {
@ -1261,15 +1264,15 @@ public class RequestConvertersTests extends ESTestCase {
public void testExistsAliasNoAliasNoIndex() {
{
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
() -> RequestConverters.existsAlias(getAliasesRequest));
assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
}
{
GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null);
getAliasesRequest.indices((String[]) null);
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
() -> RequestConverters.existsAlias(getAliasesRequest));
assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
}
}
@ -1279,14 +1282,10 @@ public class RequestConvertersTests extends ESTestCase {
String[] indices = randomIndicesNames(0, 5);
String[] fields = generateRandomStringArray(5, 10, false, false);
FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().indices(indices).fields(fields);
Map<String, String> indicesOptionsParams = new HashMap<>();
setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, fieldCapabilitiesRequest::indicesOptions, indicesOptionsParams);
Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest);
@ -1301,12 +1300,13 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(endpoint.toString(), request.getEndpoint());
assertEquals(4, request.getParameters().size());
// Note that we don't check the field param value explicitly, as field names are
// passed through
// a hash set before being added to the request, and can appear in a
// non-deterministic order.
assertThat(request.getParameters(), hasKey("fields"));
String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields"));
assertEquals(new HashSet<>(Arrays.asList(fields)), new HashSet<>(Arrays.asList(requestFields)));
for (Map.Entry<String, String> param : indicesOptionsParams.entrySet()) {
assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue()));
@ -1465,6 +1465,66 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(expectedParams, request.getParameters());
}
public void testListTasks() {
{
ListTasksRequest request = new ListTasksRequest();
Map<String, String> expectedParams = new HashMap<>();
if (randomBoolean()) {
request.setDetailed(randomBoolean());
if (request.getDetailed()) {
expectedParams.put("detailed", "true");
}
}
if (randomBoolean()) {
request.setWaitForCompletion(randomBoolean());
if (request.getWaitForCompletion()) {
expectedParams.put("wait_for_completion", "true");
}
}
if (randomBoolean()) {
String timeout = randomTimeValue();
request.setTimeout(timeout);
expectedParams.put("timeout", timeout);
}
if (randomBoolean()) {
if (randomBoolean()) {
TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong());
request.setParentTaskId(taskId);
expectedParams.put("parent_task_id", taskId.toString());
} else {
request.setParentTask(TaskId.EMPTY_TASK_ID);
}
}
if (randomBoolean()) {
String[] nodes = generateRandomStringArray(10, 8, false);
request.setNodes(nodes);
if (nodes.length > 0) {
expectedParams.put("nodes", String.join(",", nodes));
}
}
if (randomBoolean()) {
String[] actions = generateRandomStringArray(10, 8, false);
request.setActions(actions);
if (actions.length > 0) {
expectedParams.put("actions", String.join(",", actions));
}
}
expectedParams.put("group_by", "none");
Request httpRequest = RequestConverters.listTasks(request);
assertThat(httpRequest, notNullValue());
assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME));
assertThat(httpRequest.getEntity(), nullValue());
assertThat(httpRequest.getEndpoint(), equalTo("/_tasks"));
assertThat(httpRequest.getParameters(), equalTo(expectedParams));
}
{
ListTasksRequest request = new ListTasksRequest();
request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()));
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request));
assertEquals("TaskId cannot be used for list tasks request", exception.getMessage());
}
}
public void testGetRepositories() {
Map<String, String> expectedParams = new HashMap<>();
StringBuilder endpoint = new StringBuilder("/_snapshot");
@ -1474,7 +1534,7 @@ public class RequestConvertersTests extends ESTestCase {
setRandomLocal(getRepositoriesRequest, expectedParams);
if (randomBoolean()) {
String[] entries = new String[] { "a", "b", "c" };
getRepositoriesRequest.repositories(entries);
endpoint.append("/" + String.join(",", entries));
}
@ -1513,9 +1573,8 @@ public class RequestConvertersTests extends ESTestCase {
names.put("-#template", "-%23template");
names.put("foo^bar", "foo%5Ebar");
PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(randomFrom(names.keySet()))
.patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false)));
if (randomBoolean()) {
putTemplateRequest.order(randomInt());
}
@ -1572,14 +1631,12 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals("/a/b", endpointBuilder.build());
}
{
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create");
assertEquals("/a/b/_create", endpointBuilder.build());
}
{
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create");
assertEquals("/a/b/c/_create", endpointBuilder.build());
}
{
@ -1638,13 +1695,12 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals("/foo%5Ebar", endpointBuilder.build());
}
{
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2").addPathPartAsIs("_search");
assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build());
}
{
EndpointBuilder endpointBuilder = new EndpointBuilder().addCommaSeparatedPathParts(new String[] { "index1", "index2" })
.addPathPartAsIs("cache/clear");
assertEquals("/index1,index2/cache/clear", endpointBuilder.build());
}
}
@ -1652,12 +1708,12 @@ public class RequestConvertersTests extends ESTestCase {
public void testEndpoint() {
assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id"));
assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint"));
assertEquals("/index1,index2", RequestConverters.endpoint(new String[] { "index1", "index2" }));
assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint"));
assertEquals("/index1,index2/type1,type2/_endpoint",
RequestConverters.endpoint(new String[] { "index1", "index2" }, new String[] { "type1", "type2" }, "_endpoint"));
assertEquals("/index1,index2/_endpoint/suffix1,suffix2",
RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint", new String[] { "suffix1", "suffix2" }));
}
public void testCreateContentType() {
@ -1673,20 +1729,22 @@ public class RequestConvertersTests extends ESTestCase {
XContentType bulkContentType = randomBoolean() ? xContentType : null;
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR),
bulkContentType));
assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported",
exception.getMessage());
exception = expectThrows(IllegalArgumentException.class,
() -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML),
bulkContentType));
assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported",
exception.getMessage());
XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON;
exception = expectThrows(IllegalArgumentException.class,
() -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType));
assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], "
+ "previous requests have content-type [" + xContentType + "]", exception.getMessage());
}
@ -1754,11 +1812,10 @@ public class RequestConvertersTests extends ESTestCase {
}
private static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter,
Map<String, String> expectedParams) {
if (randomBoolean()) {
setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
}
expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable()));
expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices()));

View File

@ -19,8 +19,14 @@
package org.elasticsearch.client.documentation;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
@ -31,14 +37,20 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyList;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
/**
* This class is used to generate the Java Cluster API documentation.
@ -177,4 +189,87 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testListTasks() throws IOException {
RestHighLevelClient client = highLevelClient();
{
// tag::list-tasks-request
ListTasksRequest request = new ListTasksRequest();
// end::list-tasks-request
// tag::list-tasks-request-filter
request.setActions("cluster:*"); // <1>
request.setNodes("nodeId1", "nodeId2"); // <2>
request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3>
// end::list-tasks-request-filter
// tag::list-tasks-request-detailed
request.setDetailed(true); // <1>
// end::list-tasks-request-detailed
// tag::list-tasks-request-wait-completion
request.setWaitForCompletion(true); // <1>
request.setTimeout(TimeValue.timeValueSeconds(50)); // <2>
request.setTimeout("50s"); // <3>
// end::list-tasks-request-wait-completion
}
ListTasksRequest request = new ListTasksRequest();
// tag::list-tasks-execute
ListTasksResponse response = client.cluster().listTasks(request);
// end::list-tasks-execute
assertThat(response, notNullValue());
// tag::list-tasks-response-tasks
List<TaskInfo> tasks = response.getTasks(); // <1>
// end::list-tasks-response-tasks
// tag::list-tasks-response-calc
Map<String, List<TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1>
List<TaskGroup> groups = response.getTaskGroups(); // <2>
// end::list-tasks-response-calc
// tag::list-tasks-response-failures
List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1>
List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2>
// end::list-tasks-response-failures
assertThat(response.getNodeFailures(), equalTo(emptyList()));
assertThat(response.getTaskFailures(), equalTo(emptyList()));
assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
}
public void testListTasksAsync() throws Exception {
RestHighLevelClient client = highLevelClient();
{
ListTasksRequest request = new ListTasksRequest();
// tag::list-tasks-execute-listener
ActionListener<ListTasksResponse> listener =
new ActionListener<ListTasksResponse>() {
@Override
public void onResponse(ListTasksResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::list-tasks-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::list-tasks-execute-async
client.cluster().listTasksAsync(request, listener); // <1>
// end::list-tasks-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@ -224,7 +224,7 @@ subprojects {
doLast {
// this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines
final List<String> expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003")
final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack-ml/NOTICE.txt")
final List<String> actualLines = Files.readAllLines(noticePath)
for (final String expectedLine : expectedLines) {
if (actualLines.contains(expectedLine) == false) {

View File

@ -201,17 +201,14 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each {
// use licenses from each of the bundled xpack plugins
Project xpack = project(':x-pack:plugin')
xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule ->
File licenses = new File(xpackModule.projectDir, 'licenses')
if (licenses.exists()) {
buildDefaultNotice.licensesDir licenses
}
copyModule(processDefaultOutputs, xpackModule)
copyLog4jProperties(buildDefaultLog4jConfig, xpackModule)
}
// make sure we have a clean task since we aren't a java project, but we have tasks that
// put stuff in the build dir

View File

@ -0,0 +1,101 @@
[[java-rest-high-cluster-list-tasks]]
=== List Tasks API
The List Tasks API allows you to get information about the tasks currently executing in the cluster.
[[java-rest-high-cluster-list-tasks-request]]
==== List Tasks Request
A `ListTasksRequest`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request]
--------------------------------------------------
There are no required parameters. By default the client will list all tasks and will not wait
for task completion.
==== Parameters
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-filter]
--------------------------------------------------
<1> Request only cluster-related tasks
<2> Request all tasks running on nodes nodeId1 and nodeId2
<3> Request only children of a particular task
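For reference, with the request converter added in this commit these filters translate to task management query parameters, roughly `GET /_tasks?actions=cluster:*&nodes=nodeId1,nodeId2&parent_task_id=parentTaskId:42&group_by=none` (the exact URL is illustrative; the client always adds `group_by=none`).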
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-detailed]
--------------------------------------------------
<1> Should the information include detailed, potentially slow to generate data. Defaults to `false`
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-wait-completion]
--------------------------------------------------
<1> Should this request wait for all found tasks to complete. Defaults to `false`
<2> Timeout for the request as a `TimeValue`. Applicable only if `setWaitForCompletion` is `true`.
Defaults to 30 seconds
<3> Timeout as a `String`
[[java-rest-high-cluster-list-tasks-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute]
--------------------------------------------------
[[java-rest-high-cluster-list-tasks-async]]
==== Asynchronous Execution
The asynchronous execution of a list tasks request requires both the
`ListTasksRequest` instance and an `ActionListener` instance to be
passed to the asynchronous method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-async]
--------------------------------------------------
<1> The `ListTasksRequest` to execute and the `ActionListener` to use
when the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `ListTasksResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of a failure. The raised exception is provided as an argument
[[java-rest-high-cluster-list-tasks-response]]
==== List Tasks Response
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-tasks]
--------------------------------------------------
<1> List of currently running tasks
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-calc]
--------------------------------------------------
<1> List of tasks grouped by a node
<2> List of tasks grouped by a parent task
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-failures]
--------------------------------------------------
<1> List of node failures
<2> List of task failures

View File

@ -104,8 +104,10 @@ include::indices/put_template.asciidoc[]
The Java High Level REST Client supports the following Cluster APIs:
* <<java-rest-high-cluster-put-settings>>
* <<java-rest-high-cluster-list-tasks>>
include::cluster/put_settings.asciidoc[]
include::cluster/list_tasks.asciidoc[]
== Snapshot APIs
@ -114,4 +116,4 @@ The Java High Level REST Client supports the following Snapshot APIs:
* <<java-rest-high-snapshot-get-repository>>
include::snapshot/get_repository.asciidoc[]
include::snapshot/create_repository.asciidoc[]

View File

@ -72,6 +72,7 @@ POST /_search
}
--------------------------------------------------
// CONSOLE
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
<1> The metric is called `"the_sum"`
<2> The `buckets_path` refers to the metric via a relative path `"the_sum"`
@ -136,6 +137,7 @@ POST /_search
}
--------------------------------------------------
// CONSOLE
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram
The `buckets_path` can also use `"_bucket_count"` and path to a multi-bucket aggregation to use the number of buckets
@ -231,6 +233,7 @@ include::pipeline/stats-bucket-aggregation.asciidoc[]
include::pipeline/extended-stats-bucket-aggregation.asciidoc[]
include::pipeline/percentiles-bucket-aggregation.asciidoc[]
include::pipeline/movavg-aggregation.asciidoc[]
include::pipeline/movfn-aggregation.asciidoc[]
include::pipeline/cumulative-sum-aggregation.asciidoc[]
include::pipeline/bucket-script-aggregation.asciidoc[]
include::pipeline/bucket-selector-aggregation.asciidoc[]

View File

@ -1,6 +1,10 @@
[[search-aggregations-pipeline-movavg-aggregation]]
=== Moving Average Aggregation
deprecated[6.4.0, The Moving Average aggregation has been deprecated in favor of the more general
<<search-aggregations-pipeline-movfn-aggregation,Moving Function Aggregation>>. The new Moving Function aggregation provides
all the same functionality as the Moving Average aggregation, but also provides more flexibility.]
Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average
value of that window. For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving
average with window size of `5` as follows:
@ -74,6 +78,7 @@ POST /_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals
<2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc)
@ -180,6 +185,7 @@ POST /_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
A `simple` model has no special settings to configure
@ -233,6 +239,7 @@ POST /_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
A `linear` model has no special settings to configure
@ -295,7 +302,7 @@ POST /_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
[[single_0.2alpha]]
.EWMA with window of size 10, alpha = 0.2
@ -355,6 +362,7 @@ POST /_search
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
In practice, the `alpha` value behaves very similarly in `holt` as `ewma`: small values produce more smoothing
and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult
@ -446,7 +454,7 @@ POST /_search
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[setup:sales] // TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
[[holt_winters_add]] [[holt_winters_add]]
.Holt-Winters moving average with window of size 120, alpha = 0.5, beta = 0.7, gamma = 0.3, period = 30 .Holt-Winters moving average with window of size 120, alpha = 0.5, beta = 0.7, gamma = 0.3, period = 30
@ -508,6 +516,7 @@ POST /_search
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[setup:sales] // TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
==== Prediction ==== Prediction
@ -550,6 +559,7 @@ POST /_search
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[setup:sales] // TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
The `simple`, `linear` and `ewma` models all produce "flat" predictions: they essentially converge on the mean The `simple`, `linear` and `ewma` models all produce "flat" predictions: they essentially converge on the mean
of the last value in the series, producing a flat: of the last value in the series, producing a flat:
@ -631,6 +641,7 @@ POST /_search
-------------------------------------------------- --------------------------------------------------
// CONSOLE // CONSOLE
// TEST[setup:sales] // TEST[setup:sales]
// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.]
<1> Minimization is enabled with the `minimize` parameter <1> Minimization is enabled with the `minimize` parameter

View File

@ -0,0 +1,633 @@
[[search-aggregations-pipeline-movfn-aggregation]]
=== Moving Function Aggregation
Given an ordered series of data, the Moving Function aggregation will slide a window across the data and allow the user to specify a custom
script that is executed on each window of data. For convenience, a number of common functions are predefined such as min/max, moving averages,
etc.
This is conceptually very similar to the <<search-aggregations-pipeline-movavg-aggregation, Moving Average>> pipeline aggregation, except
it provides more functionality.
==== Syntax
A `moving_fn` aggregation looks like this in isolation:
[source,js]
--------------------------------------------------
{
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.min(values)"
}
}
--------------------------------------------------
// NOTCONSOLE
.`moving_fn` Parameters
|===
|Parameter Name |Description |Required |Default Value
|`buckets_path` |Path to the metric of interest (see <<buckets-path-syntax, `buckets_path` Syntax>> for more details) |Required |
|`window` |The size of window to "slide" across the histogram. |Required |
|`script` |The script that should be executed on each window of data |Required |
|===
`moving_fn` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be
embedded like any other metric aggregation:
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{ <1>
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" } <2>
},
"the_movfn": {
"moving_fn": {
"buckets_path": "the_sum", <3>
"window": 10,
"script": "MovingFunctions.unweightedAvg(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
<1> A `date_histogram` named "my_date_histo" is constructed on the "date" field, with monthly intervals
<2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc)
<3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input.
Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally
add numeric metrics, such as a `sum`, inside of that histogram. Finally, the `moving_fn` is embedded inside the histogram.
The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see
<<buckets-path-syntax>> for a description of the syntax for `buckets_path`).
An example response from the above aggregation may look like:
[source,js]
--------------------------------------------------
{
"took": 11,
"timed_out": false,
"_shards": ...,
"hits": ...,
"aggregations": {
"my_date_histo": {
"buckets": [
{
"key_as_string": "2015/01/01 00:00:00",
"key": 1420070400000,
"doc_count": 3,
"the_sum": {
"value": 550.0
},
"the_movfn": {
"value": null
}
},
{
"key_as_string": "2015/02/01 00:00:00",
"key": 1422748800000,
"doc_count": 2,
"the_sum": {
"value": 60.0
},
"the_movfn": {
"value": 550.0
}
},
{
"key_as_string": "2015/03/01 00:00:00",
"key": 1425168000000,
"doc_count": 2,
"the_sum": {
"value": 375.0
},
"the_movfn": {
"value": 305.0
}
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/"took": 11/"took": $body.took/]
// TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/]
// TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/]
==== Custom user scripting
The Moving Function aggregation allows the user to specify any arbitrary script to define custom logic. The script is invoked each time a
new window of data is collected. These values are provided to the script in the `values` variable. The script should then perform some
kind of calculation and emit a single `double` as the result. Emitting `null` is not permitted, although `NaN` and +/- `Inf` are allowed.
For example, this script will simply return the first value from the window, or `NaN` if no values are available:
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "return values.length > 0 ? values[0] : Double.NaN"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
==== Pre-built Functions
For convenience, a number of functions have been prebuilt and are available inside the `moving_fn` script context:
- `max()`
- `min()`
- `sum()`
- `stdDev()`
- `unweightedAvg()`
- `linearWeightedAvg()`
- `ewma()`
- `holt()`
- `holtWinters()`
The functions are available from the `MovingFunctions` namespace. E.g. `MovingFunctions.max()`
===== max Function
This function accepts a collection of doubles and returns the maximum value in that window. `null` and `NaN` values are ignored; the maximum
is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result.
.`max(double[] values)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to find the maximum
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_moving_max": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.max(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== min Function
This function accepts a collection of doubles and returns the minimum value in that window. `null` and `NaN` values are ignored; the minimum
is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result.
.`min(double[] values)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to find the minimum
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_moving_min": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.min(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== sum Function
This function accepts a collection of doubles and returns the sum of the values in that window. `null` and `NaN` values are ignored;
the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result.
.`sum(double[] values)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to find the sum of
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_moving_sum": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.sum(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== stdDev Function
This function accepts a collection of doubles and an average, then returns the standard deviation of the values in that window.
`null` and `NaN` values are ignored; the standard deviation is only calculated over the real values. If the window is empty, or all values are
`null`/`NaN`, `0.0` is returned as the result.
.`stdDev(double[] values, double avg)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to find the standard deviation of
|`avg` |The average of the window
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_moving_sum": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
The `avg` parameter must be provided to the standard deviation function because different styles of averages can be computed on the window
(simple, linearly weighted, etc). The various moving averages that are detailed below can be used to calculate the average for the
standard deviation function.
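For illustration, the behavior can be sketched in Java as follows. This is a simplified sketch, not the actual `MovingFunctions` source: it skips gaps (`NaN` slots) in the window and computes the population standard deviation around the supplied average.
[source,java]
--------------------------------------------------
// Simplified sketch of a windowed standard deviation (illustrative only).
// Gaps in the window arrive as NaN and are skipped; the average is supplied
// by the caller, e.g. MovingFunctions.unweightedAvg(values).
static double stdDev(double[] values, double avg) {
    double sumOfSqrDiffs = 0;
    int count = 0;
    for (double v : values) {
        if (Double.isNaN(v) == false) {
            sumOfSqrDiffs += (v - avg) * (v - avg);
            count++;
        }
    }
    // an empty or all-NaN window yields 0.0, matching the description above
    return count == 0 ? 0.0 : Math.sqrt(sumOfSqrDiffs / count);
}
--------------------------------------------------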
===== unweightedAvg Function
The `unweightedAvg` function calculates the sum of all values in the window, then divides by the size of the window. It is effectively
a simple arithmetic mean of the window. The simple moving average does not perform any time-dependent weighting, which means
the values from a `simple` moving average tend to "lag" behind the real data.
`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are
`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN`
values.
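A simplified Java sketch of this behavior (illustrative only, not the actual `MovingFunctions` source):
[source,java]
--------------------------------------------------
// Simple arithmetic mean over the window: gaps (NaN) are skipped and the
// divisor is the count of real values, so an empty window yields NaN.
static double unweightedAvg(double[] values) {
    double sum = 0;
    int count = 0;
    for (double v : values) {
        if (Double.isNaN(v) == false) {
            sum += v;
            count++;
        }
    }
    return count == 0 ? Double.NaN : sum / count;
}
--------------------------------------------------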
.`unweightedAvg(double[] values)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to average
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.unweightedAvg(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== linearWeightedAvg Function
The `linearWeightedAvg` function assigns a linear weighting to points in the series, such that "older" datapoints (e.g. those at
the beginning of the window) contribute linearly less to the total average. The linear weighting helps reduce
the "lag" behind the data's mean, since older points have less influence.
If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result.
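The weighting can be pictured with the following simplified Java sketch (illustrative only, not the actual `MovingFunctions` source): the oldest real value receives weight 1 and each newer value a progressively larger weight.
[source,java]
--------------------------------------------------
// Linearly weighted mean: weight 1 for the oldest real value in the window,
// increasing by one toward the newest, so recent buckets dominate the average.
static double linearWeightedAvg(double[] values) {
    double weightedSum = 0;
    double totalWeight = 0;
    int weight = 1;
    for (double v : values) {
        if (Double.isNaN(v) == false) {
            weightedSum += v * weight;
            totalWeight += weight;
            weight++;
        }
    }
    return totalWeight == 0 ? Double.NaN : weightedSum / totalWeight;
}
--------------------------------------------------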
.`linearWeightedAvg(double[] values)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to average
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.linearWeightedAvg(values)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== ewma Function
The `ewma` function (aka "single-exponential") is similar to the `linearWeightedAvg` function,
except older data-points become exponentially less important,
rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha`
setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger
portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the
moving average. This tends to make the moving average track the data more closely but with less smoothing.
`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are
`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN`
values.
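A simplified Java sketch of the single-exponential weighting (illustrative only, not the exact `MovingFunctions` source):
[source,java]
--------------------------------------------------
// Exponentially weighted moving average: each real value is blended into the
// running average with weight alpha, so older values decay exponentially.
static double ewma(double[] values, double alpha) {
    double avg = Double.NaN;
    for (double v : values) {
        if (Double.isNaN(v)) {
            continue;                  // skip gaps in the window
        }
        avg = Double.isNaN(avg) ? v : alpha * v + (1 - alpha) * avg;
    }
    return avg;                        // NaN if no real values were seen
}
--------------------------------------------------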
.`ewma(double[] values, double alpha)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to average
|`alpha` |Exponential decay
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.ewma(values, 0.3)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
===== holt Function
The `holt` function (aka "double exponential") incorporates a second exponential term which
tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The
double exponential model calculates two values internally: a "level" and a "trend".
The level calculation is similar to `ewma`, and is an exponentially weighted view of the data. The difference is
that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original series.
The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of the
smoothed data). The trend value is also exponentially weighted.
Values are produced by multiplying the level and trend components.
`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are
`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN`
values.
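The level/trend bookkeeping can be sketched with the textbook additive formulation below. This is illustrative only; the exact way `MovingFunctions.holt` seeds and combines the two components may differ.
[source,java]
--------------------------------------------------
// Double exponential (Holt) smoothing sketch: "level" is an ewma-style view of
// the data, "trend" is an exponentially weighted slope between smoothed values.
static double holt(double[] values, double alpha, double beta) {
    double level = Double.NaN;
    double trend = 0;
    for (double v : values) {
        if (Double.isNaN(v)) {
            continue;                                   // skip gaps in the window
        }
        if (Double.isNaN(level)) {
            level = v;                                  // seed with the first real value
        } else {
            double previousLevel = level;
            level = alpha * v + (1 - alpha) * (previousLevel + trend);
            trend = beta * (level - previousLevel) + (1 - beta) * trend;
        }
    }
    // textbook Holt combines level and trend for the next smoothed value
    return Double.isNaN(level) ? Double.NaN : level + trend;
}
--------------------------------------------------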
.`holt(double[] values, double alpha, double beta)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to smooth
|`alpha` |Level decay value
|`beta` |Trend decay value
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "MovingFunctions.holt(values, 0.3, 0.1)"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
In practice, the `alpha` value behaves very similarly in `holt` as in `ewma`: small values produce more smoothing
and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult
to see. Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger
values emphasize short-term trends.
===== holtWinters Function
The `holtWinters` function (aka "triple exponential") incorporates a third exponential term which
tracks the seasonal aspect of your data. This aggregation therefore smooths based on three components: "level", "trend"
and "seasonality".
The level and trend calculation is identical to `holt`. The seasonal calculation looks at the difference between
the current point, and the point one period earlier.
Holt-Winters requires a little more handholding than the other moving averages. You need to specify the "periodicity"
of your data: e.g. if your data has cyclic trends every 7 days, you would set `period = 7`. Similarly if there was
a monthly trend, you would set it to `30`. There is currently no periodicity detection, although that is planned
for future enhancements.
`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are
`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN`
values.
.`holtWinters(double[] values, double alpha, double beta, double gamma, int period, boolean multiplicative)` Parameters
|===
|Parameter Name |Description
|`values` |The window of values to smooth
|`alpha` |Level decay value
|`beta` |Trend decay value
|`gamma` |Seasonality decay value
|`period` |The periodicity of the data
|`multiplicative` |True if you wish to use multiplicative holt-winters, false to use additive
|===
[source,js]
--------------------------------------------------
POST /_search
{
"size": 0,
"aggs": {
"my_date_histo":{
"date_histogram":{
"field":"date",
"interval":"1M"
},
"aggs":{
"the_sum":{
"sum":{ "field": "price" }
},
"the_movavg": {
"moving_fn": {
"buckets_path": "the_sum",
"window": 10,
"script": "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}"
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:sales]
[WARNING]
======
Multiplicative Holt-Winters works by dividing each data point by the seasonal value. This is problematic if any of
your data is zero, or if there are gaps in the data (since this results in a divide-by-zero). To combat this, the
`mult` Holt-Winters pads all values by a very small amount (1*10^-10^) so that all values are non-zero. This affects
the result, but only minimally. If your data is non-zero, or you prefer to see `NaN` when zeros are encountered,
you can disable this behavior with `pad: false`.
======
===== "Cold Start"
Unfortunately, due to the nature of Holt-Winters, it requires two periods of data to "bootstrap" the algorithm. This
means that your `window` must always be *at least* twice the size of your period. An exception will be thrown if it
isn't. It also means that Holt-Winters will not emit a value for the first `2 * period` buckets; the current algorithm
does not backcast.
You'll notice in the above example we have an `if ()` statement checking the size of values. This is checking to make sure
we have two periods' worth of data (`5 * 2`, where 5 is the period specified in the `holtWinters` function) before calling
the holt-winters function.

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.ExtensiblePlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.ScriptPlugin;
@ -39,6 +40,7 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -55,18 +57,34 @@ import java.util.function.Supplier;
*/ */
public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin {
private final Map<ScriptContext<?>, List<Whitelist>> extendedWhitelists = new HashMap<>(); private static final Map<ScriptContext<?>, List<Whitelist>> whitelists;
/*
* Contexts from Core that need custom whitelists can add them to the map below.
* Whitelist resources should be added as appropriately named, separate files
* under Painless' resources
*/
static {
Map<ScriptContext<?>, List<Whitelist>> map = new HashMap<>();
// Moving Function Pipeline Agg
List<Whitelist> movFn = new ArrayList<>(Whitelist.BASE_WHITELISTS);
movFn.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.aggs.movfn.txt"));
map.put(MovingFunctionScript.CONTEXT, movFn);
whitelists = map;
}
@Override @Override
public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) {
Map<ScriptContext<?>, List<Whitelist>> contextsWithWhitelists = new HashMap<>(); Map<ScriptContext<?>, List<Whitelist>> contextsWithWhitelists = new HashMap<>();
for (ScriptContext<?> context : contexts) { for (ScriptContext<?> context : contexts) {
// we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI
List<Whitelist> whitelists = extendedWhitelists.get(context); List<Whitelist> contextWhitelists = whitelists.get(context);
if (whitelists == null) { if (contextWhitelists == null) {
whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); contextWhitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS);
} }
contextsWithWhitelists.put(context, whitelists); contextsWithWhitelists.put(context, contextWhitelists);
} }
return new PainlessScriptEngine(settings, contextsWithWhitelists); return new PainlessScriptEngine(settings, contextsWithWhitelists);
} }
@ -80,7 +98,7 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens
public void reloadSPI(ClassLoader loader) { public void reloadSPI(ClassLoader loader) {
for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) {
for (Map.Entry<ScriptContext<?>, List<Whitelist>> entry : extension.getContextWhitelists().entrySet()) { for (Map.Entry<ScriptContext<?>, List<Whitelist>> entry : extension.getContextWhitelists().entrySet()) {
List<Whitelist> existing = extendedWhitelists.computeIfAbsent(entry.getKey(), List<Whitelist> existing = whitelists.computeIfAbsent(entry.getKey(),
c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); c -> new ArrayList<>(Whitelist.BASE_WHITELISTS));
existing.addAll(entry.getValue()); existing.addAll(entry.getValue());
} }

View File

@ -0,0 +1,32 @@
#
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file contains a whitelist for the Moving Function pipeline aggregator in core
class org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions {
double max(double[])
double min(double[])
double sum(double[])
double stdDev(double[], double)
double unweightedAvg(double[])
double linearWeightedAvg(double[])
double ewma(double[], double)
double holt(double[], double, double)
double holtWinters(double[], double, double, double, int, boolean)
}

View File

@ -0,0 +1,315 @@
# Sanity integration test to make sure the custom context and whitelist work for moving_fn pipeline agg
#
setup:
- skip:
version: " - 6.4.0"
reason: "moving_fn added in 6.4.0"
- do:
indices.create:
index: test
body:
mappings:
_doc:
properties:
value_field:
type: integer
date:
type: date
- do:
bulk:
refresh: true
body:
- index:
_index: test
_type: _doc
_id: 1
- date: "2017-01-01T00:00:00"
value_field: 1
- index:
_index: test
_type: _doc
_id: 2
- date: "2017-01-02T00:00:00"
value_field: 2
- index:
_index: test
_type: _doc
_id: 3
- date: "2017-01-03T00:00:00"
value_field: 3
- index:
_index: test
_type: _doc
_id: 4
- date: "2017-01-04T00:00:00"
value_field: 4
- index:
_index: test
_type: _doc
_id: 5
- date: "2017-01-05T00:00:00"
value_field: 5
- index:
_index: test
_type: _doc
_id: 6
- date: "2017-01-06T00:00:00"
value_field: 6
- do:
indices.refresh:
index: [test]
---
"max":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.max(values)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
- is_false: aggregations.the_histo.buckets.0.the_mov_fn.value
- match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 }
- match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 2.0 }
- match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 3.0 }
- match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 4.0 }
- match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 5.0 }
---
"min":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.min(values)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
- is_false: aggregations.the_histo.buckets.0.the_mov_fn.value
- match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 }
- match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 1.0 }
- match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 1.0 }
- match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 2.0 }
- match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 3.0 }
---
"sum":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.sum(values)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
- match: { aggregations.the_histo.buckets.0.the_mov_fn.value: 0.0 }
- match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 }
- match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 3.0 }
- match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 6.0 }
- match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 9.0 }
- match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 12.0 }
---
"unweightedAvg":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.unweightedAvg(values)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
---
"linearWeightedAvg":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.linearWeightedAvg(values)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
---
"ewma":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.ewma(values, 0.1)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
---
"holt":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.holt(values, 0.1, 0.1)"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
---
"holtWinters":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 1
script: "if (values.length > 1) { MovingFunctions.holtWinters(values, 0.1, 0.1, 0.1, 1, true)}"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }
---
"stdDev":
- do:
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: 3
script: "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))"
- match: { hits.total: 6 }
- length: { hits.hits: 0 }

View File

@ -161,7 +161,7 @@ public class PercolatorFieldMapper extends FieldMapper {
} }
static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) { static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) {
RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, rangeType, context.indexCreatedVersion()); RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, rangeType);
// For now no doc values, because in processQuery(...) only the Lucene range fields get added: // For now no doc values, because in processQuery(...) only the Lucene range fields get added:
builder.docValues(false); builder.docValues(false);
return builder.build(context); return builder.build(context);

View File

@ -115,7 +115,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
@Override @Override
protected ReindexRequest buildRequest(RestRequest request) throws IOException { protected ReindexRequest buildRequest(RestRequest request) throws IOException {
if (request.hasParam("pipeline")) { if (request.hasParam("pipeline")) {
throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parmaeter. " throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. "
+ "Specify it in the [dest] object instead."); + "Specify it in the [dest] object instead.");
} }
ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest()); ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest());

View File

@ -149,7 +149,7 @@ public class RestReindexActionTests extends ESTestCase {
request.withParams(singletonMap("pipeline", "doesn't matter")); request.withParams(singletonMap("pipeline", "doesn't matter"));
Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build())); Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build()));
assertEquals("_reindex doesn't support [pipeline] as a query parmaeter. Specify it in the [dest] object instead.", e.getMessage()); assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage());
} }
public void testSetScrollTimeout() throws IOException { public void testSetScrollTimeout() throws IOException {

View File

@ -90,6 +90,8 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.PROXY_PASSWORD_SETTING,
S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING,
S3ClientSettings.MAX_RETRIES_SETTING, S3ClientSettings.MAX_RETRIES_SETTING,
S3ClientSettings.USE_THROTTLE_RETRIES_SETTING); S3ClientSettings.USE_THROTTLE_RETRIES_SETTING,
S3Repository.ACCESS_KEY_SETTING,
S3Repository.SECRET_KEY_SETTING);
} }
} }

View File

@ -21,7 +21,10 @@ package org.elasticsearch.repositories.s3;
import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.StorageClass; import com.amazonaws.services.s3.model.StorageClass;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -29,6 +32,12 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
import org.elasticsearch.rest.AbstractRestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
@ -38,9 +47,14 @@ import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.mock;
public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase {
@ -81,7 +95,9 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
.put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
.put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)
.put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass))); .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass)
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret")));
} }
@Override @Override
@ -106,4 +122,32 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
})); }));
} }
} }
public void testInsecureRepositoryCredentials() throws Exception {
final String repositoryName = "testInsecureRepositoryCredentials";
createTestRepository(repositoryName);
final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class);
final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY, mock(RestController.class),
internalCluster().getInstance(SettingsFilter.class));
final RestRequest getRepoRequest = new FakeRestRequest();
getRepoRequest.params().put("repository", repositoryName);
final CountDownLatch getRepoLatch = new CountDownLatch(1);
final AtomicReference<AssertionError> getRepoError = new AtomicReference<>();
getRepoAction.handleRequest(getRepoRequest, new AbstractRestChannel(getRepoRequest, true) {
@Override
public void sendResponse(RestResponse response) {
try {
assertThat(response.content().utf8ToString(), not(containsString("not_used_but_this_is_a_secret")));
} catch (final AssertionError ex) {
getRepoError.set(ex);
}
getRepoLatch.countDown();
}
}, nodeClient);
getRepoLatch.await();
if (getRepoError.get() != null) {
throw getRepoError.get();
}
}
} }

View File

@ -0,0 +1,46 @@
setup:
- skip:
version: " - 6.4.0"
reason: "moving_fn added in 6.4.0"
---
"Bad window":
- do:
catch: /\[window\] must be a positive, non-zero integer\./
search:
body:
size: 0
aggs:
the_histo:
date_histogram:
field: "date"
interval: "1d"
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: -1
script: "MovingFunctions.windowMax(values)"
---
"Not under date_histo":
- do:
catch: /\[window\] must be a positive, non-zero integer\./
search:
body:
size: 0
aggs:
the_avg:
avg:
field: "value_field"
the_mov_fn:
moving_fn:
buckets_path: "the_avg"
window: -1
script: "MovingFunctions.windowMax(values)"

View File

@ -195,7 +195,13 @@ setup:
--- ---
"Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation":
- skip:
features: warnings
version: " - 6.4.0"
reason: "deprecation added in 6.4.0"
- do: - do:
warnings:
- 'The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.'
search: search:
typed_keys: true typed_keys: true
body: body:

View File

@ -21,17 +21,20 @@ package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.ExceptionsHelper.detailedMessage; import static org.elasticsearch.ExceptionsHelper.detailedMessage;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/** /**
* Information about task operation failures * Information about task operation failures
@ -39,7 +42,10 @@ import static org.elasticsearch.ExceptionsHelper.detailedMessage;
* The class is final due to serialization limitations * The class is final due to serialization limitations
*/ */
public final class TaskOperationFailure implements Writeable, ToXContentFragment { public final class TaskOperationFailure implements Writeable, ToXContentFragment {
private static final String TASK_ID = "task_id";
private static final String NODE_ID = "node_id";
private static final String STATUS = "status";
private static final String REASON = "reason";
private final String nodeId; private final String nodeId;
private final long taskId; private final long taskId;
@ -48,6 +54,21 @@ public final class TaskOperationFailure implements Writeable, ToXContentFragment
private final RestStatus status; private final RestStatus status;
private static final ConstructingObjectParser<TaskOperationFailure, Void> PARSER =
new ConstructingObjectParser<>("task_info", true, constructorObjects -> {
int i = 0;
String nodeId = (String) constructorObjects[i++];
long taskId = (long) constructorObjects[i++];
ElasticsearchException reason = (ElasticsearchException) constructorObjects[i];
return new TaskOperationFailure(nodeId, taskId, reason);
});
static {
PARSER.declareString(constructorArg(), new ParseField(NODE_ID));
PARSER.declareLong(constructorArg(), new ParseField(TASK_ID));
PARSER.declareObject(constructorArg(), (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(REASON));
}
public TaskOperationFailure(String nodeId, long taskId, Exception e) { public TaskOperationFailure(String nodeId, long taskId, Exception e) {
this.nodeId = nodeId; this.nodeId = nodeId;
this.taskId = taskId; this.taskId = taskId;
@ -98,13 +119,17 @@ public final class TaskOperationFailure implements Writeable, ToXContentFragment
return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]"; return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]";
} }
public static TaskOperationFailure fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("task_id", getTaskId()); builder.field(TASK_ID, getTaskId());
builder.field("node_id", getNodeId()); builder.field(NODE_ID, getNodeId());
builder.field("status", status.name()); builder.field(STATUS, status.name());
if (reason != null) { if (reason != null) {
builder.field("reason"); builder.field(REASON);
builder.startObject(); builder.startObject();
ElasticsearchException.generateThrowableXContent(builder, params, reason); ElasticsearchException.generateThrowableXContent(builder, params, reason);
builder.endObject(); builder.endObject();
@ -112,5 +137,4 @@ public final class TaskOperationFailure implements Writeable, ToXContentFragment
return builder; return builder;
} }
} }

View File

@ -19,16 +19,19 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list; package org.elasticsearch.action.admin.cluster.node.tasks.list;
import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.tasks.TaskInfo;
@ -40,10 +43,16 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/** /**
* Returns the list of tasks currently running on the nodes * Returns the list of tasks currently running on the nodes
*/ */
public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject {
private static final String TASKS = "tasks";
private static final String TASK_FAILURES = "task_failures";
private static final String NODE_FAILURES = "node_failures";
private List<TaskInfo> tasks; private List<TaskInfo> tasks;
@ -56,11 +65,31 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
} }
public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures,
List<? extends FailedNodeException> nodeFailures) { List<? extends ElasticsearchException> nodeFailures) {
super(taskFailures, nodeFailures); super(taskFailures, nodeFailures);
this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks)); this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
} }
private static final ConstructingObjectParser<ListTasksResponse, Void> PARSER =
new ConstructingObjectParser<>("list_tasks_response", true,
constructingObjects -> {
int i = 0;
@SuppressWarnings("unchecked")
List<TaskInfo> tasks = (List<TaskInfo>) constructingObjects[i++];
@SuppressWarnings("unchecked")
List<TaskOperationFailure> tasksFailures = (List<TaskOperationFailure>) constructingObjects[i++];
@SuppressWarnings("unchecked")
List<ElasticsearchException> nodeFailures = (List<ElasticsearchException>) constructingObjects[i];
return new ListTasksResponse(tasks, tasksFailures, nodeFailures);
});
static {
PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS));
PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES));
PARSER.declareObjectArray(optionalConstructorArg(),
(parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES));
}
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); super.readFrom(in);
@ -159,7 +188,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
builder.endObject(); builder.endObject();
} }
} }
builder.startObject("tasks"); builder.startObject(TASKS);
for(TaskInfo task : entry.getValue()) { for(TaskInfo task : entry.getValue()) {
builder.startObject(task.getTaskId().toString()); builder.startObject(task.getTaskId().toString());
task.toXContent(builder, params); task.toXContent(builder, params);
@ -177,7 +206,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
*/ */
public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException {
toXContentCommon(builder, params); toXContentCommon(builder, params);
builder.startObject("tasks"); builder.startObject(TASKS);
for (TaskGroup group : getTaskGroups()) { for (TaskGroup group : getTaskGroups()) {
builder.field(group.getTaskInfo().getTaskId().toString()); builder.field(group.getTaskInfo().getTaskId().toString());
group.toXContent(builder, params); group.toXContent(builder, params);
@ -191,7 +220,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
*/ */
public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException {
toXContentCommon(builder, params); toXContentCommon(builder, params);
builder.startArray("tasks"); builder.startArray(TASKS);
for (TaskInfo taskInfo : getTasks()) { for (TaskInfo taskInfo : getTasks()) {
builder.startObject(); builder.startObject();
taskInfo.toXContent(builder, params); taskInfo.toXContent(builder, params);
@ -204,14 +233,14 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
toXContentGroupedByParents(builder, params); toXContentGroupedByNone(builder, params);
builder.endObject(); builder.endObject();
return builder; return builder;
} }
private void toXContentCommon(XContentBuilder builder, Params params) throws IOException { private void toXContentCommon(XContentBuilder builder, Params params) throws IOException {
if (getTaskFailures() != null && getTaskFailures().size() > 0) { if (getTaskFailures() != null && getTaskFailures().size() > 0) {
builder.startArray("task_failures"); builder.startArray(TASK_FAILURES);
for (TaskOperationFailure ex : getTaskFailures()){ for (TaskOperationFailure ex : getTaskFailures()){
builder.startObject(); builder.startObject();
builder.value(ex); builder.value(ex);
@ -221,8 +250,8 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
} }
if (getNodeFailures() != null && getNodeFailures().size() > 0) { if (getNodeFailures() != null && getNodeFailures().size() > 0) {
builder.startArray("node_failures"); builder.startArray(NODE_FAILURES);
for (FailedNodeException ex : getNodeFailures()) { for (ElasticsearchException ex : getNodeFailures()) {
builder.startObject(); builder.startObject();
ex.toXContent(builder, params); ex.toXContent(builder, params);
builder.endObject(); builder.endObject();
@ -231,6 +260,10 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
} }
} }
public static ListTasksResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override @Override
public String toString() { public String toString() {
return Strings.toString(this); return Strings.toString(this);

View File

@ -239,7 +239,7 @@ public class IndicesOptions {
} }
public void writeIndicesOptions(StreamOutput out) throws IOException { public void writeIndicesOptions(StreamOutput out) throws IOException {
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeEnumSet(options); out.writeEnumSet(options);
out.writeEnumSet(expandWildcards); out.writeEnumSet(expandWildcards);
} else { } else {
@ -248,7 +248,7 @@ public class IndicesOptions {
} }
public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException { public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
return new IndicesOptions(in.readEnumSet(Option.class), in.readEnumSet(WildcardStates.class)); return new IndicesOptions(in.readEnumSet(Option.class), in.readEnumSet(WildcardStates.class));
} else { } else {
byte id = in.readByte(); byte id = in.readByte();

View File

@ -42,9 +42,9 @@ import static org.elasticsearch.ExceptionsHelper.rethrowAndSuppress;
*/ */
public class BaseTasksResponse extends ActionResponse { public class BaseTasksResponse extends ActionResponse {
private List<TaskOperationFailure> taskFailures; private List<TaskOperationFailure> taskFailures;
private List<FailedNodeException> nodeFailures; private List<ElasticsearchException> nodeFailures;
public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures) { public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<? extends ElasticsearchException> nodeFailures) {
this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures)); this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures));
this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures)); this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures));
} }
@ -59,7 +59,7 @@ public class BaseTasksResponse extends ActionResponse {
/** /**
* The list of node failures exception. * The list of node failures exception.
*/ */
public List<FailedNodeException> getNodeFailures() { public List<ElasticsearchException> getNodeFailures() {
return nodeFailures; return nodeFailures;
} }
@ -99,7 +99,7 @@ public class BaseTasksResponse extends ActionResponse {
exp.writeTo(out); exp.writeTo(out);
} }
out.writeVInt(nodeFailures.size()); out.writeVInt(nodeFailures.size());
for (FailedNodeException exp : nodeFailures) { for (ElasticsearchException exp : nodeFailures) {
exp.writeTo(out); exp.writeTo(out);
} }
} }

View File

@ -41,6 +41,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
@ -128,12 +129,21 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster
Version.CURRENT, Version.CURRENT,
changes.get().v1().size(), changes.get().v1().size(),
changes.get().v2().size()); changes.get().v2().size());
threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2()));
final ThreadContext threadContext = threadPool.getThreadContext();
try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
threadContext.markAsSystemContext();
threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2()));
}
} }
} }
} }
void updateTemplates(Map<String, BytesReference> changes, Set<String> deletions) { void updateTemplates(Map<String, BytesReference> changes, Set<String> deletions) {
if (threadPool.getThreadContext().isSystemContext() == false) {
throw new IllegalStateException("template updates from the template upgrade service should always happen in a system context");
}
for (Map.Entry<String, BytesReference> change : changes.entrySet()) { for (Map.Entry<String, BytesReference> change : changes.entrySet()) {
PutIndexTemplateRequest request = PutIndexTemplateRequest request =
new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON); new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON);
@ -141,7 +151,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster
client.admin().indices().putTemplate(request, new ActionListener<PutIndexTemplateResponse>() { client.admin().indices().putTemplate(request, new ActionListener<PutIndexTemplateResponse>() {
@Override @Override
public void onResponse(PutIndexTemplateResponse response) { public void onResponse(PutIndexTemplateResponse response) {
if(updatesInProgress.decrementAndGet() == 0) { if (updatesInProgress.decrementAndGet() == 0) {
logger.info("Finished upgrading templates to version {}", Version.CURRENT); logger.info("Finished upgrading templates to version {}", Version.CURRENT);
} }
if (response.isAcknowledged() == false) { if (response.isAcknowledged() == false) {
@ -151,7 +161,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster
@Override @Override
public void onFailure(Exception e) { public void onFailure(Exception e) {
if(updatesInProgress.decrementAndGet() == 0) { if (updatesInProgress.decrementAndGet() == 0) {
logger.info("Templates were upgraded to version {}", Version.CURRENT); logger.info("Templates were upgraded to version {}", Version.CURRENT);
} }
logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e); logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e);
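
The hunk above stashes the caller's thread context and switches to a system context before dispatching the template upgrade, and updateTemplates() now refuses to run outside a system context. Below is a minimal, illustrative sketch of that stash-and-mark pattern using only the ThreadContext calls visible in this hunk; the helper class and the Runnable parameter are hypothetical and not part of the change.

    import org.elasticsearch.common.util.concurrent.ThreadContext;

    // Illustrative helper, not part of the commit above.
    class SystemContextSketch {
        static void runAsSystem(ThreadContext threadContext, Runnable task) {
            // stashContext() returns a StoredContext that restores the caller's context
            // when closed, so the system flag cannot leak out of this block.
            try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                threadContext.markAsSystemContext();
                assert threadContext.isSystemContext();
                task.run();
            }
        }
    }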

View File

@ -40,7 +40,6 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
@ -93,8 +92,8 @@ public class RangeFieldMapper extends FieldMapper {
private Boolean coerce; private Boolean coerce;
private Locale locale; private Locale locale;
public Builder(String name, RangeType type, Version indexVersionCreated) { public Builder(String name, RangeType type) {
super(name, new RangeFieldType(type, indexVersionCreated), new RangeFieldType(type, indexVersionCreated)); super(name, new RangeFieldType(type), new RangeFieldType(type));
builder = this; builder = this;
locale = Locale.ROOT; locale = Locale.ROOT;
} }
@ -174,7 +173,7 @@ public class RangeFieldMapper extends FieldMapper {
@Override @Override
public Mapper.Builder<?,?> parse(String name, Map<String, Object> node, public Mapper.Builder<?,?> parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException { ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(name, type, parserContext.indexVersionCreated()); Builder builder = new Builder(name, type);
TypeParsers.parseField(builder, name, node, parserContext); TypeParsers.parseField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next(); Map.Entry<String, Object> entry = iterator.next();
@ -205,7 +204,7 @@ public class RangeFieldMapper extends FieldMapper {
protected FormatDateTimeFormatter dateTimeFormatter; protected FormatDateTimeFormatter dateTimeFormatter;
protected DateMathParser dateMathParser; protected DateMathParser dateMathParser;
RangeFieldType(RangeType type, Version indexVersionCreated) { RangeFieldType(RangeType type) {
super(); super();
this.rangeType = Objects.requireNonNull(type); this.rangeType = Objects.requireNonNull(type);
setTokenized(false); setTokenized(false);

View File

@ -1,83 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.store;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.FilterDirectory;
/**
* Utils for working with {@link Directory} classes.
*/
public final class DirectoryUtils {
private DirectoryUtils() {} // no instance
static <T extends Directory> Directory getLeafDirectory(FilterDirectory dir, Class<T> targetClass) {
Directory current = dir.getDelegate();
while (true) {
if ((current instanceof FilterDirectory)) {
if (targetClass != null && targetClass.isAssignableFrom(current.getClass())) {
break;
}
current = ((FilterDirectory) current).getDelegate();
} else {
break;
}
}
return current;
}
/**
* Tries to extract the leaf of the {@link Directory} if the directory is a {@link FilterDirectory} and cast
* it to the given target class or returns <code>null</code> if the leaf is not assignable to the target class.
* If the given {@link Directory} is a concrete directory it will treated as a leaf and the above applies.
*/
public static <T extends Directory> T getLeaf(Directory dir, Class<T> targetClass) {
return getLeaf(dir, targetClass, null);
}
/**
* Tries to extract the leaf of the {@link Directory} if the directory is a {@link FilterDirectory} and cast
* it to the given target class or returns the given default value, if the leaf is not assignable to the target class.
* If the given {@link Directory} is a concrete directory it will treated as a leaf and the above applies.
*/
public static <T extends Directory> T getLeaf(Directory dir, Class<T> targetClass, T defaultValue) {
Directory d = dir;
if (dir instanceof FilterDirectory) {
d = getLeafDirectory((FilterDirectory) dir, targetClass);
}
if (d instanceof FileSwitchDirectory) {
T leaf = getLeaf(((FileSwitchDirectory) d).getPrimaryDir(), targetClass);
if (leaf == null) {
d = getLeaf(((FileSwitchDirectory) d).getSecondaryDir(), targetClass, defaultValue);
} else {
d = leaf;
}
}
if (d != null && targetClass.isAssignableFrom(d.getClass())) {
return targetClass.cast(d);
} else {
return defaultValue;
}
}
}

View File

@ -103,18 +103,17 @@ public class RestListTasksAction extends BaseRestHandler {
return new BytesRestResponse(RestStatus.OK, builder); return new BytesRestResponse(RestStatus.OK, builder);
} }
}; };
} else if ("none".equals(groupBy)) { } else if ("parents".equals(groupBy)) {
return new RestBuilderListener<T>(channel) { return new RestBuilderListener<T>(channel) {
@Override @Override
public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception { public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
builder.startObject(); builder.startObject();
response.toXContentGroupedByNone(builder, channel.request()); response.toXContentGroupedByParents(builder, channel.request());
builder.endObject(); builder.endObject();
return new BytesRestResponse(RestStatus.OK, builder); return new BytesRestResponse(RestStatus.OK, builder);
} }
}; };
} else if ("none".equals(groupBy)) {
} else if ("parents".equals(groupBy)) {
return new RestToXContentListener<>(channel); return new RestToXContentListener<>(channel);
} else { } else {
throw new IllegalArgumentException("[group_by] must be one of [nodes], [parents] or [none] but was [" + groupBy + "]"); throw new IllegalArgumentException("[group_by] must be one of [nodes], [parents] or [none] but was [" + groupBy + "]");
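
For reference, the three group_by values dispatched above can be exercised against the _tasks endpoint. Below is a hedged sketch using the low-level REST client; the host, port and client setup are assumptions for illustration and are not part of this change.

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    import java.io.IOException;

    public class ListTasksGroupBySketch {
        public static void main(String[] args) throws IOException {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                Response byNodes = client.performRequest("GET", "/_tasks?group_by=nodes");
                Response byParents = client.performRequest("GET", "/_tasks?group_by=parents");
                Response flat = client.performRequest("GET", "/_tasks?group_by=none");
                // any other value is rejected with the IllegalArgumentException shown above
            }
        }
    }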

View File

@ -30,6 +30,7 @@ import java.util.stream.Stream;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript;
/** /**
* Manages building {@link ScriptService}. * Manages building {@link ScriptService}.
@ -48,7 +49,8 @@ public class ScriptModule {
FilterScript.CONTEXT, FilterScript.CONTEXT,
SimilarityScript.CONTEXT, SimilarityScript.CONTEXT,
SimilarityWeightScript.CONTEXT, SimilarityWeightScript.CONTEXT,
TemplateScript.CONTEXT TemplateScript.CONTEXT,
MovingFunctionScript.CONTEXT
).collect(Collectors.toMap(c -> c.name, Function.identity())); ).collect(Collectors.toMap(c -> c.name, Function.identity()));
} }

View File

@ -220,6 +220,8 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersM
import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel; import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;
import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchPhase;
@ -514,6 +516,11 @@ public class SearchModule {
SerialDiffPipelineAggregationBuilder::new, SerialDiffPipelineAggregationBuilder::new,
SerialDiffPipelineAggregator::new, SerialDiffPipelineAggregator::new,
SerialDiffPipelineAggregationBuilder::parse)); SerialDiffPipelineAggregationBuilder::parse));
registerPipelineAggregation(new PipelineAggregationSpec(
MovFnPipelineAggregationBuilder.NAME,
MovFnPipelineAggregationBuilder::new,
MovFnPipelineAggregator::new,
MovFnPipelineAggregationBuilder::parse));
registerFromPlugin(plugins, SearchPlugin::getPipelineAggregations, this::registerPipelineAggregation); registerFromPlugin(plugins, SearchPlugin::getPipelineAggregations, this::registerPipelineAggregation);
} }

View File

@ -19,6 +19,10 @@
package org.elasticsearch.search.aggregations.bucket.histogram; package org.elasticsearch.search.aggregations.bucket.histogram;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.rounding.DateTimeUnit;
@ -27,8 +31,13 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldType.Relation;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.BucketOrder;
@ -44,6 +53,8 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;
import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSourceType;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
@ -351,36 +362,121 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
return NAME; return NAME;
} }
/*
* NOTE: this can't be done in rewrite() because the timezone is then also used on the
* coordinating node in order to generate missing buckets, which may cross a transition
* even though data on the shards doesn't.
*/
DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException {
final DateTimeZone tz = timeZone();
if (field() != null &&
tz != null &&
tz.isFixed() == false &&
field() != null &&
script() == null) {
final MappedFieldType ft = context.fieldMapper(field());
final IndexReader reader = context.getIndexReader();
if (ft != null && reader != null) {
Long anyInstant = null;
final IndexNumericFieldData fieldData = context.getForField(ft);
for (LeafReaderContext ctx : reader.leaves()) {
AtomicNumericFieldData leafFD = ((IndexNumericFieldData) fieldData).load(ctx);
SortedNumericDocValues values = leafFD.getLongValues();
if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
anyInstant = values.nextValue();
break;
}
}
if (anyInstant != null) {
final long prevTransition = tz.previousTransition(anyInstant);
final long nextTransition = tz.nextTransition(anyInstant);
// We need all not only values but also rounded values to be within
// [prevTransition, nextTransition].
final long low;
DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit();
if (intervalAsUnit != null) {
final DateTimeField dateTimeField = intervalAsUnit.field(tz);
low = dateTimeField.roundCeiling(prevTransition);
} else {
final TimeValue intervalAsMillis = getIntervalAsTimeValue();
low = Math.addExact(prevTransition, intervalAsMillis.millis());
}
// rounding rounds down, so 'nextTransition' is a good upper bound
final long high = nextTransition;
final DocValueFormat format = ft.docValueFormat(null, null);
final String formattedLow = format.format(low);
final String formattedHigh = format.format(high);
if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh,
true, false, tz, null, context) == Relation.WITHIN) {
// All values in this reader have the same offset despite daylight saving times.
// This is very common for location-based timezones such as Europe/Paris in
// combination with time-based indices.
return DateTimeZone.forOffsetMillis(tz.getOffset(anyInstant));
}
}
}
}
return tz;
}
@Override @Override
protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config, protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException { AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
Rounding rounding = createRounding(); final DateTimeZone tz = timeZone();
final Rounding rounding = createRounding(tz);
final DateTimeZone rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext());
final Rounding shardRounding;
if (tz == rewrittenTimeZone) {
shardRounding = rounding;
} else {
shardRounding = createRounding(rewrittenTimeZone);
}
ExtendedBounds roundedBounds = null; ExtendedBounds roundedBounds = null;
if (this.extendedBounds != null) { if (this.extendedBounds != null) {
// parse any string bounds to longs and round // parse any string bounds to longs and round
roundedBounds = this.extendedBounds.parseAndValidate(name, context, config.format()).round(rounding); roundedBounds = this.extendedBounds.parseAndValidate(name, context, config.format()).round(rounding);
} }
return new DateHistogramAggregatorFactory(name, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount, return new DateHistogramAggregatorFactory(name, config, offset, order, keyed, minDocCount,
rounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); rounding, shardRounding, roundedBounds, context, parent, subFactoriesBuilder, metaData);
} }
private Rounding createRounding() { /** Return the interval as a date time unit if applicable. If this returns
Rounding.Builder tzRoundingBuilder; * {@code null} then it means that the interval is expressed as a fixed
* {@link TimeValue} and may be accessed via
* {@link #getIntervalAsTimeValue()}. */
private DateTimeUnit getIntervalAsDateTimeUnit() {
if (dateHistogramInterval != null) { if (dateHistogramInterval != null) {
DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); return DATE_FIELD_UNITS.get(dateHistogramInterval.toString());
if (dateTimeUnit != null) {
tzRoundingBuilder = Rounding.builder(dateTimeUnit);
} else {
// the interval is a time value?
tzRoundingBuilder = Rounding.builder(
TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"));
}
} else {
// the interval is an integer time value in millis?
tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval));
} }
if (timeZone() != null) { return null;
tzRoundingBuilder.timeZone(timeZone()); }
/**
* Get the interval as a {@link TimeValue}. Should only be called if
* {@link #getIntervalAsDateTimeUnit()} returned {@code null}.
*/
private TimeValue getIntervalAsTimeValue() {
if (dateHistogramInterval != null) {
return TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval");
} else {
return TimeValue.timeValueMillis(interval);
}
}
private Rounding createRounding(DateTimeZone timeZone) {
Rounding.Builder tzRoundingBuilder;
DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit();
if (intervalAsUnit != null) {
tzRoundingBuilder = Rounding.builder(intervalAsUnit);
} else {
tzRoundingBuilder = Rounding.builder(getIntervalAsTimeValue());
}
if (timeZone != null) {
tzRoundingBuilder.timeZone(timeZone);
} }
Rounding rounding = tzRoundingBuilder.build(); Rounding rounding = tzRoundingBuilder.build();
return rounding; return rounding;
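
rewriteTimeZone() above swaps a DST-observing zone for a fixed offset when every value on the shard lies between two transitions, letting the shard round with cheaper fixed-offset rules while producing the same buckets. Below is a small illustrative sketch of that equivalence with the Rounding API used in this hunk; the concrete date, zone and the package location of Rounding are assumptions.

    import org.elasticsearch.common.rounding.DateTimeUnit;
    import org.elasticsearch.common.rounding.Rounding;
    import org.joda.time.DateTime;
    import org.joda.time.DateTimeZone;

    public class ShardRoundingSketch {
        public static void main(String[] args) {
            DateTimeZone paris = DateTimeZone.forID("Europe/Paris");
            // Mid-July instant, far away from any DST transition.
            long instant = new DateTime(2018, 7, 15, 10, 30, paris).getMillis();

            Rounding.Builder parisBuilder = Rounding.builder(DateTimeUnit.DAY_OF_MONTH);
            parisBuilder.timeZone(paris);
            Rounding parisRounding = parisBuilder.build();

            // Fixed-offset zone the shard-level rewrite would substitute (UTC+2 during CEST).
            Rounding.Builder fixedBuilder = Rounding.builder(DateTimeUnit.DAY_OF_MONTH);
            fixedBuilder.timeZone(DateTimeZone.forOffsetMillis(paris.getOffset(instant)));
            Rounding fixedRounding = fixedBuilder.build();

            // Both roundings place the value into the same daily bucket.
            System.out.println(parisRounding.round(instant) == fixedRounding.round(instant)); // true
        }
    }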

View File

@ -55,6 +55,7 @@ class DateHistogramAggregator extends BucketsAggregator {
private final ValuesSource.Numeric valuesSource; private final ValuesSource.Numeric valuesSource;
private final DocValueFormat formatter; private final DocValueFormat formatter;
private final Rounding rounding; private final Rounding rounding;
private final Rounding shardRounding;
private final BucketOrder order; private final BucketOrder order;
private final boolean keyed; private final boolean keyed;
@ -64,14 +65,15 @@ class DateHistogramAggregator extends BucketsAggregator {
private final LongHash bucketOrds; private final LongHash bucketOrds;
private long offset; private long offset;
DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, BucketOrder order, DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding,
boolean keyed, long offset, BucketOrder order, boolean keyed,
long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,
DocValueFormat formatter, SearchContext aggregationContext, DocValueFormat formatter, SearchContext aggregationContext,
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException { Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
this.rounding = rounding; this.rounding = rounding;
this.shardRounding = shardRounding;
this.offset = offset; this.offset = offset;
this.order = InternalOrder.validate(order, this);; this.order = InternalOrder.validate(order, this);;
this.keyed = keyed; this.keyed = keyed;
@ -105,7 +107,9 @@ class DateHistogramAggregator extends BucketsAggregator {
long previousRounded = Long.MIN_VALUE; long previousRounded = Long.MIN_VALUE;
for (int i = 0; i < valuesCount; ++i) { for (int i = 0; i < valuesCount; ++i) {
long value = values.nextValue(); long value = values.nextValue();
long rounded = rounding.round(value - offset) + offset; // We can use shardRounding here, which is sometimes more efficient
// if daylight saving times are involved.
long rounded = shardRounding.round(value - offset) + offset;
assert rounded >= previousRounded; assert rounded >= previousRounded;
if (rounded == previousRounded) { if (rounded == previousRounded) {
continue; continue;
@ -138,6 +142,7 @@ class DateHistogramAggregator extends BucketsAggregator {
CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this));
// value source will be null for unmapped fields // value source will be null for unmapped fields
// Important: use `rounding` here, not `shardRounding`
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
: null; : null;

View File

@ -38,28 +38,27 @@ import java.util.Map;
public final class DateHistogramAggregatorFactory public final class DateHistogramAggregatorFactory
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, DateHistogramAggregatorFactory> { extends ValuesSourceAggregatorFactory<ValuesSource.Numeric, DateHistogramAggregatorFactory> {
private final DateHistogramInterval dateHistogramInterval;
private final long interval;
private final long offset; private final long offset;
private final BucketOrder order; private final BucketOrder order;
private final boolean keyed; private final boolean keyed;
private final long minDocCount; private final long minDocCount;
private final ExtendedBounds extendedBounds; private final ExtendedBounds extendedBounds;
private Rounding rounding; private final Rounding rounding;
private final Rounding shardRounding;
public DateHistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config, long interval, public DateHistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config,
DateHistogramInterval dateHistogramInterval, long offset, BucketOrder order, boolean keyed, long minDocCount, long offset, BucketOrder order, boolean keyed, long minDocCount,
Rounding rounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory<?> parent, Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, SearchContext context,
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException { AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
Map<String, Object> metaData) throws IOException {
super(name, config, context, parent, subFactoriesBuilder, metaData); super(name, config, context, parent, subFactoriesBuilder, metaData);
this.interval = interval;
this.dateHistogramInterval = dateHistogramInterval;
this.offset = offset; this.offset = offset;
this.order = order; this.order = order;
this.keyed = keyed; this.keyed = keyed;
this.minDocCount = minDocCount; this.minDocCount = minDocCount;
this.extendedBounds = extendedBounds; this.extendedBounds = extendedBounds;
this.rounding = rounding; this.rounding = rounding;
this.shardRounding = shardRounding;
} }
public long minDocCount() { public long minDocCount() {
@ -77,8 +76,8 @@ public final class DateHistogramAggregatorFactory
private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators, private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) throws IOException { Map<String, Object> metaData) throws IOException {
return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, extendedBounds, valuesSource, return new DateHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds,
config.format(), context, parent, pipelineAggregators, metaData); valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
} }
@Override @Override

View File

@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.bucketsort.BucketSortPipel
import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.FieldSortBuilder;
@ -78,6 +79,10 @@ public final class PipelineAggregatorBuilders {
return new PercentilesBucketPipelineAggregationBuilder(name, bucketsPath); return new PercentilesBucketPipelineAggregationBuilder(name, bucketsPath);
} }
/**
* @deprecated use {@link #movingFunction(String, Script, String, int)} instead
*/
@Deprecated
public static MovAvgPipelineAggregationBuilder movingAvg(String name, String bucketsPath) { public static MovAvgPipelineAggregationBuilder movingAvg(String name, String bucketsPath) {
return new MovAvgPipelineAggregationBuilder(name, bucketsPath); return new MovAvgPipelineAggregationBuilder(name, bucketsPath);
} }
@ -114,4 +119,9 @@ public final class PipelineAggregatorBuilders {
public static SerialDiffPipelineAggregationBuilder diff(String name, String bucketsPath) { public static SerialDiffPipelineAggregationBuilder diff(String name, String bucketsPath) {
return new SerialDiffPipelineAggregationBuilder(name, bucketsPath); return new SerialDiffPipelineAggregationBuilder(name, bucketsPath);
} }
public static MovFnPipelineAggregationBuilder movingFunction(String name, Script script,
String bucketsPaths, int window) {
return new MovFnPipelineAggregationBuilder(name, bucketsPaths, script, window);
}
} }
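
With movingAvg(...) deprecated in favour of the movingFunction(...) factory added above, here is a hedged sketch of the replacement call; the aggregation name, buckets path, window and alpha are placeholder values, and the import path of PipelineAggregatorBuilders is assumed.

    import org.elasticsearch.script.Script;
    import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders; // package assumed
    import org.elasticsearch.search.aggregations.pipeline.movfn.MovFnPipelineAggregationBuilder;

    public class MovingFunctionSketch {
        public static void main(String[] args) {
            MovFnPipelineAggregationBuilder movFn = PipelineAggregatorBuilders.movingFunction(
                "smoothed_sum",                                   // aggregation name
                new Script("MovingFunctions.ewma(values, 0.3)"),  // Painless body; `values` is the windowed bucket values
                "the_sum",                                        // buckets_path to a sibling metric
                10);                                              // window size, must be > 0
        }
    }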

View File

@ -79,11 +79,11 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
List<String> bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); List<String> bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList();
for (Aggregation aggregation : aggregations) { for (Aggregation aggregation : aggregations) {
if (aggregation.getName().equals(bucketsPath.get(0))) { if (aggregation.getName().equals(bucketsPath.get(0))) {
bucketsPath = bucketsPath.subList(1, bucketsPath.size()); List<String> sublistedPath = bucketsPath.subList(1, bucketsPath.size());
InternalMultiBucketAggregation<?, ?> multiBucketsAgg = (InternalMultiBucketAggregation<?, ?>) aggregation; InternalMultiBucketAggregation<?, ?> multiBucketsAgg = (InternalMultiBucketAggregation<?, ?>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = multiBucketsAgg.getBuckets(); List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = multiBucketsAgg.getBuckets();
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) { for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, sublistedPath, gapPolicy);
if (bucketValue != null && !Double.isNaN(bucketValue)) { if (bucketValue != null && !Double.isNaN(bucketValue)) {
collectBucketValue(bucket.getKeyAsString(), bucketValue); collectBucketValue(bucket.getKeyAsString(), bucketValue);
} }
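
The fix above keeps the original bucketsPath for the name comparison and only sub-lists into a local variable, so later loop iterations are still matched against the first path element. A tiny stand-alone illustration of the aliasing pitfall the old reassignment allowed; the names are made up.

    import java.util.Arrays;
    import java.util.List;

    public class SubListPitfallSketch {
        public static void main(String[] args) {
            List<String> bucketsPath = Arrays.asList("histo", "the_sum");
            // A sibling aggregation that happens to share the name of the second path element.
            for (String aggName : Arrays.asList("histo", "the_sum")) {
                if (aggName.equals(bucketsPath.get(0))) {
                    // Fixed code: sub-list into a local so bucketsPath itself is untouched.
                    List<String> sublistedPath = bucketsPath.subList(1, bucketsPath.size());
                    System.out.println("resolving " + sublistedPath + " under " + aggName);
                    // The buggy variant (bucketsPath = sublistedPath;) would let the next
                    // iteration spuriously match the sibling named "the_sum".
                }
            }
        }
    }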

View File

@ -23,6 +23,8 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.ParseFieldRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -59,6 +61,8 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio
public static final ParseField SETTINGS = new ParseField("settings"); public static final ParseField SETTINGS = new ParseField("settings");
private static final ParseField PREDICT = new ParseField("predict"); private static final ParseField PREDICT = new ParseField("predict");
private static final ParseField MINIMIZE = new ParseField("minimize"); private static final ParseField MINIMIZE = new ParseField("minimize");
private static final DeprecationLogger DEPRECATION_LOGGER
= new DeprecationLogger(Loggers.getLogger(MovAvgPipelineAggregationBuilder.class));
private String format; private String format;
private GapPolicy gapPolicy = GapPolicy.SKIP; private GapPolicy gapPolicy = GapPolicy.SKIP;
@ -318,6 +322,8 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio
Integer predict = null; Integer predict = null;
Boolean minimize = null; Boolean minimize = null;
DEPRECATION_LOGGER.deprecated("The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.");
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) { if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName(); currentFieldName = parser.currentName();

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
@ -90,7 +91,7 @@ public class EwmaModel extends MovAvgModel {
} }
@Override @Override
protected <T extends Number> double[] doPredict(Collection<T> values, int numPredictions) { protected double[] doPredict(Collection<Double> values, int numPredictions) {
double[] predictions = new double[numPredictions]; double[] predictions = new double[numPredictions];
// EWMA just emits the same final prediction repeatedly. // EWMA just emits the same final prediction repeatedly.
@ -100,19 +101,8 @@ public class EwmaModel extends MovAvgModel {
} }
@Override @Override
public <T extends Number> double next(Collection<T> values) { public double next(Collection<Double> values) {
double avg = 0; return MovingFunctions.ewma(values.stream().mapToDouble(Double::doubleValue).toArray(), alpha);
boolean first = true;
for (T v : values) {
if (first) {
avg = v.doubleValue();
first = false;
} else {
avg = (v.doubleValue() * alpha) + (avg * (1 - alpha));
}
}
return avg;
} }
@Override @Override
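
The loop removed above and the new MovingFunctions.ewma delegate compute the same exponentially weighted average: the first value seeds the average and each later value is blended in with weight alpha. A short, self-contained worked example of that recurrence; the sample values and alpha are arbitrary.

    public class EwmaSketch {
        public static void main(String[] args) {
            double[] values = {1.0, 2.0, 3.0};
            double alpha = 0.5;
            double avg = Double.NaN;
            boolean first = true;
            for (double v : values) {
                if (first) {
                    avg = v;                              // 1.0
                    first = false;
                } else {
                    avg = v * alpha + avg * (1 - alpha);  // 1.5, then 2.25
                }
            }
            // 2.25 -- the value next(values) is expected to return via MovingFunctions.ewma
            System.out.println(avg);
        }
    }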

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
@ -116,16 +117,15 @@ public class HoltLinearModel extends MovAvgModel {
* *
* @param values Collection of numerics to movingAvg, usually windowed * @param values Collection of numerics to movingAvg, usually windowed
* @param numPredictions Number of newly generated predictions to return * @param numPredictions Number of newly generated predictions to return
* @param <T> Type of numeric
* @return Returns an array of doubles, since most smoothing methods operate on floating points * @return Returns an array of doubles, since most smoothing methods operate on floating points
*/ */
@Override @Override
protected <T extends Number> double[] doPredict(Collection<T> values, int numPredictions) { protected double[] doPredict(Collection<Double> values, int numPredictions) {
return next(values, numPredictions); return next(values, numPredictions);
} }
@Override @Override
public <T extends Number> double next(Collection<T> values) { public double next(Collection<Double> values) {
return next(values, 1)[0]; return next(values, 1)[0];
} }
@ -135,47 +135,13 @@ public class HoltLinearModel extends MovAvgModel {
* @param values Collection of values to calculate avg for * @param values Collection of values to calculate avg for
* @param numForecasts number of forecasts into the future to return * @param numForecasts number of forecasts into the future to return
* *
* @param <T> Type T extending Number
* @return Returns a Double containing the moving avg for the window * @return Returns a Double containing the moving avg for the window
*/ */
public <T extends Number> double[] next(Collection<T> values, int numForecasts) { public double[] next(Collection<Double> values, int numForecasts) {
if (values.size() == 0) { if (values.size() == 0) {
return emptyPredictions(numForecasts); return emptyPredictions(numForecasts);
} }
return MovingFunctions.holtForecast(values.stream().mapToDouble(Double::doubleValue).toArray(), alpha, beta, numForecasts);
// Smoothed value
double s = 0;
double last_s = 0;
// Trend value
double b = 0;
double last_b = 0;
int counter = 0;
T last;
for (T v : values) {
last = v;
if (counter == 1) {
s = v.doubleValue();
b = v.doubleValue() - last.doubleValue();
} else {
s = alpha * v.doubleValue() + (1.0d - alpha) * (last_s + last_b);
b = beta * (s - last_s) + (1 - beta) * last_b;
}
counter += 1;
last_s = s;
last_b = b;
}
double[] forecastValues = new double[numForecasts];
for (int i = 0; i < numForecasts; i++) {
forecastValues[i] = s + (i * b);
}
return forecastValues;
} }
@Override @Override

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
@ -259,16 +260,15 @@ public class HoltWintersModel extends MovAvgModel {
* *
* @param values Collection of numerics to movingAvg, usually windowed * @param values Collection of numerics to movingAvg, usually windowed
* @param numPredictions Number of newly generated predictions to return * @param numPredictions Number of newly generated predictions to return
* @param <T> Type of numeric
* @return Returns an array of doubles, since most smoothing methods operate on floating points * @return Returns an array of doubles, since most smoothing methods operate on floating points
*/ */
@Override @Override
protected <T extends Number> double[] doPredict(Collection<T> values, int numPredictions) { protected double[] doPredict(Collection<Double> values, int numPredictions) {
return next(values, numPredictions); return next(values, numPredictions);
} }
@Override @Override
public <T extends Number> double next(Collection<T> values) { public double next(Collection<Double> values) {
return next(values, 1)[0]; return next(values, 1)[0];
} }
@ -278,88 +278,11 @@ public class HoltWintersModel extends MovAvgModel {
* @param values Collection of values to calculate avg for * @param values Collection of values to calculate avg for
* @param numForecasts number of forecasts into the future to return * @param numForecasts number of forecasts into the future to return
* *
* @param <T> Type T extending Number
* @return Returns a Double containing the moving avg for the window * @return Returns a Double containing the moving avg for the window
*/ */
public <T extends Number> double[] next(Collection<T> values, int numForecasts) { public double[] next(Collection<Double> values, int numForecasts) {
return MovingFunctions.holtWintersForecast(values.stream().mapToDouble(Double::doubleValue).toArray(),
if (values.size() < period * 2) { alpha, beta, gamma, period, padding, seasonalityType.equals(SeasonalityType.MULTIPLICATIVE), numForecasts);
// We need at least two full "seasons" to use HW
// This should have been caught earlier, we can't do anything now...bail
throw new AggregationExecutionException("Holt-Winters aggregation requires at least (2 * period == 2 * "
+ period + " == "+(2 * period)+") data-points to function. Only [" + values.size() + "] were provided.");
}
// Smoothed value
double s = 0;
double last_s;
// Trend value
double b = 0;
double last_b = 0;
// Seasonal value
double[] seasonal = new double[values.size()];
int counter = 0;
double[] vs = new double[values.size()];
for (T v : values) {
vs[counter] = v.doubleValue() + padding;
counter += 1;
}
// Initial level value is average of first season
// Calculate the slopes between first and second season for each period
for (int i = 0; i < period; i++) {
s += vs[i];
b += (vs[i + period] - vs[i]) / period;
}
s /= period;
b /= period;
last_s = s;
// Calculate first seasonal
if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
Arrays.fill(seasonal, 0.0);
} else {
for (int i = 0; i < period; i++) {
seasonal[i] = vs[i] / s;
}
}
for (int i = period; i < vs.length; i++) {
// TODO if perf is a problem, we can specialize a subclass to avoid conditionals on each iteration
if (seasonalityType.equals(SeasonalityType.MULTIPLICATIVE)) {
s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
} else {
s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
}
b = beta * (s - last_s) + (1 - beta) * last_b;
if (seasonalityType.equals(SeasonalityType.MULTIPLICATIVE)) {
seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
} else {
seasonal[i] = gamma * (vs[i] - (last_s - last_b )) + (1 - gamma) * seasonal[i - period];
}
last_s = s;
last_b = b;
}
double[] forecastValues = new double[numForecasts];
for (int i = 1; i <= numForecasts; i++) {
int idx = values.size() - period + ((i - 1) % period);
// TODO perhaps pad out seasonal to a power of 2 and use a mask instead of modulo?
if (seasonalityType.equals(SeasonalityType.MULTIPLICATIVE)) {
forecastValues[i-1] = (s + (i * b)) * seasonal[idx];
} else {
forecastValues[i-1] = s + (i * b) + seasonal[idx];
}
}
return forecastValues;
} }
@Override @Override

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
@ -74,7 +75,7 @@ public class LinearModel extends MovAvgModel {
} }
@Override @Override
protected <T extends Number> double[] doPredict(Collection<T> values, int numPredictions) { protected double[] doPredict(Collection<Double> values, int numPredictions) {
double[] predictions = new double[numPredictions]; double[] predictions = new double[numPredictions];
// EWMA just emits the same final prediction repeatedly. // EWMA just emits the same final prediction repeatedly.
@ -84,17 +85,8 @@ public class LinearModel extends MovAvgModel {
} }
@Override @Override
public <T extends Number> double next(Collection<T> values) { public double next(Collection<Double> values) {
double avg = 0; return MovingFunctions.linearWeightedAvg(values.stream().mapToDouble(Double::doubleValue).toArray());
long totalWeight = 1;
long current = 1;
for (T v : values) {
avg += v.doubleValue() * current;
totalWeight += current;
current += 1;
}
return avg / totalWeight;
} }
@Override @Override

View File

@ -68,20 +68,18 @@ public abstract class MovAvgModel implements NamedWriteable, ToXContentFragment
* Returns the next value in the series, according to the underlying smoothing model * Returns the next value in the series, according to the underlying smoothing model
* *
* @param values Collection of numerics to movingAvg, usually windowed * @param values Collection of numerics to movingAvg, usually windowed
* @param <T> Type of numeric
* @return Returns a double, since most smoothing methods operate on floating points * @return Returns a double, since most smoothing methods operate on floating points
*/ */
public abstract <T extends Number> double next(Collection<T> values); public abstract double next(Collection<Double> values);
/** /**
* Predicts the next `n` values in the series. * Predicts the next `n` values in the series.
* *
* @param values Collection of numerics to movingAvg, usually windowed * @param values Collection of numerics to movingAvg, usually windowed
* @param numPredictions Number of newly generated predictions to return * @param numPredictions Number of newly generated predictions to return
* @param <T> Type of numeric
* @return Returns an array of doubles, since most smoothing methods operate on floating points * @return Returns an array of doubles, since most smoothing methods operate on floating points
*/ */
public <T extends Number> double[] predict(Collection<T> values, int numPredictions) { public double[] predict(Collection<Double> values, int numPredictions) {
assert(numPredictions >= 1); assert(numPredictions >= 1);
// If there are no values, we can't do anything. Return an array of NaNs. // If there are no values, we can't do anything. Return an array of NaNs.
@ -97,10 +95,9 @@ public abstract class MovAvgModel implements NamedWriteable, ToXContentFragment
* *
* @param values Collection of numerics to movingAvg, usually windowed * @param values Collection of numerics to movingAvg, usually windowed
* @param numPredictions Number of newly generated predictions to return * @param numPredictions Number of newly generated predictions to return
* @param <T> Type of numeric
* @return Returns an array of doubles, since most smoothing methods operate on floating points * @return Returns an array of doubles, since most smoothing methods operate on floating points
*/ */
protected abstract <T extends Number> double[] doPredict(Collection<T> values, int numPredictions); protected abstract double[] doPredict(Collection<Double> values, int numPredictions);
/** /**
* Returns an empty set of predictions, filled with NaNs * Returns an empty set of predictions, filled with NaNs

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
@ -72,7 +73,7 @@ public class SimpleModel extends MovAvgModel {
} }
@Override @Override
protected <T extends Number> double[] doPredict(Collection<T> values, int numPredictions) { protected double[] doPredict(Collection<Double> values, int numPredictions) {
double[] predictions = new double[numPredictions]; double[] predictions = new double[numPredictions];
// Simple just emits the same final prediction repeatedly. // Simple just emits the same final prediction repeatedly.
@ -82,12 +83,8 @@ public class SimpleModel extends MovAvgModel {
} }
@Override @Override
public <T extends Number> double next(Collection<T> values) { public double next(Collection<Double> values) {
double avg = 0; return MovingFunctions.unweightedAvg(values.stream().mapToDouble(Double::doubleValue).toArray());
for (T v : values) {
avg += v.doubleValue();
}
return avg / values.size();
} }
@Override @Override

View File

@ -0,0 +1,264 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.GAP_POLICY;
public class MovFnPipelineAggregationBuilder extends AbstractPipelineAggregationBuilder<MovFnPipelineAggregationBuilder> {
public static final String NAME = "moving_fn";
private static final ParseField WINDOW = new ParseField("window");
private final Script script;
private final String bucketsPathString;
private String format = null;
private GapPolicy gapPolicy = GapPolicy.SKIP;
private int window;
private static final Function<String, ConstructingObjectParser<MovFnPipelineAggregationBuilder, Void>> PARSER
= name -> {
@SuppressWarnings("unchecked")
ConstructingObjectParser<MovFnPipelineAggregationBuilder, Void> parser = new ConstructingObjectParser<>(
MovFnPipelineAggregationBuilder.NAME,
false,
o -> new MovFnPipelineAggregationBuilder(name, (String) o[0], (Script) o[1], (int)o[2]));
parser.declareString(ConstructingObjectParser.constructorArg(), BUCKETS_PATH_FIELD);
parser.declareField(ConstructingObjectParser.constructorArg(),
(p, c) -> Script.parse(p), Script.SCRIPT_PARSE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING);
parser.declareInt(ConstructingObjectParser.constructorArg(), WINDOW);
parser.declareString(MovFnPipelineAggregationBuilder::format, FORMAT);
parser.declareField(MovFnPipelineAggregationBuilder::gapPolicy, p -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return GapPolicy.parse(p.text().toLowerCase(Locale.ROOT), p.getTokenLocation());
}
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, GAP_POLICY, ObjectParser.ValueType.STRING);
return parser;
};
public MovFnPipelineAggregationBuilder(String name, String bucketsPath, Script script, int window) {
super(name, NAME, new String[]{bucketsPath});
this.bucketsPathString = bucketsPath;
this.script = script;
if (window <= 0) {
throw new IllegalArgumentException("[" + WINDOW.getPreferredName() + "] must be a positive, non-zero integer.");
}
this.window = window;
}
public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException {
super(in, NAME);
bucketsPathString = in.readString();
script = new Script(in);
format = in.readOptionalString();
gapPolicy = GapPolicy.readFrom(in);
window = in.readInt();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeString(bucketsPathString);
script.writeTo(out);
out.writeOptionalString(format);
gapPolicy.writeTo(out);
out.writeInt(window);
}
/**
* Sets the format to use on the output of this aggregation.
*/
public MovFnPipelineAggregationBuilder format(String format) {
if (Strings.isNullOrEmpty(format)) {
throw new IllegalArgumentException("[" + FORMAT.getPreferredName() + "] must not be null or an empty string.");
}
this.format = format;
return this;
}
/**
* Gets the format to use on the output of this aggregation.
*/
public String format() {
return format;
}
protected DocValueFormat formatter() {
if (format != null) {
return new DocValueFormat.Decimal(format);
}
return DocValueFormat.RAW;
}
/**
* Sets the gap policy to use for this aggregation.
*/
public MovFnPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) {
if (gapPolicy == null) {
throw new IllegalArgumentException("[" + GAP_POLICY.getPreferredName() + "] must not be null.");
}
this.gapPolicy = gapPolicy;
return this;
}
/**
* Gets the gap policy to use for this aggregation.
*/
public GapPolicy gapPolicy() {
return gapPolicy;
}
/**
* Returns the window size for this aggregation
*/
public int getWindow() {
return window;
}
/**
* Sets the window size for this aggregation
*/
public void setWindow(int window) {
if (window <= 0) {
throw new IllegalArgumentException("[" + WINDOW.getPreferredName() + "] must be a positive, non-zero integer.");
}
this.window = window;
}
@Override
public void doValidate(AggregatorFactory<?> parent, List<AggregationBuilder> aggFactories,
List<PipelineAggregationBuilder> pipelineAggregatoractories) {
if (window <= 0) {
throw new IllegalArgumentException("[" + WINDOW.getPreferredName() + "] must be a positive, non-zero integer.");
}
if (parent instanceof HistogramAggregatorFactory) {
HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;
if (histoParent.minDocCount() != 0) {
throw new IllegalStateException("parent histogram of moving_function aggregation [" + name
+ "] must have min_doc_count of 0");
}
} else if (parent instanceof DateHistogramAggregatorFactory) {
DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;
if (histoParent.minDocCount() != 0) {
throw new IllegalStateException("parent histogram of moving_function aggregation [" + name
+ "] must have min_doc_count of 0");
}
} else {
throw new IllegalStateException("moving_function aggregation [" + name
+ "] must have a histogram or date_histogram as parent");
}
}
@Override
protected PipelineAggregator createInternal(Map<String, Object> metaData) throws IOException {
return new MovFnPipelineAggregator(name, bucketsPathString, script, window, formatter(), gapPolicy, metaData);
}
@Override
protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(BUCKETS_PATH.getPreferredName(), bucketsPathString);
builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName(), script);
if (format != null) {
builder.field(FORMAT.getPreferredName(), format);
}
builder.field(GAP_POLICY.getPreferredName(), gapPolicy.getName());
builder.field(WINDOW.getPreferredName(), window);
return builder;
}
public static MovFnPipelineAggregationBuilder parse(String aggName, XContentParser parser) {
return PARSER.apply(aggName).apply(parser, null);
}
/**
* Used for serialization testing, since pipeline aggs serialize themselves as a named object but are parsed
* as a regular object with the name passed in.
*/
static MovFnPipelineAggregationBuilder parse(XContentParser parser) throws IOException {
parser.nextToken();
if (parser.currentToken().equals(XContentParser.Token.START_OBJECT)) {
parser.nextToken();
if (parser.currentToken().equals(XContentParser.Token.FIELD_NAME)) {
String aggName = parser.currentName();
parser.nextToken(); // "moving_fn"
parser.nextToken(); // start_object
return PARSER.apply(aggName).apply(parser, null);
}
}
throw new IllegalStateException("Expected aggregation name but none found");
}
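// For reference (added for illustration), the request-body shape this helper walks before handing
// off to PARSER looks roughly like the following; names and values are illustrative:
//   { "the_movfn" : { "moving_fn" : { "buckets_path" : "the_avg", "script" : "...", "window" : 3 } } }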
@Override
protected boolean overrideBucketsPath() {
return true;
}
@Override
protected int doHashCode() {
return Objects.hash(bucketsPathString, script, format, gapPolicy, window);
}
@Override
protected boolean doEquals(Object obj) {
MovFnPipelineAggregationBuilder other = (MovFnPipelineAggregationBuilder) obj;
return Objects.equals(bucketsPathString, other.bucketsPathString)
&& Objects.equals(script, other.script)
&& Objects.equals(format, other.format)
&& Objects.equals(gapPolicy, other.gapPolicy)
&& Objects.equals(window, other.window);
}
@Override
public String getWriteableName() {
return NAME;
}
}
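For orientation, a minimal usage sketch of this builder wired under a date histogram. The field names, agg names, and window size are illustrative, and the script body assumes the MovingFunctions helpers whitelisted for the moving_fn script context; imports are elided.

Script script = new Script("MovingFunctions.unweightedAvg(values)");
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("my_histo")
    .field("timestamp")
    .dateHistogramInterval(DateHistogramInterval.DAY)
    .subAggregation(new AvgAggregationBuilder("the_avg").field("price"))
    .subAggregation(new MovFnPipelineAggregationBuilder("the_movfn", "the_avg", script, 10));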

View File

@@ -0,0 +1,149 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.elasticsearch.common.collect.EvictingQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue;
/**
* This pipeline aggregation gives the user the ability to script functions that "move" across a window
* of data, instead of single data points. It is the scripted version of the MovingAvg pipeline agg.
*
* Through custom script contexts, we expose a number of convenience methods:
*
* - max
* - min
* - sum
* - unweightedAvg
* - linearWeightedAvg
* - ewma
* - holt
* - holtWintersMovAvg
*
* The user can also define any arbitrary logic via their own scripting, or combine with the above methods.
*/
public class MovFnPipelineAggregator extends PipelineAggregator {
private final DocValueFormat formatter;
private final BucketHelpers.GapPolicy gapPolicy;
private final Script script;
private final String bucketsPath;
private final int window;
MovFnPipelineAggregator(String name, String bucketsPath, Script script, int window, DocValueFormat formatter,
BucketHelpers.GapPolicy gapPolicy, Map<String, Object> metadata) {
super(name, new String[]{bucketsPath}, metadata);
this.bucketsPath = bucketsPath;
this.script = script;
this.formatter = formatter;
this.gapPolicy = gapPolicy;
this.window = window;
}
public MovFnPipelineAggregator(StreamInput in) throws IOException {
super(in);
script = new Script(in);
formatter = in.readNamedWriteable(DocValueFormat.class);
gapPolicy = BucketHelpers.GapPolicy.readFrom(in);
bucketsPath = in.readString();
window = in.readInt();
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
script.writeTo(out);
out.writeNamedWriteable(formatter);
gapPolicy.writeTo(out);
out.writeString(bucketsPath);
out.writeInt(window);
}
@Override
public String getWriteableName() {
return MovFnPipelineAggregationBuilder.NAME;
}
@Override
public InternalAggregation reduce(InternalAggregation aggregation, InternalAggregation.ReduceContext reduceContext) {
InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>
histo = (InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends
InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;
List<MultiBucketsAggregation.Bucket> newBuckets = new ArrayList<>();
EvictingQueue<Double> values = new EvictingQueue<>(this.window);
// Initialize the script
MovingFunctionScript.Factory scriptFactory = reduceContext.scriptService().compile(script, MovingFunctionScript.CONTEXT);
Map<String, Object> vars = new HashMap<>();
if (script.getParams() != null) {
vars.putAll(script.getParams());
}
MovingFunctionScript executableScript = scriptFactory.newInstance();
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
// Default is to reuse existing bucket. Simplifies the rest of the logic,
// since we only change newBucket if we can add to it
MultiBucketsAggregation.Bucket newBucket = bucket;
if (thisBucketValue != null && thisBucketValue.equals(Double.NaN) == false) {
// The custom context mandates that the script returns a double (not Double) so we
// don't need null checks, etc.
double movavg = executableScript.execute(vars, values.stream().mapToDouble(Double::doubleValue).toArray());
List<InternalAggregation> aggs = StreamSupport
.stream(bucket.getAggregations().spliterator(), false)
.map(InternalAggregation.class::cast)
.collect(Collectors.toList());
aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<>(), metaData()));
newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
values.offer(thisBucketValue);
}
newBuckets.add(newBucket);
}
return factory.createAggregation(newBuckets);
}
}
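One detail of reduce() worth spelling out: the current bucket's value is offered to the window only after the script has executed, so each invocation sees the trailing window of earlier buckets and never its own bucket. A small illustrative trace (window size and values invented, matching the behaviour asserted in MovFnUnitTests):

// window = 3, bucket values = [1, 2, 3, 4], script = MovingFunctions.max(values)
// bucket 1: script sees []         -> NaN, then 1 enters the window
// bucket 2: script sees [1]        -> 1.0, then 2 enters the window
// bucket 3: script sees [1, 2]     -> 2.0
// bucket 4: script sees [1, 2, 3]  -> 3.0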

View File

@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.elasticsearch.script.ScriptContext;
import java.util.Collection;
import java.util.Map;
/**
* This class provides a custom script context for the Moving Function pipeline aggregation,
* so that we can expose a number of pre-baked moving functions like min, max, movavg, etc.
*/
public abstract class MovingFunctionScript {
/**
* @param params The user-provided parameters
* @param values The values in the window that we are moving a function across
* @return A double representing the value from this particular window
*/
public abstract double execute(Map<String, Object> params, double[] values);
public interface Factory {
MovingFunctionScript newInstance();
}
public static final String[] PARAMETERS = new String[] {"params", "values"};
public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("moving-function", Factory.class);
}
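A minimal sketch of a concrete implementation of this context, mirroring the anonymous instance used in MovFnUnitTests (the function chosen here is arbitrary):

MovingFunctionScript maxOfWindow = new MovingFunctionScript() {
    @Override
    public double execute(Map<String, Object> params, double[] values) {
        // "values" is the current window; "params" carries the user-supplied script params
        return MovingFunctions.max(values);
    }
};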

View File

@@ -0,0 +1,359 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import java.util.Arrays;
import java.util.Collection;
/**
* Provides a collection of static utility methods that can be referenced from MovingFunction script contexts
*/
public class MovingFunctions {
/**
* Find the maximum value in a window of values.
* If all values are missing/null/NaN, the return value will be NaN
*/
public static double max(double[] values) {
return Arrays.stream(values).max().orElse(Double.NaN);
}
/**
* Find the minimum value in a window of values
* If all values are missing/null/NaN, the return value will be NaN
*/
public static double min(double[] values) {
return Arrays.stream(values).min().orElse(Double.NaN);
}
/**
* Find the sum of a window of values
* If all values are missing/null/NaN, the return value will be 0.0
*/
public static double sum(double[] values) {
if (values.length == 0) {
return 0.0;
}
return Arrays.stream(values).map(value -> {
if (Double.isNaN(value) == false) {
return value;
}
return 0.0;
}).sum();
}
/**
* Calculate a simple unweighted (arithmetic) moving average.
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN.
* The average is based on the count of non-null, non-NaN values.
*/
public static double unweightedAvg(double[] values) {
double avg = 0.0;
long count = 0;
for (double v : values) {
if (Double.isNaN(v) == false) {
avg += v;
count += 1;
}
}
return count == 0 ? Double.NaN : avg / count;
}
/**
* Calculate a standard deviation over the values using the provided average.
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN.
* The average is based on the count of non-null, non-NaN values.
*/
public static double stdDev(double[] values, double avg) {
if (Double.isNaN(avg)) {
return Double.NaN;
} else {
long count = 0;
double squaredMean = 0;
for (double v : values) {
if (Double.isNaN(v) == false) {
squaredMean += Math.pow(v - avg, 2);
count += 1;
}
}
return Math.sqrt(squaredMean / count);
}
}
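// Worked example (added for illustration): stdDev(new double[]{1, 2, 3}, 2.0)
//   squaredMean = (1-2)^2 + (2-2)^2 + (3-2)^2 = 2, count = 3
//   returns Math.sqrt(2.0 / 3) ~= 0.816 (population standard deviation)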
/**
* Calculate a linearly weighted moving average, such that older values are
* linearly less important. "Time" is determined by position in collection
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN
* The average is based on the count of non-null, non-NaN values.
*/
public static double linearWeightedAvg(double[] values) {
double avg = 0;
long totalWeight = 1;
long current = 1;
for (double v : values) {
if (Double.isNaN(v) == false) {
avg += v * current;
totalWeight += current;
current += 1;
}
}
return totalWeight == 1 ? Double.NaN : avg / totalWeight;
}
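// Worked example (added for illustration): linearWeightedAvg(new double[]{1, 2, 3})
//   avg         = 1*1 + 2*2 + 3*3 = 14
//   totalWeight = 1 + 1 + 2 + 3   = 7   (note the initial weight of 1)
//   returns 14.0 / 7 = 2.0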
/**
*
* Calculate an exponentially weighted moving average.
*
* Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values
* (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g.
* the series mean). Useful values are somewhere in between. Defaults to 0.5.
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN
* The average is based on the count of non-null, non-NaN values.
*
* @param alpha A double between 0-1 inclusive, controls data smoothing
*/
public static double ewma(double[] values, double alpha) {
double avg = Double.NaN;
boolean first = true;
for (double v : values) {
if (Double.isNaN(v) == false) {
if (first) {
avg = v;
first = false;
} else {
avg = (v * alpha) + (avg * (1 - alpha));
}
}
}
return avg;
}
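// Worked example (added for illustration): ewma(new double[]{2, 4, 6}, 0.5)
//   avg = 2.0                     (the first non-NaN value seeds the average)
//   avg = 4*0.5 + 2.0*0.5 = 3.0
//   avg = 6*0.5 + 3.0*0.5 = 4.5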
/**
* Calculate a doubly exponential weighted moving average
*
* Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values
* (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g.
* the series mean). Useful values are somewhere in between. Defaults to 0.5.
*
* Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN
* The average is based on the count of non-null, non-NaN values.
*
* @param alpha A double between 0-1 inclusive, controls data smoothing
* @param beta a double between 0-1 inclusive, controls trend smoothing
*/
public static double holt(double[] values, double alpha, double beta) {
if (values.length == 0) {
return Double.NaN;
}
return holtForecast(values, alpha, beta, 1)[0];
}
/**
* Version of holt that can "forecast", not exposed as a whitelisted function for moving_fn scripts, but
* here as compatibility/code sharing for existing moving_avg agg. Can be removed when moving_avg is gone.
*/
public static double[] holtForecast(double[] values, double alpha, double beta, int numForecasts) {
// Smoothed value
double s = 0;
double last_s = 0;
// Trend value
double b = 0;
double last_b = 0;
int counter = 0;
Double last;
for (double v : values) {
if (Double.isNaN(v) == false) {
last = v;
if (counter == 0) {
s = v;
b = v - last;
} else {
s = alpha * v + (1.0d - alpha) * (last_s + last_b);
b = beta * (s - last_s) + (1 - beta) * last_b;
}
counter += 1;
last_s = s;
last_b = b;
}
}
if (counter == 0) {
return emptyPredictions(numForecasts);
}
double[] forecastValues = new double[numForecasts];
for (int i = 0; i < numForecasts; i++) {
forecastValues[i] = s + (i * b);
}
return forecastValues;
}
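// Worked example (added for illustration): holt(new double[]{1, 2, 3}, 0.5, 0.5)
//   v=1: s = 1.0,   b = 0.0      (the first point seeds the level; the trend starts at 0)
//   v=2: s = 1.5,   b = 0.25
//   v=3: s = 2.375, b = 0.5625
//   returns the zero-step forecast s + 0 * b = 2.375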
/**
* Calculate a triple exponential weighted moving average
*
* Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values
* (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g.
* the series mean). Useful values are somewhere in between. Defaults to 0.5.
*
* Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data.
* Gamma is equivalent to alpha, but controls the smoothing of the seasonality instead of the data
*
* Only finite values are averaged. NaN or null are ignored.
* If all values are missing/null/NaN, the return value will be NaN
* The average is based on the count of non-null, non-NaN values.
*
* @param alpha A double between 0-1 inclusive, controls data smoothing
* @param beta a double between 0-1 inclusive, controls trend smoothing
* @param gamma a double between 0-1 inclusive, controls seasonality smoothing
* @param period the expected periodicity of the data
* @param multiplicative true if multiplicative HW should be used. False for additive
*/
public static double holtWinters(double[] values, double alpha, double beta, double gamma,
int period, boolean multiplicative) {
if (values.length == 0) {
return Double.NaN;
}
double padding = multiplicative ? 0.0000000001 : 0.0;
return holtWintersForecast(values, alpha, beta, gamma, period, padding, multiplicative, 1)[0];
}
/**
* Version of holt-winters that can "forecast", not exposed as a whitelisted function for moving_fn scripts, but
* here as compatibility/code sharing for existing moving_avg agg. Can be removed when moving_avg is gone.
*/
public static double[] holtWintersForecast(double[] values, double alpha, double beta, double gamma,
int period, double padding, boolean multiplicative, int numForecasts) {
if (values.length < period * 2) {
// We need at least two full "seasons" to use HW
// This should have been caught earlier, we can't do anything now...bail
throw new IllegalArgumentException("Holt-Winters aggregation requires at least (2 * period == 2 * "
+ period + " == "+(2 * period)+") data-points to function. Only [" + values.length + "] were provided.");
}
// Smoothed value
double s = 0;
double last_s;
// Trend value
double b = 0;
double last_b = 0;
// Seasonal value
double[] seasonal = new double[values.length];
int counter = 0;
double[] vs = new double[values.length];
for (double v : values) {
if (Double.isNaN(v) == false) {
vs[counter] = v + padding;
counter += 1;
}
}
if (counter == 0) {
return emptyPredictions(numForecasts);
}
// Initial level value is average of first season
// Calculate the slopes between first and second season for each period
for (int i = 0; i < period; i++) {
s += vs[i];
b += (vs[i + period] - vs[i]) / period;
}
s /= period;
b /= period;
last_s = s;
// Calculate first seasonal
if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
Arrays.fill(seasonal, 0.0);
} else {
for (int i = 0; i < period; i++) {
seasonal[i] = vs[i] / s;
}
}
for (int i = period; i < vs.length; i++) {
// TODO if perf is a problem, we can specialize a subclass to avoid conditionals on each iteration
if (multiplicative) {
s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
} else {
s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
}
b = beta * (s - last_s) + (1 - beta) * last_b;
if (multiplicative) {
seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
} else {
seasonal[i] = gamma * (vs[i] - (last_s - last_b )) + (1 - gamma) * seasonal[i - period];
}
last_s = s;
last_b = b;
}
double[] forecastValues = new double[numForecasts];
for (int i = 1; i <= numForecasts; i++) {
int idx = values.length - period + ((i - 1) % period);
// TODO perhaps pad out seasonal to a power of 2 and use a mask instead of modulo?
if (multiplicative) {
forecastValues[i-1] = (s + (i * b)) * seasonal[idx];
} else {
forecastValues[i-1] = s + (i * b) + seasonal[idx];
}
}
return forecastValues;
}
/**
* Returns an empty set of predictions, filled with NaNs
* @param numPredictions Number of empty predictions to generate
*/
private static double[] emptyPredictions(int numPredictions) {
double[] predictions = new double[numPredictions];
Arrays.fill(predictions, Double.NaN);
return predictions;
}
}
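As a usage sketch, these helpers can be called directly from Java, and they are also what a moving_fn script body is expected to reference on the window bound to "values" (the whitelisting itself lives in the Painless module, not in this file, so the script form below is an assumption based on the aggregation's intended use):

double[] window = new double[] { 1.0, 2.0, 3.0 };
double avg      = MovingFunctions.unweightedAvg(window);   // 2.0
double smoothed = MovingFunctions.ewma(window, 0.5);       // 2.25
// ...or, from the moving_fn pipeline aggregation:
Script script = new Script("MovingFunctions.linearWeightedAvg(values)");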

View File

@@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ObjectParserHelper;
import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException; import java.io.IOException;
import java.util.Collections; import java.util.Collections;
@@ -214,6 +215,10 @@ public final class TaskInfo implements Writeable, ToXContentFragment {
return builder; return builder;
} }
public static TaskInfo fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
public static final ConstructingObjectParser<TaskInfo, Void> PARSER = new ConstructingObjectParser<>( public static final ConstructingObjectParser<TaskInfo, Void> PARSER = new ConstructingObjectParser<>(
"task_info", true, a -> { "task_info", true, a -> {
int i = 0; int i = 0;

View File

@@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import static org.hamcrest.Matchers.equalTo;
public class TaskOperationFailureTests extends AbstractXContentTestCase<TaskOperationFailure> {
@Override
protected TaskOperationFailure createTestInstance() {
return new TaskOperationFailure(randomAlphaOfLength(5), randomNonNegativeLong(), new IllegalStateException("message"));
}
@Override
protected TaskOperationFailure doParseInstance(XContentParser parser) throws IOException {
return TaskOperationFailure.fromXContent(parser);
}
@Override
protected void assertEqualInstances(TaskOperationFailure expectedInstance, TaskOperationFailure newInstance) {
assertNotSame(expectedInstance, newInstance);
assertThat(newInstance.getNodeId(), equalTo(expectedInstance.getNodeId()));
assertThat(newInstance.getTaskId(), equalTo(expectedInstance.getTaskId()));
assertThat(newInstance.getStatus(), equalTo(expectedInstance.getStatus()));
// XContent loses the original exception and wraps it as a message in Elasticsearch exception
assertThat(newInstance.getCause().getMessage(), equalTo("Elasticsearch exception [type=illegal_state_exception, reason=message]"));
// getReason returns Exception class and the message
assertThat(newInstance.getReason(),
equalTo("ElasticsearchException[Elasticsearch exception [type=illegal_state_exception, reason=message]]"));
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
@Override
protected boolean assertToXContentEquivalence() {
return false;
}
}

View File

@@ -18,6 +18,7 @@
*/ */
package org.elasticsearch.action.admin.cluster.node.tasks; package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
@@ -716,7 +717,7 @@ public class TasksIT extends ESIntegTestCase {
.setTimeout(timeValueSeconds(10)).get(); .setTimeout(timeValueSeconds(10)).get();
// It should finish quickly and without complaint and list the list tasks themselves // It should finish quickly and without complaint and list the list tasks themselves
assertThat(response.getNodeFailures(), emptyCollectionOf(FailedNodeException.class)); assertThat(response.getNodeFailures(), emptyCollectionOf(ElasticsearchException.class));
assertThat(response.getTaskFailures(), emptyCollectionOf(TaskOperationFailure.class)); assertThat(response.getTaskFailures(), emptyCollectionOf(TaskOperationFailure.class));
assertThat(response.getTasks().size(), greaterThanOrEqualTo(1)); assertThat(response.getTasks().size(), greaterThanOrEqualTo(1));
} }

View File

@@ -23,6 +23,7 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
@@ -83,6 +84,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {
return Arrays.asList(InternalSettingsPlugin.class); return Arrays.asList(InternalSettingsPlugin.class);
} }
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8318")
public void testCreateShrinkIndexToN() { public void testCreateShrinkIndexToN() {
int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
int[] shardSplits = randomFrom(possibleShardSplits); int[] shardSplits = randomFrom(possibleShardSplits);

View File

@@ -28,23 +28,51 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.equalTo;
public class IndicesOptionsTests extends ESTestCase { public class IndicesOptionsTests extends ESTestCase {
public void testSerialization() throws Exception { public void testSerialization() throws Exception {
int iterations = randomIntBetween(5, 20); int iterations = randomIntBetween(5, 20);
for (int i = 0; i < iterations; i++) { for (int i = 0; i < iterations; i++) {
Version version = randomVersionBetween(random(), Version.V_7_0_0_alpha1, null);
IndicesOptions indicesOptions = IndicesOptions.fromOptions( IndicesOptions indicesOptions = IndicesOptions.fromOptions(
randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
BytesStreamOutput output = new BytesStreamOutput(); BytesStreamOutput output = new BytesStreamOutput();
Version outputVersion = randomVersion(random()); output.setVersion(version);
output.setVersion(outputVersion);
indicesOptions.writeIndicesOptions(output); indicesOptions.writeIndicesOptions(output);
StreamInput streamInput = output.bytes().streamInput(); StreamInput streamInput = output.bytes().streamInput();
streamInput.setVersion(randomVersion(random())); streamInput.setVersion(version);
IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput);
assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable()));
assertThat(indicesOptions2.allowNoIndices(), equalTo(indicesOptions.allowNoIndices()));
assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen()));
assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed()));
assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices()));
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices()));
assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases());
}
}
public void testSerializationPre70() throws Exception {
int iterations = randomIntBetween(5, 20);
for (int i = 0; i < iterations; i++) {
Version version = randomVersionBetween(random(), null, Version.V_6_4_0);
IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(),
randomBoolean(), randomBoolean(), randomBoolean());
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(version);
indicesOptions.writeIndicesOptions(output);
StreamInput streamInput = output.bytes().streamInput();
streamInput.setVersion(version);
IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput); IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput);
assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable())); assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable()));

View File

@@ -38,6 +38,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@@ -61,6 +62,7 @@ import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.CoreMatchers.startsWith;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasSize;
@@ -188,9 +190,16 @@ public class TemplateUpgradeServiceTests extends ESTestCase {
additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}")); additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}"));
} }
TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, null, ThreadPool threadPool = mock(ThreadPool.class);
ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
when(threadPool.getThreadContext()).thenReturn(threadContext);
TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool,
Collections.emptyList()); Collections.emptyList());
IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.updateTemplates(additions, deletions));
assertThat(ise.getMessage(), containsString("template upgrade service should always happen in a system context"));
threadContext.markAsSystemContext();
service.updateTemplates(additions, deletions); service.updateTemplates(additions, deletions);
int updatesInProgress = service.getUpdatesInProgress(); int updatesInProgress = service.getUpdatesInProgress();
@@ -241,11 +250,14 @@ public class TemplateUpgradeServiceTests extends ESTestCase {
); );
ThreadPool threadPool = mock(ThreadPool.class); ThreadPool threadPool = mock(ThreadPool.class);
ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
when(threadPool.getThreadContext()).thenReturn(threadContext);
ExecutorService executorService = mock(ExecutorService.class); ExecutorService executorService = mock(ExecutorService.class);
when(threadPool.generic()).thenReturn(executorService); when(threadPool.generic()).thenReturn(executorService);
doAnswer(invocation -> { doAnswer(invocation -> {
Object[] args = invocation.getArguments(); Object[] args = invocation.getArguments();
assert args.length == 1; assert args.length == 1;
assertTrue(threadContext.isSystemContext());
Runnable runnable = (Runnable) args[0]; Runnable runnable = (Runnable) args[0];
runnable.run(); runnable.run();
updateInvocation.incrementAndGet(); updateInvocation.incrementAndGet();

View File

@@ -77,12 +77,12 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
@Override @Override
protected RangeFieldType createDefaultFieldType() { protected RangeFieldType createDefaultFieldType() {
return new RangeFieldType(type, Version.CURRENT); return new RangeFieldType(type);
} }
public void testRangeQuery() throws Exception { public void testRangeQuery() throws Exception {
QueryShardContext context = createContext(); QueryShardContext context = createContext();
RangeFieldType ft = new RangeFieldType(type, Version.CURRENT); RangeFieldType ft = new RangeFieldType(type);
ft.setName(FIELDNAME); ft.setName(FIELDNAME);
ft.setIndexOptions(IndexOptions.DOCS); ft.setIndexOptions(IndexOptions.DOCS);
@@ -106,7 +106,7 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
public void testDateRangeQueryUsingMappingFormat() { public void testDateRangeQueryUsingMappingFormat() {
QueryShardContext context = createContext(); QueryShardContext context = createContext();
RangeFieldType fieldType = new RangeFieldType(RangeType.DATE, Version.CURRENT); RangeFieldType fieldType = new RangeFieldType(RangeType.DATE);
fieldType.setName(FIELDNAME); fieldType.setName(FIELDNAME);
fieldType.setIndexOptions(IndexOptions.DOCS); fieldType.setIndexOptions(IndexOptions.DOCS);
fieldType.setHasDocValues(false); fieldType.setHasDocValues(false);
@@ -313,7 +313,7 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
public void testTermQuery() throws Exception { public void testTermQuery() throws Exception {
// See https://github.com/elastic/elasticsearch/issues/25950 // See https://github.com/elastic/elasticsearch/issues/25950
QueryShardContext context = createContext(); QueryShardContext context = createContext();
RangeFieldType ft = new RangeFieldType(type, Version.CURRENT); RangeFieldType ft = new RangeFieldType(type);
ft.setName(FIELDNAME); ft.setName(FIELDNAME);
ft.setIndexOptions(IndexOptions.DOCS); ft.setIndexOptions(IndexOptions.DOCS);

View File

@@ -1,86 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.store;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.Set;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.CoreMatchers.sameInstance;
public class DirectoryUtilsTests extends ESTestCase {
public void testGetLeave() throws IOException {
Path file = createTempDir();
final int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
{
BaseDirectoryWrapper dir = newFSDirectory(file);
FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(dir) {}, FSDirectory.class, null);
assertThat(directory, notNullValue());
assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
dir.close();
}
{
BaseDirectoryWrapper dir = newFSDirectory(file);
FSDirectory directory = DirectoryUtils.getLeaf(dir, FSDirectory.class, null);
assertThat(directory, notNullValue());
assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
dir.close();
}
{
Set<String> stringSet = Collections.emptySet();
BaseDirectoryWrapper dir = newFSDirectory(file);
FSDirectory directory = DirectoryUtils.getLeaf(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean()), FSDirectory.class, null);
assertThat(directory, notNullValue());
assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
dir.close();
}
{
Set<String> stringSet = Collections.emptySet();
BaseDirectoryWrapper dir = newFSDirectory(file);
FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, FSDirectory.class, null);
assertThat(directory, notNullValue());
assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
dir.close();
}
{
Set<String> stringSet = Collections.emptySet();
BaseDirectoryWrapper dir = newFSDirectory(file);
RAMDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, RAMDirectory.class, null);
assertThat(directory, nullValue());
dir.close();
}
}
}
}

View File

@@ -446,7 +446,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
InternalDateHistogram histogram; InternalDateHistogram histogram;
if (reduced) { if (reduced) {
histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, maxBucket, fieldType); histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, maxBucket, null, fieldType);
} else { } else {
histogram = search(indexSearcher, query, aggregationBuilder, maxBucket, fieldType); histogram = search(indexSearcher, query, aggregationBuilder, maxBucket, fieldType);
} }

View File

@@ -17,14 +17,27 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.search.aggregations.bucket; package org.elasticsearch.search.aggregations.bucket.histogram;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests;
import org.elasticsearch.search.aggregations.BucketOrder; import org.joda.time.DateTimeZone;
import org.junit.Assume;
import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
@@ -120,4 +133,73 @@ public class DateHistogramTests extends BaseAggregationTestCase<DateHistogramAgg
return orders; return orders;
} }
private static Document documentForDate(String field, long millis) {
Document doc = new Document();
doc.add(new LongPoint(field, millis));
doc.add(new SortedNumericDocValuesField(field, millis));
return doc;
}
public void testRewriteTimeZone() throws IOException {
Assume.assumeTrue(getCurrentTypes().length > 0); // we need mappings
FormatDateTimeFormatter format = Joda.forPattern("strict_date_optional_time");
try (Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) {
w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2018-03-11T11:55:00").getMillis()));
w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2017-10-30T18:13:00").getMillis()));
try (IndexReader readerThatDoesntCross = DirectoryReader.open(w)) {
w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2018-03-25T02:44:00").getMillis()));
try (IndexReader readerThatCrosses = DirectoryReader.open(w)) {
QueryShardContext shardContextThatDoesntCross = createShardContext(readerThatDoesntCross);
QueryShardContext shardContextThatCrosses = createShardContext(readerThatCrosses);
DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("my_date_histo");
builder.field(DATE_FIELD_NAME);
builder.dateHistogramInterval(DateHistogramInterval.DAY);
// no timeZone => no rewrite
assertNull(builder.rewriteTimeZone(shardContextThatDoesntCross));
assertNull(builder.rewriteTimeZone(shardContextThatCrosses));
// fixed timeZone => no rewrite
DateTimeZone tz = DateTimeZone.forOffsetHours(1);
builder.timeZone(tz);
assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross));
assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses));
// daylight-saving-times => rewrite if doesn't cross
tz = DateTimeZone.forID("Europe/Paris");
builder.timeZone(tz);
assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross));
assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses));
// Rounded values are no longer all within the same transitions => no rewrite
builder.dateHistogramInterval(DateHistogramInterval.MONTH);
assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross));
assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses));
builder = new DateHistogramAggregationBuilder("my_date_histo");
builder.field(DATE_FIELD_NAME);
builder.timeZone(tz);
builder.interval(1000L * 60 * 60 * 24); // ~ 1 day
assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross));
assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses));
// Because the interval is large, rounded values are not
// within the same transitions as the values => no rewrite
builder.interval(1000L * 60 * 60 * 24 * 30); // ~ 1 month
assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross));
assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses));
}
}
}
}
} }

View File

@@ -0,0 +1,146 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class AvgBucketAggregatorTests extends AggregatorTestCase {
private static final String DATE_FIELD = "date";
private static final String VALUE_FIELD = "value";
private static final List<String> dataset = Arrays.asList(
"2010-03-12T01:07:45",
"2010-04-27T03:43:34",
"2012-05-18T04:11:00",
"2013-05-29T05:11:31",
"2013-10-31T08:24:05",
"2015-02-13T13:09:32",
"2015-06-24T13:47:43",
"2015-11-13T16:14:34",
"2016-03-04T17:09:50",
"2017-12-12T22:55:46");
/**
* Test for issue #30608. Under the following circumstances:
*
* A. Multi-bucket agg in the first entry of our internal list
* B. Regular agg as the immediate child of the multi-bucket in A
* C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list
* D. Finally, a pipeline agg with the path down to B
*
* BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures
* it is fixed.
*
* Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private
* `doReduce()` needed for testing this
*/
public void testSameAggNames() throws IOException {
Query query = new MatchAllDocsQuery();
AvgAggregationBuilder avgBuilder = new AvgAggregationBuilder("foo").field(VALUE_FIELD);
DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("histo")
.dateHistogramInterval(DateHistogramInterval.YEAR)
.field(DATE_FIELD)
.subAggregation(new AvgAggregationBuilder("foo").field(VALUE_FIELD));
AvgBucketPipelineAggregationBuilder avgBucketBuilder
= new AvgBucketPipelineAggregationBuilder("the_avg_bucket", "histo>foo");
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
Document document = new Document();
for (String date : dataset) {
if (frequently()) {
indexWriter.commit();
}
document.add(new SortedNumericDocValuesField(DATE_FIELD, asLong(date)));
document.add(new SortedNumericDocValuesField(VALUE_FIELD, randomInt()));
indexWriter.addDocument(document);
document.clear();
}
}
InternalAvg avgResult;
InternalDateHistogram histogramResult;
try (IndexReader indexReader = DirectoryReader.open(directory)) {
IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
DateFieldMapper.Builder builder = new DateFieldMapper.Builder("histo");
DateFieldMapper.DateFieldType fieldType = builder.fieldType();
fieldType.setHasDocValues(true);
fieldType.setName(DATE_FIELD);
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
valueFieldType.setName(VALUE_FIELD);
valueFieldType.setHasDocValues(true);
avgResult = searchAndReduce(indexSearcher, query, avgBuilder, 10000, null,
new MappedFieldType[]{fieldType, valueFieldType});
histogramResult = searchAndReduce(indexSearcher, query, histo, 10000, null,
new MappedFieldType[]{fieldType, valueFieldType});
}
// Finally, reduce the pipeline agg
PipelineAggregator avgBucketAgg = avgBucketBuilder.createInternal(Collections.emptyMap());
List<Aggregation> reducedAggs = new ArrayList<>(2);
// Histo has to go first to exercise the bug
reducedAggs.add(histogramResult);
reducedAggs.add(avgResult);
Aggregations aggregations = new Aggregations(reducedAggs);
InternalAggregation pipelineResult = ((AvgBucketPipelineAggregator)avgBucketAgg).doReduce(aggregations, null);
assertNotNull(pipelineResult);
}
}
private static long asLong(String dateTime) {
return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis();
}
}

View File

@@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.test.AbstractSerializingTestCase;
import java.io.IOException;
public class MovFnPipelineAggregationBuilderSerializationTests extends AbstractSerializingTestCase<MovFnPipelineAggregationBuilder> {
@Override
protected MovFnPipelineAggregationBuilder createTestInstance() {
return new MovFnPipelineAggregationBuilder(randomAlphaOfLength(10), "foo", new Script("foo"), randomIntBetween(1, 10));
}
@Override
protected Writeable.Reader<MovFnPipelineAggregationBuilder> instanceReader() {
return MovFnPipelineAggregationBuilder::new;
}
@Override
protected MovFnPipelineAggregationBuilder doParseInstance(XContentParser parser) throws IOException {
return MovFnPipelineAggregationBuilder.parse(parser);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@@ -0,0 +1,164 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class MovFnUnitTests extends AggregatorTestCase {
private static final String DATE_FIELD = "date";
private static final String INSTANT_FIELD = "instant";
private static final String VALUE_FIELD = "value_field";
private static final List<String> datasetTimes = Arrays.asList(
"2017-01-01T01:07:45",
"2017-01-02T03:43:34",
"2017-01-03T04:11:00",
"2017-01-04T05:11:31",
"2017-01-05T08:24:05",
"2017-01-06T13:09:32",
"2017-01-07T13:47:43",
"2017-01-08T16:14:34",
"2017-01-09T17:09:50",
"2017-01-10T22:55:46");
private static final List<Integer> datasetValues = Arrays.asList(1,2,3,4,5,6,7,8,9,10);
public void testMatchAllDocs() throws IOException {
Query query = new MatchAllDocsQuery();
Script script = new Script(Script.DEFAULT_SCRIPT_TYPE, "painless", "test", Collections.emptyMap());
DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo");
aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(DATE_FIELD);
aggBuilder.subAggregation(new AvgAggregationBuilder("avg").field(VALUE_FIELD));
aggBuilder.subAggregation(new MovFnPipelineAggregationBuilder("mov_fn", "avg", script, 3));
executeTestCase(query, aggBuilder, histogram -> {
assertEquals(10, histogram.getBuckets().size());
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
for (int i = 0; i < buckets.size(); i++) {
if (i == 0) {
assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("mov_fn"))).value(), equalTo(Double.NaN));
} else {
assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("mov_fn"))).value(), equalTo(((double) i)));
}
}
}, 1000, script);
}
@SuppressWarnings("unchecked")
private void executeTestCase(Query query,
DateHistogramAggregationBuilder aggBuilder,
Consumer<Histogram> verify,
int maxBucket, Script script) throws IOException {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
Document document = new Document();
int counter = 0;
for (String date : datasetTimes) {
if (frequently()) {
indexWriter.commit();
}
long instant = asLong(date);
document.add(new SortedNumericDocValuesField(DATE_FIELD, instant));
document.add(new LongPoint(INSTANT_FIELD, instant));
document.add(new NumericDocValuesField(VALUE_FIELD, datasetValues.get(counter)));
indexWriter.addDocument(document);
document.clear();
counter += 1;
}
}
ScriptService scriptService = mock(ScriptService.class);
MovingFunctionScript.Factory factory = mock(MovingFunctionScript.Factory.class);
when(scriptService.compile(script, MovingFunctionScript.CONTEXT)).thenReturn(factory);
MovingFunctionScript scriptInstance = new MovingFunctionScript() {
@Override
public double execute(Map<String, Object> params, double[] values) {
assertNotNull(values);
return MovingFunctions.max(values);
}
};
when(factory.newInstance()).thenReturn(scriptInstance);
try (IndexReader indexReader = DirectoryReader.open(directory)) {
IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name");
DateFieldMapper.DateFieldType fieldType = builder.fieldType();
fieldType.setHasDocValues(true);
fieldType.setName(aggBuilder.field());
MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
valueFieldType.setHasDocValues(true);
valueFieldType.setName("value_field");
InternalDateHistogram histogram;
histogram = searchAndReduce(indexSearcher, query, aggBuilder, maxBucket, scriptService,
new MappedFieldType[]{fieldType, valueFieldType});
verify.accept(histogram);
}
}
}
private static long asLong(String dateTime) {
return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis();
}
}

View File

@@ -0,0 +1,684 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.movfn;
import org.elasticsearch.common.collect.EvictingQueue;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
import static org.hamcrest.Matchers.equalTo;
public class MovFnWhitelistedFunctionTests extends ESTestCase {
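// Each test builds a sliding window of random values and checks the corresponding MovingFunctions helper
// against a brute-force computation over the same window; the *Null* and *Empty* variants pin down the
// NaN (or 0.0 for sum) behaviour when the window holds no usable data.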
public void testWindowMax() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
double expected = -Double.MAX_VALUE;
if (i == 0) {
window.offer(randValue);
continue;
}
for (double value : window) {
expected = Math.max(expected, value);
}
double actual = MovingFunctions.max(window.stream().mapToDouble(Double::doubleValue).toArray());
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullWindowMax() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.max(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyWindowMax() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.max(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
}
public void testWindowMin() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
double expected = Double.MAX_VALUE;
if (i == 0) {
window.offer(randValue);
continue;
}
for (double value : window) {
expected = Math.min(expected, value);
}
double actual = MovingFunctions.min(window.stream().mapToDouble(Double::doubleValue).toArray());
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullWindowMin() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.min(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyWindowMin() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.min(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
}
public void testWindowSum() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
double expected = 0;
if (i == 0) {
window.offer(randValue);
continue;
}
for (double value : window) {
expected += value;
}
double actual = MovingFunctions.sum(window.stream().mapToDouble(Double::doubleValue).toArray());
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullWindowSum() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.sum(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(0.0));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyWindowSum() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.sum(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(0.0));
}
public void testSimpleMovAvg() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
double expected = 0;
if (i == 0) {
window.offer(randValue);
continue;
}
for (double value : window) {
expected += value;
}
expected /= window.size();
double actual = MovingFunctions.unweightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullSimpleMovAvg() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.unweightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptySimpleMovAvg() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.unweightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
}
public void testSimpleMovStdDev() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
double mean = 0;
if (i == 0) {
window.offer(randValue);
continue;
}
for (double value : window) {
mean += value;
}
mean /= window.size();
double expected = 0.0;
for (double value : window) {
expected += Math.pow(value - mean, 2);
}
expected = Math.sqrt(expected / window.size());
double actual = MovingFunctions.stdDev(window.stream().mapToDouble(Double::doubleValue).toArray(), mean);
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullSimpleStdDev() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.stdDev(window.stream().mapToDouble(Double::doubleValue).toArray(),
MovingFunctions.unweightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray()));
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptySimpleStdDev() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.stdDev(window.stream().mapToDouble(Double::doubleValue).toArray(),
MovingFunctions.unweightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray()));
assertThat(actual, equalTo(Double.NaN));
}
public void testLinearMovAvg() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
if (i == 0) {
window.offer(randValue);
continue;
}
double avg = 0;
long totalWeight = 1;
long current = 1;
for (double value : window) {
avg += value * current;
totalWeight += current;
current += 1;
}
double expected = avg / totalWeight;
double actual = MovingFunctions.linearWeightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullLinearMovAvg() {
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.linearWeightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyLinearMovAvg() {
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.linearWeightedAvg(window.stream().mapToDouble(Double::doubleValue).toArray());
assertThat(actual, equalTo(Double.NaN));
}
public void testEWMAMovAvg() {
double alpha = randomDouble();
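// Reference EWMA: seed with the first value, then s_t = alpha * x_t + (1 - alpha) * s_(t-1);
// MovingFunctions.ewma(...) is expected to match this within 1% of the expected value.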
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
if (i == 0) {
window.offer(randValue);
continue;
}
double avg = 0;
boolean first = true;
for (double value : window) {
if (first) {
avg = value;
first = false;
} else {
avg = (value * alpha) + (avg * (1 - alpha));
}
}
double expected = avg;
double actual = MovingFunctions.ewma(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha);
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullEwmaMovAvg() {
double alpha = randomDouble();
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.ewma(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha);
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyEwmaMovAvg() {
double alpha = randomDouble();
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.ewma(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha);
assertThat(actual, equalTo(Double.NaN));
}
public void testHoltLinearMovAvg() {
double alpha = randomDouble();
double beta = randomDouble();
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
double randValue = randomDouble();
if (i == 0) {
window.offer(randValue);
continue;
}
double s = 0;
double last_s = 0;
// Trend value
double b = 0;
double last_b = 0;
int counter = 0;
double last;
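// Reference Holt (double exponential) smoothing: level s_t = alpha * x_t + (1 - alpha) * (s_(t-1) + b_(t-1)),
// trend b_t = beta * (s_t - s_(t-1)) + (1 - beta) * b_(t-1); forecasting 0 steps ahead gives just s + 0 * b.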
for (double value : window) {
last = value;
if (counter == 0) {
s = value;
b = value - last;
} else {
s = alpha * value + (1.0d - alpha) * (last_s + last_b);
b = beta * (s - last_s) + (1 - beta) * last_b;
}
counter += 1;
last_s = s;
last_b = b;
}
double expected = s + (0 * b);
double actual = MovingFunctions.holt(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha, beta);
assertEquals(expected, actual, 0.01 * Math.abs(expected));
window.offer(randValue);
}
}
public void testNullHoltMovAvg() {
double alpha = randomDouble();
double beta = randomDouble();
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(1, 50);
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < numValues; i++) {
Double randValue = randomBoolean() ? Double.NaN : null;
if (i == 0) {
if (randValue != null) {
window.offer(randValue);
}
continue;
}
double actual = MovingFunctions.holt(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha, beta);
assertThat(actual, equalTo(Double.NaN));
if (randValue != null) {
window.offer(randValue);
}
}
}
public void testEmptyHoltMovAvg() {
double alpha = randomDouble();
double beta = randomDouble();
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.holt(window.stream().mapToDouble(Double::doubleValue).toArray(), alpha, beta);
assertThat(actual, equalTo(Double.NaN));
}
public void testHoltWintersMultiplicative() {
double alpha = randomDouble();
double beta = randomDouble();
double gamma = randomDouble();
int period = randomIntBetween(1,10);
int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < windowSize; i++) {
window.offer(randomDouble());
}
// Smoothed value
double s = 0;
double last_s = 0;
// Trend value
double b = 0;
double last_b = 0;
// Seasonal value
double[] seasonal = new double[windowSize];
int counter = 0;
double[] vs = new double[windowSize];
for (double v : window) {
vs[counter] = v + 0.0000000001;
counter += 1;
}
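// The tiny offset added above keeps every value strictly positive: the multiplicative model divides by the
// level and by the seasonal components, so a zero would turn this reference computation into NaN/Infinity.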
// Initial level value is average of first season
// Calculate the slopes between first and second season for each period
for (int i = 0; i < period; i++) {
s += vs[i];
b += (vs[i + period] - vs[i]) / period;
}
s /= period;
b /= period;
last_s = s;
// Calculate first seasonal
if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
Arrays.fill(seasonal, 0.0);
} else {
for (int i = 0; i < period; i++) {
seasonal[i] = vs[i] / s;
}
}
for (int i = period; i < vs.length; i++) {
s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
b = beta * (s - last_s) + (1 - beta) * last_b;
seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
last_s = s;
last_b = b;
}
int idx = window.size() - period + (0 % period);
double expected = (s + (1 * b)) * seasonal[idx];
double actual = MovingFunctions.holtWinters(window.stream().mapToDouble(Double::doubleValue).toArray(),
alpha, beta, gamma, period, true);
assertEquals(expected, actual, 0.01 * Math.abs(expected));
}
public void testNullHoltWintersMovAvg() {
double alpha = randomDouble();
double beta = randomDouble();
double gamma = randomDouble();
int period = randomIntBetween(1,10);
int numValues = randomIntBetween(1, 100);
int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < windowSize; i++) {
window.offer(Double.NaN);
}
for (int i = 0; i < numValues; i++) {
double actual = MovingFunctions.holtWinters(window.stream().mapToDouble(Double::doubleValue).toArray(),
alpha, beta, gamma, period, false);
assertThat(actual, equalTo(Double.NaN));
}
}
public void testEmptyHoltWintersMovAvg() {
double alpha = randomDouble();
double beta = randomDouble();
double gamma = randomDouble();
int period = randomIntBetween(1,10);
EvictingQueue<Double> window = new EvictingQueue<>(0);
double actual = MovingFunctions.holtWinters(window.stream().mapToDouble(Double::doubleValue).toArray(),
alpha, beta, gamma, period, false);
assertThat(actual, equalTo(Double.NaN));
}
public void testHoltWintersAdditive() {
double alpha = randomDouble();
double beta = randomDouble();
double gamma = randomDouble();
int period = randomIntBetween(1,10);
int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
EvictingQueue<Double> window = new EvictingQueue<>(windowSize);
for (int i = 0; i < windowSize; i++) {
window.offer(randomDouble());
}
// Smoothed value
double s = 0;
double last_s = 0;
// Trend value
double b = 0;
double last_b = 0;
// Seasonal value
double[] seasonal = new double[windowSize];
int counter = 0;
double[] vs = new double[windowSize];
for (double v : window) {
vs[counter] = v;
counter += 1;
}
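// Additive variant: seasonality is added/subtracted rather than divided out, so no positive-offset padding is
// needed, and the expected forecast below is s + b + seasonal[idx] instead of (s + b) * seasonal[idx].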
// Initial level value is average of first season
// Calculate the slopes between first and second season for each period
for (int i = 0; i < period; i++) {
s += vs[i];
b += (vs[i + period] - vs[i]) / period;
}
s /= period;
b /= period;
last_s = s;
// Calculate first seasonal
if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
Arrays.fill(seasonal, 0.0);
} else {
for (int i = 0; i < period; i++) {
seasonal[i] = vs[i] / s;
}
}
for (int i = period; i < vs.length; i++) {
s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
b = beta * (s - last_s) + (1 - beta) * last_b;
seasonal[i] = gamma * (vs[i] - (last_s - last_b )) + (1 - gamma) * seasonal[i - period];
last_s = s;
last_b = b;
}
int idx = window.size() - period + (0 % period);
double expected = s + (1 * b) + seasonal[idx];
double actual = MovingFunctions.holtWinters(window.stream().mapToDouble(Double::doubleValue).toArray(),
alpha, beta, gamma, period, false);
assertEquals(expected, actual, 0.01 * Math.abs(expected));
}
}

View File

@@ -312,7 +312,7 @@ public class MovAvgIT extends ESIntegTestCase {
double last;
for (double value : window) {
last = value;
-if (counter == 1) {
if (counter == 0) {
s = value;
b = value - last;
} else {

View File

@@ -31,6 +31,8 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersM
import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
import java.io.IOException;
public class MovAvgTests extends BasePipelineAggregationTestCase<MovAvgPipelineAggregationBuilder> {
@Override
@@ -94,6 +96,12 @@ public class MovAvgTests extends BasePipelineAggregationTestCase<MovAvgPipelineA
return factory;
}
@Override
public void testFromXContent() throws IOException {
super.testFromXContent();
assertWarnings("The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.");
}
public void testDefaultParsing() throws Exception {
MovAvgPipelineAggregationBuilder expected = new MovAvgPipelineAggregationBuilder("commits_moving_avg", "commits");
String json = "{" +
@@ -104,6 +112,7 @@ public class MovAvgTests extends BasePipelineAggregationTestCase<MovAvgPipelineA
" }" +
"}";
PipelineAggregationBuilder newAgg = parse(createParser(JsonXContent.jsonXContent, json));
assertWarnings("The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.");
assertNotSame(newAgg, expected);
assertEquals(expected, newAgg);
assertEquals(expected.hashCode(), newAgg.hashCode());

View File

@@ -246,7 +246,7 @@ public class MovAvgUnitTests extends ESTestCase {
double last;
for (double value : window) {
last = value;
-if (counter == 1) {
if (counter == 0) {
s = value;
b = value - last;
} else {
@@ -292,7 +292,7 @@ public class MovAvgUnitTests extends ESTestCase {
double last;
for (double value : window) {
last = value;
-if (counter == 1) {
if (counter == 0) {
s = value;
b = value - last;
} else {

View File

@@ -19,18 +19,33 @@
package org.elasticsearch.tasks;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
-import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
-public class ListTasksResponseTests extends ESTestCase {
public class ListTasksResponseTests extends AbstractXContentTestCase<ListTasksResponse> {
public void testEmptyToString() {
-assertEquals("{\"tasks\":{}}", new ListTasksResponse().toString());
assertEquals("{\"tasks\":[]}", new ListTasksResponse().toString());
}
public void testNonEmptyToString() {
@@ -38,8 +53,48 @@ public class ListTasksResponseTests extends ESTestCase {
new TaskId("node1", 1), "dummy-type", "dummy-action", "dummy-description", null, 0, 1, true, new TaskId("node1", 0),
Collections.singletonMap("foo", "bar"));
ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList());
-assertEquals("{\"tasks\":{\"node1:1\":{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\","
assertEquals("{\"tasks\":[{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\","
+ "\"description\":\"dummy-description\",\"start_time_in_millis\":0,\"running_time_in_nanos\":1,\"cancellable\":true,"
-+ "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}}}", tasksResponse.toString());
+ "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}]}", tasksResponse.toString());
}
@Override
protected ListTasksResponse createTestInstance() {
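// Random task infos plus a few task-level failures; node-level failures are fixed to a single
// FailedNodeException so assertEqualInstances below can check them by message rather than by equals().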
List<TaskInfo> tasks = new ArrayList<>();
for (int i = 0; i < randomInt(10); i++) {
tasks.add(TaskInfoTests.randomTaskInfo());
}
List<TaskOperationFailure> taskFailures = new ArrayList<>();
for (int i = 0; i < randomInt(5); i++) {
taskFailures.add(new TaskOperationFailure(
randomAlphaOfLength(5), randomNonNegativeLong(), new IllegalStateException("message")));
}
return new ListTasksResponse(tasks, taskFailures, Collections.singletonList(new FailedNodeException("", "message", null)));
}
@Override
protected ListTasksResponse doParseInstance(XContentParser parser) throws IOException {
return ListTasksResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
@Override
protected void assertEqualInstances(ListTasksResponse expectedInstance, ListTasksResponse newInstance) {
assertNotSame(expectedInstance, newInstance);
assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks()));
assertThat(newInstance.getNodeFailures().size(), equalTo(1));
for (ElasticsearchException failure : newInstance.getNodeFailures()) {
assertThat(failure, notNullValue());
assertThat(failure.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=message]"));
}
}
@Override
protected boolean assertToXContentEquivalence() {
return false;
}
}

View File

@@ -0,0 +1,156 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tasks;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
public class TaskInfoTests extends AbstractSerializingTestCase<TaskInfo> {
@Override
protected TaskInfo doParseInstance(XContentParser parser) {
return TaskInfo.fromXContent(parser);
}
@Override
protected TaskInfo createTestInstance() {
return randomTaskInfo();
}
@Override
protected Writeable.Reader<TaskInfo> instanceReader() {
return TaskInfo::new;
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new)));
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> "status".equals(field) || "headers".equals(field);
}
@Override
protected TaskInfo mutateInstance(TaskInfo info) throws IOException {
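// Mutate exactly one of the ten TaskInfo components so equals/hashCode based round-trip tests can pinpoint
// a field that does not serialize correctly.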
switch (between(0, 9)) {
case 0:
TaskId taskId = new TaskId(info.getTaskId().getNodeId() + randomAlphaOfLength(5), info.getTaskId().getId());
return new TaskInfo(taskId, info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), info.getHeaders());
case 1:
return new TaskInfo(info.getTaskId(), info.getType() + randomAlphaOfLength(5), info.getAction(), info.getDescription(),
info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(),
info.getHeaders());
case 2:
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction() + randomAlphaOfLength(5), info.getDescription(),
info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(),
info.getHeaders());
case 3:
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription() + randomAlphaOfLength(5),
info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(),
info.getHeaders());
case 4:
Task.Status newStatus = randomValueOtherThan(info.getStatus(), TaskInfoTests::randomRawTaskStatus);
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), newStatus,
info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), info.getHeaders());
case 5:
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime() + between(1, 100), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(),
info.getHeaders());
case 6:
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime(), info.getRunningTimeNanos() + between(1, 100), info.isCancellable(), info.getParentTaskId(),
info.getHeaders());
case 7:
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable() == false, info.getParentTaskId(),
info.getHeaders());
case 8:
TaskId parentId = new TaskId(info.getParentTaskId().getNodeId() + randomAlphaOfLength(5), info.getParentTaskId().getId());
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), parentId, info.getHeaders());
case 9:
Map<String, String> headers = info.getHeaders();
if (headers == null) {
headers = new HashMap<>(1);
} else {
headers = new HashMap<>(info.getHeaders());
}
headers.put(randomAlphaOfLength(15), randomAlphaOfLength(15));
return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(),
info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), headers);
default:
throw new IllegalStateException();
}
}
static TaskInfo randomTaskInfo() {
TaskId taskId = randomTaskId();
String type = randomAlphaOfLength(5);
String action = randomAlphaOfLength(5);
Task.Status status = randomBoolean() ? randomRawTaskStatus() : null;
String description = randomBoolean() ? randomAlphaOfLength(5) : null;
long startTime = randomLong();
long runningTimeNanos = randomLong();
boolean cancellable = randomBoolean();
TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId();
Map<String, String> headers = randomBoolean() ?
Collections.emptyMap() :
Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers);
}
private static TaskId randomTaskId() {
return new TaskId(randomAlphaOfLength(5), randomLong());
}
private static RawTaskStatus randomRawTaskStatus() {
try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) {
builder.startObject();
int fields = between(0, 10);
for (int f = 0; f < fields; f++) {
builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5));
}
builder.endObject();
return new RawTaskStatus(BytesReference.bytes(builder));
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
}

View File

@@ -19,8 +19,6 @@
package org.elasticsearch.tasks;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -37,6 +35,8 @@ import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
import static org.elasticsearch.tasks.TaskInfoTests.randomTaskInfo;
/**
* Round trip tests for {@link TaskResult} and those classes that it includes like {@link TaskInfo} and {@link RawTaskStatus}.
*/
@@ -125,37 +125,6 @@ public class TaskResultTests extends ESTestCase {
}
}
private static TaskInfo randomTaskInfo() throws IOException {
TaskId taskId = randomTaskId();
String type = randomAlphaOfLength(5);
String action = randomAlphaOfLength(5);
Task.Status status = randomBoolean() ? randomRawTaskStatus() : null;
String description = randomBoolean() ? randomAlphaOfLength(5) : null;
long startTime = randomLong();
long runningTimeNanos = randomLong();
boolean cancellable = randomBoolean();
TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId();
Map<String, String> headers =
randomBoolean() ? Collections.emptyMap() : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers);
}
private static TaskId randomTaskId() {
return new TaskId(randomAlphaOfLength(5), randomLong());
}
private static RawTaskStatus randomRawTaskStatus() throws IOException {
try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) {
builder.startObject();
int fields = between(0, 10);
for (int f = 0; f < fields; f++) {
builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5));
}
builder.endObject();
return new RawTaskStatus(BytesReference.bytes(builder));
}
}
private static ToXContent randomTaskResponse() {
Map<String, String> result = new TreeMap<>();
int fields = between(0, 10);

View File

@@ -26,6 +26,8 @@ import org.elasticsearch.index.similarity.ScriptedSimilarity.Field;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Query;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Term;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SearchLookup;
@@ -109,6 +111,9 @@ public class MockScriptEngine implements ScriptEngine {
} else if (context.instanceClazz.equals(SimilarityWeightScript.class)) {
SimilarityWeightScript.Factory factory = mockCompiled::createSimilarityWeightScript;
return context.factoryClazz.cast(factory);
} else if (context.instanceClazz.equals(MovingFunctionScript.class)) {
MovingFunctionScript.Factory factory = mockCompiled::createMovingFunctionScript;
return context.factoryClazz.cast(factory);
}
throw new IllegalArgumentException("mock script engine does not know how to handle context [" + context.name + "]");
}
@@ -169,6 +174,10 @@
public SimilarityWeightScript createSimilarityWeightScript() {
return new MockSimilarityWeightScript(script != null ? script : ctx -> 42d);
}
public MovingFunctionScript createMovingFunctionScript() {
return new MockMovingFunctionScript();
}
}
public class MockExecutableScript implements ExecutableScript {
@@ -327,4 +336,11 @@
return new Script(ScriptType.INLINE, "mock", script, emptyMap());
}
public class MockMovingFunctionScript extends MovingFunctionScript {
@Override
public double execute(Map<String, Object> params, double[] values) {
return MovingFunctions.unweightedAvg(values);
}
}
}

View File

@@ -61,8 +61,11 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.mock.orig.Mockito;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase;
@@ -302,7 +305,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes) throws IOException {
-return searchAndReduce(searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes);
return searchAndReduce(searcher, query, builder, DEFAULT_MAX_BUCKETS, null, fieldTypes);
}
/**
@@ -314,6 +317,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
Query query,
AggregationBuilder builder,
int maxBucket,
ScriptService scriptService,
MappedFieldType... fieldTypes) throws IOException {
final IndexReaderContext ctx = searcher.getTopReaderContext();
@@ -368,7 +372,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
// now do the final reduce
MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumer(maxBucket);
InternalAggregation.ReduceContext context =
-new InternalAggregation.ReduceContext(root.context().bigArrays(), null, reduceBucketConsumer, true);
new InternalAggregation.ReduceContext(root.context().bigArrays(), scriptService, reduceBucketConsumer, true);
@SuppressWarnings("unchecked")
A internalAgg = (A) aggs.get(0).doReduce(aggs, context);

View File

@@ -38,6 +38,7 @@ import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.test.AbstractBuilderTestCase;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;
@@ -50,60 +51,12 @@ import java.util.List;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.hamcrest.Matchers.hasSize;
-public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuilder<AB>> extends ESTestCase {
public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuilder<AB>> extends AbstractBuilderTestCase {
protected static final String STRING_FIELD_NAME = "mapped_string";
protected static final String INT_FIELD_NAME = "mapped_int";
protected static final String DOUBLE_FIELD_NAME = "mapped_double";
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String IP_FIELD_NAME = "mapped_ip";
private String[] currentTypes;
protected String[] getCurrentTypes() {
return currentTypes;
}
private NamedWriteableRegistry namedWriteableRegistry;
private NamedXContentRegistry xContentRegistry;
protected abstract AB createTestAggregatorBuilder();
protected Collection<Class<? extends Plugin>> getPlugins() {
return Collections.emptyList();
}
/**
* Setup for the whole base test class.
*/
@Override
public void setUp() throws Exception {
super.setUp();
Settings settings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
PluginsService pluginsService = new PluginsService(settings, null, null, null, getPlugins());
SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents());
//create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];
for (int i = 0; i < currentTypes.length; i++) {
String type = randomAlphaOfLengthBetween(1, 10);
currentTypes[i] = type;
}
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return xContentRegistry;
}
/**
* Generic test that creates new AggregatorFactory from the test
* AggregatorFactory and checks both for equality and asserts equality on
@@ -157,7 +110,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
AB testAgg = createTestAggregatorBuilder();
try (BytesStreamOutput output = new BytesStreamOutput()) {
output.writeNamedWriteable(testAgg);
-try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) {
AggregationBuilder deserialized = in.readNamedWriteable(AggregationBuilder.class);
assertEquals(testAgg, deserialized);
assertEquals(testAgg.hashCode(), deserialized.hashCode());
@@ -181,12 +134,12 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
// we use the streaming infra to create a copy of the query provided as
// argument
-private AB copyAggregation(AB agg) throws IOException {
protected AB copyAggregation(AB agg) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
agg.writeTo(output);
-try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) {
@SuppressWarnings("unchecked")
-AB secondAgg = (AB) namedWriteableRegistry.getReader(AggregationBuilder.class, agg.getWriteableName()).read(in);
AB secondAgg = (AB) namedWriteableRegistry().getReader(AggregationBuilder.class, agg.getWriteableName()).read(in);
return secondAgg;
}
}

View File

@@ -0,0 +1,399 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.internal.SearchContext;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.stream.Stream;
import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.toList;
public abstract class AbstractBuilderTestCase extends ESTestCase {
public static final String STRING_FIELD_NAME = "mapped_string";
protected static final String STRING_FIELD_NAME_2 = "mapped_string_2";
protected static final String INT_FIELD_NAME = "mapped_int";
protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range";
protected static final String DOUBLE_FIELD_NAME = "mapped_double";
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String DATE_RANGE_FIELD_NAME = "mapped_date_range";
protected static final String OBJECT_FIELD_NAME = "mapped_object";
protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point";
protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape";
protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME,
GEO_SHAPE_FIELD_NAME};
protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, };
protected static Version indexVersionCreated;
private static ServiceHolder serviceHolder;
private static int queryNameId = 0;
private static Settings nodeSettings;
private static Index index;
private static String[] currentTypes;
protected static String[] randomTypes;
protected static Index getIndex() {
return index;
}
protected static String[] getCurrentTypes() {
return currentTypes;
}
protected Collection<Class<? extends Plugin>> getPlugins() {
return Collections.emptyList();
}
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
}
@BeforeClass
public static void beforeClass() {
nodeSettings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_");
// Set a single type in the index
switch (random().nextInt(3)) {
case 0:
currentTypes = new String[0]; // no types
break;
default:
currentTypes = new String[] { "_doc" };
break;
}
randomTypes = getRandomTypes();
}
private static String[] getRandomTypes() {
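// Either target a random subset of the concrete types, all types via MetaData.ALL, or no types at all.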
String[] types;
if (currentTypes.length > 0 && randomBoolean()) {
int numberOfQueryTypes = randomIntBetween(1, currentTypes.length);
types = new String[numberOfQueryTypes];
for (int i = 0; i < numberOfQueryTypes; i++) {
types[i] = randomFrom(currentTypes);
}
} else {
if (randomBoolean()) {
types = new String[]{MetaData.ALL};
} else {
types = new String[0];
}
}
return types;
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return serviceHolder.xContentRegistry;
}
protected NamedWriteableRegistry namedWriteableRegistry() {
return serviceHolder.namedWriteableRegistry;
}
/**
* make sure query names are unique by suffixing them with increasing counter
*/
protected static String createUniqueRandomName() {
String queryName = randomAlphaOfLengthBetween(1, 10) + queryNameId;
queryNameId++;
return queryName;
}
protected Settings indexSettings() {
// we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
indexVersionCreated = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
return Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
.build();
}
@AfterClass
public static void afterClass() throws Exception {
IOUtils.close(serviceHolder);
serviceHolder = null;
}
@Before
public void beforeTest() throws IOException {
if (serviceHolder == null) {
serviceHolder = new ServiceHolder(nodeSettings, indexSettings(), getPlugins(), this);
}
serviceHolder.clientInvocationHandler.delegate = this;
}
protected static SearchContext getSearchContext(String[] types, QueryShardContext context) {
TestSearchContext testSearchContext = new TestSearchContext(context) {
@Override
public MapperService mapperService() {
return serviceHolder.mapperService; // need to build / parse inner hits sort fields
}
@Override
public <IFD extends IndexFieldData<?>> IFD getForField(MappedFieldType fieldType) {
return serviceHolder.indexFieldDataService.getForField(fieldType); // need to build / parse inner hits sort fields
}
};
testSearchContext.getQueryShardContext().setTypes(types);
return testSearchContext;
}
@After
public void afterTest() {
serviceHolder.clientInvocationHandler.delegate = null;
}
/**
* Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders
*/
protected GetResponse executeGet(GetRequest getRequest) {
throw new UnsupportedOperationException("this test can't handle GET requests");
}
/**
* Override this to handle {@link Client#multiTermVectors(MultiTermVectorsRequest)} calls from parsers / builders
*/
protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsRequest mtvRequest) {
throw new UnsupportedOperationException("this test can't handle MultiTermVector requests");
}
/**
* @return a new {@link QueryShardContext} with the provided reader
*/
protected static QueryShardContext createShardContext(IndexReader reader) {
return serviceHolder.createShardContext(reader);
}
/**
* @return a new {@link QueryShardContext} based on the base test index and queryParserService
*/
protected static QueryShardContext createShardContext() {
return createShardContext(null);
}
private static class ClientInvocationHandler implements InvocationHandler {
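// Proxy-based stand-in for a node client: get and multiTermVectors calls are routed back to the currently
// running test (which may override executeGet / executeMultiTermVectors), anything else fails fast.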
AbstractBuilderTestCase delegate;
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))){
GetResponse getResponse = delegate.executeGet((GetRequest) args[0]);
ActionListener<GetResponse> listener = (ActionListener<GetResponse>) args[1];
if (randomBoolean()) {
listener.onResponse(getResponse);
} else {
new Thread(() -> listener.onResponse(getResponse)).start();
}
return null;
} else if (method.equals(Client.class.getMethod
("multiTermVectors", MultiTermVectorsRequest.class))) {
return new PlainActionFuture<MultiTermVectorsResponse>() {
@Override
public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException {
return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]);
}
};
} else if (method.equals(Object.class.getMethod("toString"))) {
return "MockClient";
}
throw new UnsupportedOperationException("this test can't handle calls to: " + method);
}
}
private static class ServiceHolder implements Closeable {
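// Minimal per-test "node": wires up just enough index services (mapper, analysis, script, fielddata,
// similarity, bitset cache) for query and aggregation builders to be parsed, serialized and rewritten.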
private final IndexFieldDataService indexFieldDataService;
private final SearchModule searchModule;
private final NamedWriteableRegistry namedWriteableRegistry;
private final NamedXContentRegistry xContentRegistry;
private final ClientInvocationHandler clientInvocationHandler = new ClientInvocationHandler();
private final IndexSettings idxSettings;
private final SimilarityService similarityService;
private final MapperService mapperService;
private final BitsetFilterCache bitsetFilterCache;
private final ScriptService scriptService;
private final Client client;
private final long nowInMillis = randomNonNegativeLong();
ServiceHolder(Settings nodeSettings, Settings indexSettings,
Collection<Class<? extends Plugin>> plugins, AbstractBuilderTestCase testCase) throws IOException {
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings);
PluginsService pluginsService;
pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins);
client = (Client) Proxy.newProxyInstance(
Client.class.getClassLoader(),
new Class[]{Client.class},
clientInvocationHandler);
ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class));
List<Setting<?>> additionalSettings = pluginsService.getPluginSettings();
additionalSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter());
searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
xContentRegistry = new NamedXContentRegistry(Stream.of(
searchModule.getNamedXContents().stream()
).flatMap(Function.identity()).collect(toList()));
IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings();
idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings);
AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList());
IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
scriptService = scriptModule.getScriptService();
similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap());
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
mapperService = new MapperService(idxSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,
() -> createShardContext(null));
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache,
new NoneCircuitBreakerService(), mapperService);
bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
}
@Override
public void onRemoval(ShardId shardId, Accountable accountable) {
}
});
for (String type : currentTypes) {
mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type,
STRING_FIELD_NAME, "type=text",
STRING_FIELD_NAME_2, "type=keyword",
INT_FIELD_NAME, "type=integer",
INT_RANGE_FIELD_NAME, "type=integer_range",
DOUBLE_FIELD_NAME, "type=double",
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
DATE_RANGE_FIELD_NAME, "type=date_range",
OBJECT_FIELD_NAME, "type=object",
GEO_POINT_FIELD_NAME, "type=geo_point",
GEO_SHAPE_FIELD_NAME, "type=geo_shape"
))), MapperService.MergeReason.MAPPING_UPDATE);
// also add mappings for two inner fields in the object field
mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\","
+ "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" +
INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"),
MapperService.MergeReason.MAPPING_UPDATE);
}
testCase.initializeAdditionalMappings(mapperService);
}
@Override
public void close() throws IOException {
}
QueryShardContext createShardContext(IndexReader reader) {
return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService::getForField, mapperService,
similarityService, scriptService, xContentRegistry, namedWriteableRegistry, this.client, reader, () -> nowInMillis, null);
}
ScriptModule createScriptModule(List<ScriptPlugin> scriptPlugins) {
if (scriptPlugins == null || scriptPlugins.isEmpty()) {
return newTestScriptModule();
}
return new ScriptModule(Settings.EMPTY, scriptPlugins);
}
}
}

View File

@ -25,33 +25,17 @@ import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -64,55 +48,18 @@ import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.query.support.QueryParsers; import org.elasticsearch.index.query.support.QueryParsers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTime; import org.joda.time.DateTime;
import org.joda.time.DateTimeZone; import org.joda.time.DateTimeZone;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.Deque; import java.util.Deque;
import java.util.HashSet; import java.util.HashSet;
@ -121,12 +68,7 @@ import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.stream.Stream;
import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.toList;
import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.equalTo;
@ -136,116 +78,10 @@ import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.instanceOf;
public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> extends ESTestCase { public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> extends AbstractBuilderTestCase {
public static final String STRING_FIELD_NAME = "mapped_string";
protected static final String STRING_FIELD_NAME_2 = "mapped_string_2";
protected static final String INT_FIELD_NAME = "mapped_int";
protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range";
protected static final String DOUBLE_FIELD_NAME = "mapped_double";
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String DATE_RANGE_FIELD_NAME = "mapped_date_range";
protected static final String OBJECT_FIELD_NAME = "mapped_object";
protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point";
protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape";
protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME,
GEO_SHAPE_FIELD_NAME};
private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME,
DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, };
private static final int NUMBER_OF_TESTQUERIES = 20; private static final int NUMBER_OF_TESTQUERIES = 20;
protected static Version indexVersionCreated;
private static ServiceHolder serviceHolder;
private static int queryNameId = 0;
private static Settings nodeSettings;
private static Index index;
private static String[] currentTypes;
private static String[] randomTypes;
protected static Index getIndex() {
return index;
}
protected static String[] getCurrentTypes() {
return currentTypes;
}
protected Collection<Class<? extends Plugin>> getPlugins() {
return Collections.emptyList();
}
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
}
@BeforeClass
public static void beforeClass() {
nodeSettings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_");
// Set a single type in the index
switch (random().nextInt(3)) {
case 0:
currentTypes = new String[0]; // no types
break;
default:
currentTypes = new String[] { "_doc" };
break;
}
randomTypes = getRandomTypes();
}
protected Settings indexSettings() {
// we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
indexVersionCreated = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
return Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
.build();
}
@AfterClass
public static void afterClass() throws Exception {
IOUtils.close(serviceHolder);
serviceHolder = null;
}
@Before
public void beforeTest() throws IOException {
if (serviceHolder == null) {
serviceHolder = new ServiceHolder(nodeSettings, indexSettings(), getPlugins(), this);
}
serviceHolder.clientInvocationHandler.delegate = this;
}
private static SearchContext getSearchContext(String[] types, QueryShardContext context) {
TestSearchContext testSearchContext = new TestSearchContext(context) {
@Override
public MapperService mapperService() {
return serviceHolder.mapperService; // need to build / parse inner hits sort fields
}
@Override
public <IFD extends IndexFieldData<?>> IFD getForField(MappedFieldType fieldType) {
return serviceHolder.indexFieldDataService.getForField(fieldType); // need to build / parse inner hits sort fields
}
};
testSearchContext.getQueryShardContext().setTypes(types);
return testSearchContext;
}
@After
public void afterTest() {
serviceHolder.clientInvocationHandler.delegate = null;
}
public final QB createTestQueryBuilder() { public final QB createTestQueryBuilder() {
QB query = doCreateTestQueryBuilder(); QB query = doCreateTestQueryBuilder();
//we should not set boost and query name for queries that don't parse it //we should not set boost and query name for queries that don't parse it
@ -260,15 +96,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
return query; return query;
} }
/**
* make sure query names are unique by suffixing them with increasing counter
*/
private static String createUniqueRandomName() {
String queryName = randomAlphaOfLengthBetween(1, 10) + queryNameId;
queryNameId++;
return queryName;
}
/** /**
* Create the query that is being tested * Create the query that is being tested
*/ */
@ -717,18 +544,18 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
} }
} }
protected static QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException { protected QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException {
return assertSerialization(testQuery, Version.CURRENT); return assertSerialization(testQuery, Version.CURRENT);
} }
/** /**
* Serialize the given query builder and asserts that both are equal * Serialize the given query builder and asserts that both are equal
*/ */
protected static QueryBuilder assertSerialization(QueryBuilder testQuery, Version version) throws IOException { protected QueryBuilder assertSerialization(QueryBuilder testQuery, Version version) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) { try (BytesStreamOutput output = new BytesStreamOutput()) {
output.setVersion(version); output.setVersion(version);
output.writeNamedWriteable(testQuery); output.writeNamedWriteable(testQuery);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), serviceHolder.namedWriteableRegistry)) { try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) {
in.setVersion(version); in.setVersion(version);
QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class); QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class);
assertEquals(testQuery, deserializedQuery); assertEquals(testQuery, deserializedQuery);
@ -780,15 +607,8 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
//we use the streaming infra to create a copy of the query provided as argument //we use the streaming infra to create a copy of the query provided as argument
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
private QB copyQuery(QB query) throws IOException { private QB copyQuery(QB query) throws IOException {
Reader<QB> reader = (Reader<QB>) serviceHolder.namedWriteableRegistry.getReader(QueryBuilder.class, query.getWriteableName()); Reader<QB> reader = (Reader<QB>) namedWriteableRegistry().getReader(QueryBuilder.class, query.getWriteableName());
return copyWriteable(query, serviceHolder.namedWriteableRegistry, reader); return copyWriteable(query, namedWriteableRegistry(), reader);
}
/**
* @return a new {@link QueryShardContext} based on the base test index and queryParserService
*/
protected static QueryShardContext createShardContext() {
return serviceHolder.createShardContext();
} }
/** /**
@ -840,7 +660,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
*/ */
protected static String getRandomFieldName() { protected static String getRandomFieldName() {
// if no type is set then return a random field name // if no type is set then return a random field name
if (currentTypes.length == 0 || randomBoolean()) { if (getCurrentTypes().length == 0 || randomBoolean()) {
return randomAlphaOfLengthBetween(1, 10); return randomAlphaOfLengthBetween(1, 10);
} }
return randomFrom(MAPPED_LEAF_FIELD_NAMES); return randomFrom(MAPPED_LEAF_FIELD_NAMES);
@ -863,24 +683,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
return rewrite; return rewrite;
} }
private static String[] getRandomTypes() {
String[] types;
if (currentTypes.length > 0 && randomBoolean()) {
int numberOfQueryTypes = randomIntBetween(1, currentTypes.length);
types = new String[numberOfQueryTypes];
for (int i = 0; i < numberOfQueryTypes; i++) {
types[i] = randomFrom(currentTypes);
}
} else {
if (randomBoolean()) {
types = new String[]{MetaData.ALL};
} else {
types = new String[0];
}
}
return types;
}
protected static Fuzziness randomFuzziness(String fieldName) { protected static Fuzziness randomFuzziness(String fieldName) {
switch (fieldName) { switch (fieldName) {
case INT_FIELD_NAME: case INT_FIELD_NAME:
@ -905,50 +707,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
return randomFrom("1", "-1", "75%", "-25%", "2<75%", "2<-25%"); return randomFrom("1", "-1", "75%", "-25%", "2<75%", "2<-25%");
} }
private static class ClientInvocationHandler implements InvocationHandler {
AbstractQueryTestCase<?> delegate;
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))){
GetResponse getResponse = delegate.executeGet((GetRequest) args[0]);
ActionListener<GetResponse> listener = (ActionListener<GetResponse>) args[1];
if (randomBoolean()) {
listener.onResponse(getResponse);
} else {
new Thread(() -> listener.onResponse(getResponse)).start();
}
return null;
} else if (method.equals(Client.class.getMethod
("multiTermVectors", MultiTermVectorsRequest.class))) {
return new PlainActionFuture<MultiTermVectorsResponse>() {
@Override
public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException {
return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]);
}
};
} else if (method.equals(Object.class.getMethod("toString"))) {
return "MockClient";
}
throw new UnsupportedOperationException("this test can't handle calls to: " + method);
}
}
/**
* Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders
*/
protected GetResponse executeGet(GetRequest getRequest) {
throw new UnsupportedOperationException("this test can't handle GET requests");
}
/**
* Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders
*/
protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsRequest mtvRequest) {
throw new UnsupportedOperationException("this test can't handle MultiTermVector requests");
}
/** /**
* Call this method to check a valid json string representing the query under test against * Call this method to check a valid json string representing the query under test against
* it's generated json. * it's generated json.
@ -1015,113 +773,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
return query; return query;
} }
@Override
protected NamedXContentRegistry xContentRegistry() {
return serviceHolder.xContentRegistry;
}
private static class ServiceHolder implements Closeable {
private final IndexFieldDataService indexFieldDataService;
private final SearchModule searchModule;
private final NamedWriteableRegistry namedWriteableRegistry;
private final NamedXContentRegistry xContentRegistry;
private final ClientInvocationHandler clientInvocationHandler = new ClientInvocationHandler();
private final IndexSettings idxSettings;
private final SimilarityService similarityService;
private final MapperService mapperService;
private final BitsetFilterCache bitsetFilterCache;
private final ScriptService scriptService;
private final Client client;
private final long nowInMillis = randomNonNegativeLong();
ServiceHolder(Settings nodeSettings, Settings indexSettings,
Collection<Class<? extends Plugin>> plugins, AbstractQueryTestCase<?> testCase) throws IOException {
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings);
PluginsService pluginsService;
pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins);
client = (Client) Proxy.newProxyInstance(
Client.class.getClassLoader(),
new Class[]{Client.class},
clientInvocationHandler);
ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class));
List<Setting<?>> additionalSettings = pluginsService.getPluginSettings();
additionalSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter());
searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables());
namedWriteableRegistry = new NamedWriteableRegistry(entries);
xContentRegistry = new NamedXContentRegistry(Stream.of(
searchModule.getNamedXContents().stream()
).flatMap(Function.identity()).collect(toList()));
IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings();
idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings);
AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList());
IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings);
scriptService = scriptModule.getScriptService();
similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap());
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
mapperService = new MapperService(idxSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,
this::createShardContext);
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache,
new NoneCircuitBreakerService(), mapperService);
bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
}
@Override
public void onRemoval(ShardId shardId, Accountable accountable) {
}
});
for (String type : currentTypes) {
mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type,
STRING_FIELD_NAME, "type=text",
STRING_FIELD_NAME_2, "type=keyword",
INT_FIELD_NAME, "type=integer",
INT_RANGE_FIELD_NAME, "type=integer_range",
DOUBLE_FIELD_NAME, "type=double",
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
DATE_RANGE_FIELD_NAME, "type=date_range",
OBJECT_FIELD_NAME, "type=object",
GEO_POINT_FIELD_NAME, "type=geo_point",
GEO_SHAPE_FIELD_NAME, "type=geo_shape"
))), MapperService.MergeReason.MAPPING_UPDATE);
// also add mappings for two inner field in the object field
mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\","
+ "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" +
INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"),
MapperService.MergeReason.MAPPING_UPDATE);
}
testCase.initializeAdditionalMappings(mapperService);
}
@Override
public void close() throws IOException {
}
QueryShardContext createShardContext() {
return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService::getForField, mapperService,
similarityService, scriptService, xContentRegistry, namedWriteableRegistry, this.client, null, () -> nowInMillis, null);
}
ScriptModule createScriptModule(List<ScriptPlugin> scriptPlugins) {
if (scriptPlugins == null || scriptPlugins.isEmpty()) {
return newTestScriptModule();
}
return new ScriptModule(Settings.EMPTY, scriptPlugins);
}
}
protected QueryBuilder rewriteAndFetch(QueryBuilder builder, QueryRewriteContext context) throws IOException { protected QueryBuilder rewriteAndFetch(QueryBuilder builder, QueryRewriteContext context) throws IOException {
PlainActionFuture<QueryBuilder> future = new PlainActionFuture<>(); PlainActionFuture<QueryBuilder> future = new PlainActionFuture<>();
Rewriteable.rewriteAndFetch(builder, context, future); Rewriteable.rewriteAndFetch(builder, context, future);

View File

@ -659,20 +659,20 @@ public abstract class ESTestCase extends LuceneTestCase {
return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints);
} }
public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull, boolean allowEmpty) { public static String[] generateRandomStringArray(int maxArraySize, int stringSize, boolean allowNull, boolean allowEmpty) {
if (allowNull && random().nextBoolean()) { if (allowNull && random().nextBoolean()) {
return null; return null;
} }
int arraySize = randomIntBetween(allowEmpty ? 0 : 1, maxArraySize); int arraySize = randomIntBetween(allowEmpty ? 0 : 1, maxArraySize);
String[] array = new String[arraySize]; String[] array = new String[arraySize];
for (int i = 0; i < arraySize; i++) { for (int i = 0; i < arraySize; i++) {
array[i] = RandomStrings.randomAsciiOfLength(random(), maxStringSize); array[i] = RandomStrings.randomAsciiOfLength(random(), stringSize);
} }
return array; return array;
} }
public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) { public static String[] generateRandomStringArray(int maxArraySize, int stringSize, boolean allowNull) {
return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true); return generateRandomStringArray(maxArraySize, stringSize, allowNull, true);
} }
private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m", "micros", "nanos"}; private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m", "micros", "nanos"};

View File

@ -81,7 +81,7 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/ml/validate-job.asciidoc',
'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/security/authenticate.asciidoc',
'en/rest-api/watcher/stats.asciidoc', 'en/rest-api/watcher/stats.asciidoc',
'en/security/authorization/overview.asciidoc', 'en/security/authorization/managing-roles.asciidoc',
'en/watcher/example-watches/watching-time-series-data.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc',
] ]

View File

@ -0,0 +1,114 @@
[role="xpack"]
[[built-in-roles]]
=== Built-in roles
{security} applies a default role to all users, including
<<anonymous-access, anonymous users>>. The default role enables users to access
the authenticate endpoint, change their own passwords, and get information about
themselves.
{security} also provides a set of built-in roles you can explicitly assign
to users. These roles have a fixed set of privileges and cannot be updated.
[[built-in-roles-ingest-user]] `ingest_admin` ::
Grants access to manage *all* index templates and *all* ingest pipeline configurations.
+
NOTE: This role does *not* provide the ability to create indices; those privileges
must be defined in a separate role.
[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` ::
Grants access to the {kib} Dashboard and read-only permissions on the `.kibana`
index. This role does not have access to editing tools in {kib}. For more
information, see
{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode].
[[built-in-roles-kibana-system]] `kibana_system` ::
Grants access necessary for the {kib} system user to read from and write to the
{kib} indices, manage index templates, and check the availability of the {es} cluster.
This role grants read access to the `.monitoring-*` indices and read and write access
to the `.reporting-*` indices. For more information, see
{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}].
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
[[built-in-roles-kibana-user]] `kibana_user`::
Grants the minimum privileges required for any user of {kib}. This role grants
access to the {kib} indices and grants monitoring privileges for the cluster.
[[built-in-roles-logstash-admin]] `logstash_admin` ::
Grants access to the `.logstash*` indices for managing configurations.
[[built-in-roles-logstash-system]] `logstash_system` ::
Grants access necessary for the Logstash system user to send system-level data
(such as monitoring) to {es}. For more information, see
{logstash-ref}/ls-security.html[Configuring Security in Logstash].
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
+
NOTE: This role does not provide access to the logstash indices and is not
suitable for use within a Logstash pipeline.
[[built-in-roles-beats-system]] `beats_system` ::
Grants access necessary for the Beats system user to send system-level data
(such as monitoring) to {es}.
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
+
NOTE: This role does not provide access to the beats indices and is not
suitable for writing beats output to {es}.
[[built-in-roles-ml-admin]] `machine_learning_admin`::
Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices.
[[built-in-roles-ml-user]] `machine_learning_user`::
Grants the minimum privileges required to view {xpackml} configuration,
status, and results. This role grants `monitor_ml` cluster privileges and
read access to the `.ml-notifications` and `.ml-anomalies*` indices,
which store {ml} results.
[[built-in-roles-monitoring-user]] `monitoring_user`::
Grants the minimum privileges required for any user of {monitoring} other than those
required to use {kib}. This role grants access to the monitoring indices and grants
privileges necessary for reading basic cluster information. Monitoring users should
also be assigned the `kibana_user` role.
[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`::
Grants the minimum privileges required for a remote monitoring agent to write data
into this cluster.
[[built-in-roles-reporting-user]] `reporting_user`::
Grants the specific privileges required for users of {reporting} other than those
required to use {kib}. This role grants access to the reporting indices. Reporting
users should also be assigned the `kibana_user` role and a role that grants them
access to the data that will be used to generate reports.
[[built-in-roles-superuser]] `superuser`::
Grants full access to the cluster, including all indices and data. A user with
the `superuser` role can also manage users and roles and
<<run-as-privilege, impersonate>> any other user in the system. Due to the
permissive nature of this role, take extra care when assigning it to a user.
[[built-in-roles-transport-client]] `transport_client`::
Grants the privileges required to access the cluster through the Java Transport
Client. The Java Transport Client fetches information about the nodes in the
cluster using the _Node Liveness API_ and the _Cluster State API_ (when
sniffing is enabled). Assign your users this role if they use the
Transport Client.
+
NOTE: Using the Transport Client effectively means the users are granted access
to the cluster state. This means users can view the metadata of all indices,
index templates, mappings, nodes, and essentially everything else about the cluster.
However, this role does not grant permission to view the data in all indices.
[[built-in-roles-watcher-admin]] `watcher_admin`::
+
Grants write access to the `.watches` index, read access to the watch history and
the triggered watches index, and allows execution of all watcher actions.
[[built-in-roles-watcher-user]] `watcher_user`::
+
Grants read access to the `.watches` index, the get watch action and the watcher
stats.
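
Built-in roles are assigned to users in the same way as any custom role. The
following is a minimal, hypothetical sketch assuming the `native` realm and the
6.x `_xpack/security` user endpoint; the username and password shown are
illustrative only:

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich <1>
{
  "password" : "l0ng-r4nd0m-p@ssw0rd",
  "roles" : [ "kibana_user", "monitoring_user" ], <2>
  "full_name" : "Jack Nicholson"
}
--------------------------------------------------
<1> Hypothetical request; the username and body values are examples only.
<2> Built-in roles are referenced by name, exactly like roles defined through
the _Role Management APIs_ or `roles.yml`.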

View File

@ -0,0 +1,175 @@
[role="xpack"]
[[defining-roles]]
=== Defining roles
A role is defined by the following JSON structure:
[source,js]
-----
{
"run_as": [ ... ], <1>
"cluster": [ ... ], <2>
"indices": [ ... ] <3>
}
-----
<1> A list of usernames the owners of this role can <<run-as-privilege, impersonate>>.
<2> A list of cluster privileges. These privileges define the
cluster level actions users with this role are able to execute. This field
is optional (missing `cluster` privileges effectively mean no cluster level
permissions).
<3> A list of indices permissions entries. This field is optional (missing `indices`
privileges effectively mean no index level permissions).
[[valid-role-name]]
NOTE: Role names must be at least 1 character and no more than 1024 characters long. They can
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces,
punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
Leading or trailing whitespace is not allowed.
The following describes the structure of an indices permissions entry:
[source,js]
-------
{
"names": [ ... ], <1>
"privileges": [ ... ], <2>
"field_security" : { ... }, <3>
"query": "..." <4>
}
-------
<1> A list of indices (or index name patterns) to which the permissions in this
entry apply.
<2> The index level privileges the owners of the role have on the associated
indices (those indices that are specified in the `names` field).
<3> Specification for document fields the owners of the role have read access to.
See <<field-and-document-access-control>> for details.
<4> A search query that defines the documents the owners of the role have read
access to. A document within the associated indices must match this query
in order for it to be accessible by the owners of the role.
[TIP]
==============================================================================
When specifying index names, you can use indices and aliases with their full
names or regular expressions that refer to multiple indices.
* Wildcard (default) - simple wildcard matching where `*` is a placeholder
for zero or more characters, `?` is a placeholder for a single character
and `\` may be used as an escape character.
* Regular Expressions - A more powerful syntax for matching more complex
patterns. This regular expression is based on Lucene's regexp automaton
syntax. To enable this syntax, it must be wrapped within a pair of
forward slashes (`/`). Any pattern starting with `/` and not ending with
`/` is considered to be malformed.
.Example Regular Expressions
[source,yaml]
------------------------------------------------------------------------------
"foo-bar": # match the literal `foo-bar`
"foo-*": # match anything beginning with "foo-"
"logstash-201?-*": # ? matches any one character
"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019
"/foo": # syntax error - missing final /
------------------------------------------------------------------------------
==============================================================================
The following snippet shows an example definition of a `clicks_admin` role:
[source,js]
-----------
{
"run_as": [ "clicks_watcher_1" ]
"cluster": [ "monitor" ],
"indices": [
{
"names": [ "events-*" ],
"privileges": [ "read" ],
"field_security" : {
"grant" : [ "category", "@timestamp", "message" ]
},
"query": "{\"match\": {\"category\": \"click\"}}"
}
]
}
-----------
Based on the above definition, users owning the `clicks_admin` role can:
* Impersonate the `clicks_watcher_1` user and execute requests on its behalf.
* Monitor the {es} cluster
* Read data from all indices prefixed with `events-`
* Within these indices, only read the events of the `click` category
* Within these documents, only read the `category`, `@timestamp` and `message`
fields.
TIP: For a complete list of available cluster and indices privileges, see <<security-privileges>>.
There are two available mechanisms to define roles: using the _Role Management APIs_
or defining them in local files on the {es} nodes. {security} also supports implementing
custom roles providers. If you need to integrate with another system to retrieve
user roles, you can build a custom roles provider plugin. For more information,
see <<custom-roles-provider, Custom Roles Provider Extension>>.
[float]
[[roles-management-ui]]
=== Role management UI
{security} enables you to easily manage users and roles from within {kib}. To
manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*.
[float]
[[roles-management-api]]
=== Role management API
The _Role Management APIs_ enable you to add, update, remove and retrieve roles
dynamically. When you use the APIs to manage roles in the `native` realm, the
roles are stored in an internal {es} index. For more information and examples,
see {ref}/security-api-roles.html[Role Management APIs].
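
For illustration, the `clicks_admin` role shown earlier on this page could be
created through these APIs with a request along the following lines. This is a
sketch that assumes the 6.x `_xpack/security` role endpoint; the role name and
body simply reuse the example definition above:

[source,js]
--------------------------------------------------
POST /_xpack/security/role/clicks_admin <1>
{
  "run_as": [ "clicks_watcher_1" ],
  "cluster": [ "monitor" ],
  "indices": [
    {
      "names": [ "events-*" ],
      "privileges": [ "read" ],
      "field_security" : {
        "grant" : [ "category", "@timestamp", "message" ]
      },
      "query": "{\"match\": {\"category\": \"click\"}}"
    }
  ]
}
--------------------------------------------------
<1> Hypothetical request; see {ref}/security-api-roles.html[Role Management APIs]
for the authoritative syntax.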
[float]
[[roles-management-file]]
=== File-based role management
Apart from the _Role Management APIs_, roles can also be defined in a local
`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each
role definition is keyed by its name.
[IMPORTANT]
==============================
If the same role name is used in the `roles.yml` file and through the
_Role Management APIs_, the role found in the file will be used.
==============================
While the _Role Management APIs_ are the preferred mechanism to define roles,
using the `roles.yml` file becomes useful if you want to define fixed roles that
no one (besides an administrator with physical access to the {es} nodes)
would be able to change.
[IMPORTANT]
==============================
The `roles.yml` file is managed locally by each node and is not managed globally by the
cluster. This means that with a typical multi-node cluster, the exact same
changes need to be applied on each and every node in the cluster.
A safer approach would be to apply the change on one of the nodes and have the
`roles.yml` distributed/copied to all other nodes in the cluster (either
manually or using a configuration management system such as Puppet or Chef).
==============================
The following snippet shows an example of the `roles.yml` file configuration:
[source,yaml]
-----------------------------------
click_admins:
run_as: [ 'clicks_watcher_1' ]
cluster: [ 'monitor' ]
indices:
- names: [ 'events-*' ]
privileges: [ 'read' ]
field_security:
grant: ['category', '@timestamp', 'message' ]
query: '{"match": {"category": "click"}}'
-----------------------------------
{security} continuously monitors the `roles.yml` file and automatically picks
up and applies any changes to it.

View File

@ -49,295 +49,11 @@ As an administrator, you will need to define the roles that you want to use,
then assign users to the roles. These can be assigned to users in a number of then assign users to the roles. These can be assigned to users in a number of
ways depending on the realms by which the users are authenticated. ways depending on the realms by which the users are authenticated.
[[built-in-roles]] include::built-in-roles.asciidoc[]
=== Built-in roles
{security} applies a default role to all users, including include::managing-roles.asciidoc[]
<<anonymous-access, anonymous users>>. The default role enables users to access
the authenticate endpoint, change their own passwords, and get information about
themselves.
{security} also provides a set of built-in roles you can explicitly assign include::privileges.asciidoc[]
to users. These roles have a fixed set of privileges and cannot be updated.
[[built-in-roles-ingest-user]] `ingest_admin` ::
Grants access to manage *all* index templates and *all* ingest pipeline configurations.
+
NOTE: This role does *not* provide the ability to create indices; those privileges
must be defined in a separate role.
[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` ::
Grants access to the {kib} Dashboard and read-only permissions on the `.kibana`
index. This role does not have access to editing tools in {kib}. For more
information, see
{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode].
[[built-in-roles-kibana-system]] `kibana_system` ::
Grants access necessary for the {kib} system user to read from and write to the
{kib} indices, manage index templates, and check the availability of the {es} cluster.
This role grants read access to the `.monitoring-*` indices and read and write access
to the `.reporting-*` indices. For more information, see
{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}].
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
[[built-in-roles-kibana-user]] `kibana_user`::
Grants the minimum privileges required for any user of {kib}. This role grants
access to the {kib} indices and grants monitoring privileges for the cluster.
[[built-in-roles-logstash-admin]] `logstash_admin` ::
Grants access to the `.logstash*` indices for managing configurations.
[[built-in-roles-logstash-system]] `logstash_system` ::
Grants access necessary for the Logstash system user to send system-level data
(such as monitoring) to {es}. For more information, see
{logstash-ref}/ls-security.html[Configuring Security in Logstash].
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
+
NOTE: This role does not provide access to the logstash indices and is not
suitable for use within a Logstash pipeline.
[[built-in-roles-beats-system]] `beats_system` ::
Grants access necessary for the Beats system user to send system-level data
(such as monitoring) to {es}.
+
NOTE: This role should not be assigned to users as the granted permissions may
change between releases.
+
NOTE: This role does not provide access to the beats indices and is not
suitable for writing beats output to {es}.
[[built-in-roles-ml-admin]] `machine_learning_admin`::
Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices.
[[built-in-roles-ml-user]] `machine_learning_user`::
Grants the minimum privileges required to view {xpackml} configuration,
status, and results. This role grants `monitor_ml` cluster privileges and
read access to the `.ml-notifications` and `.ml-anomalies*` indices,
which store {ml} results.
[[built-in-roles-monitoring-user]] `monitoring_user`::
Grants the minimum privileges required for any user of {monitoring} other than those
required to use {kib}. This role grants access to the monitoring indices and grants
privileges necessary for reading basic cluster information. Monitoring users should
also be assigned the `kibana_user` role.
[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`::
Grants the minimum privileges required for a remote monitoring agent to write data
into this cluster.
[[built-in-roles-reporting-user]] `reporting_user`::
Grants the specific privileges required for users of {reporting} other than those
required to use {kib}. This role grants access to the reporting indices. Reporting
users should also be assigned the `kibana_user` role and a role that grants them
access to the data that will be used to generate reports with.
[[built-in-roles-superuser]] `superuser`::
Grants full access to the cluster, including all indices and data. A user with
the `superuser` role can also manage users and roles and
<<run-as-privilege, impersonate>> any other user in the system. Due to the
permissive nature of this role, take extra care when assigning it to a user.
[[built-in-roles-transport-client]] `transport_client`::
Grants the privileges required to access the cluster through the Java Transport
Client. The Java Transport Client fetches information about the nodes in the
cluster using the _Node Liveness API_ and the _Cluster State API_ (when
sniffing is enabled). Assign your users this role if they use the
Transport Client.
+
NOTE: Using the Transport Client effectively means the users are granted access
to the cluster state. This means users can view the metadata over all indices,
index templates, mappings, node and basically everything about the cluster.
However, this role does not grant permission to view the data in all indices.
[[built-in-roles-watcher-admin]] `watcher_admin`::
+
Grants write access to the `.watches` index, read access to the watch history and
the triggered watches index and allows to execute all watcher actions.
[[built-in-roles-watcher-user]] `watcher_user`::
+
Grants read access to the `.watches` index, the get watch action and the watcher
stats.
[[defining-roles]]
=== Defining roles
A role is defined by the following JSON structure:
[source,js]
-----
{
"run_as": [ ... ], <1>
"cluster": [ ... ], <2>
"indices": [ ... ] <3>
}
-----
<1> A list of usernames the owners of this role can <<run-as-privilege, impersonate>>.
<2> A list of cluster privileges. These privileges define the
cluster level actions users with this role are able to execute. This field
is optional (missing `cluster` privileges effectively mean no cluster level
permissions).
<3> A list of indices permissions entries. This field is optional (missing `indices`
privileges effectively mean no index level permissions).
[[valid-role-name]]
NOTE: Role names must be at least 1 and no more than 1024 characters. They can
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces,
punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
Leading or trailing whitespace is not allowed.
The following describes the structure of an indices permissions entry:
[source,js]
-------
{
"names": [ ... ], <1>
"privileges": [ ... ], <2>
"field_security" : { ... }, <3>
"query": "..." <4>
}
-------
<1> A list of indices (or index name patterns) to which the permissions in this
entry apply.
<2> The index level privileges the owners of the role have on the associated
indices (those indices that are specified in the `name` field)
<3> Specification for document fields the owners of the role have read access to.
See <<field-and-document-access-control>> for details.
<4> A search query that defines the documents the owners of the role have read
access to. A document within the associated indices must match this query
in order for it to be accessible by the owners of the role.
[TIP]
==============================================================================
When specifying index names, you can use indices and aliases with their full
names or regular expressions that refer to multiple indices.
* Wildcard (default) - simple wildcard matching where `*` is a placeholder
for zero or more characters, `?` is a placeholder for a single character
and `\` may be used as an escape character.
* Regular Expressions - A more powerful syntax for matching more complex
patterns. This regular expression is based on Lucene's regexp automaton
syntax. To enable this syntax, it must be wrapped within a pair of
forward slashes (`/`). Any pattern starting with `/` and not ending with
`/` is considered to be malformed.
.Example Regular Expressions
[source,yaml]
------------------------------------------------------------------------------
"foo-bar": # match the literal `foo-bar`
"foo-*": # match anything beginning with "foo-"
"logstash-201?-*": # ? matches any one character
"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019
"/foo": # syntax error - missing final /
------------------------------------------------------------------------------
==============================================================================
The following snippet shows an example definition of a `clicks_admin` role:
[source,js]
-----------
{
"run_as": [ "clicks_watcher_1" ]
"cluster": [ "monitor" ],
"indices": [
{
"names": [ "events-*" ],
"privileges": [ "read" ],
"field_security" : {
"grant" : [ "category", "@timestamp", "message" ]
},
"query": "{\"match\": {\"category\": \"click\"}}"
}
]
}
-----------
Based on the above definition, users owning the `clicks_admin` role can:
* Impersonate the `clicks_watcher_1` user and execute requests on its behalf.
* Monitor the {es} cluster
* Read data from all indices prefixed with `events-`
* Within these indices, only read the events of the `click` category
* Within these document, only read the `category`, `@timestamp` and `message`
fields.
TIP: For a complete list of available <<security-privileges, cluster and indices privileges>>
There are two available mechanisms to define roles: using the _Role Management APIs_
or in local files on the {es} nodes. {security} also supports implementing
custom roles providers. If you need to integrate with another system to retrieve
user roles, you can build a custom roles provider plugin. For more information,
see <<custom-roles-provider, Custom Roles Provider Extension>>.
[float]
[[roles-management-ui]]
=== Role management UI
{security} enables you to easily manage users and roles from within {kib}. To
manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*.
[float]
[[roles-management-api]]
=== Role management API
The _Role Management APIs_ enable you to add, update, remove and retrieve roles
dynamically. When you use the APIs to manage roles in the `native` realm, the
roles are stored in an internal {es} index. For more information and examples,
see {ref}/security-api-roles.html[Role Management APIs].
[float]
[[roles-management-file]]
=== File-based role management
Apart from the _Role Management APIs_, roles can also be defined in local
`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each
role definition is keyed by its name.
[IMPORTANT]
==============================
If the same role name is used in the `roles.yml` file and through the
_Role Management APIs_, the role found in the file will be used.
==============================
While the _Role Management APIs_ is the preferred mechanism to define roles,
using the `roles.yml` file becomes useful if you want to define fixed roles that
no one (beside an administrator having physical access to the {es} nodes)
would be able to change.
[IMPORTANT]
==============================
The `roles.yml` file is managed locally by the node and is not globally by the
cluster. This means that with a typical multi-node cluster, the exact same
changes need to be applied on each and every node in the cluster.
A safer approach would be to apply the change on one of the nodes and have the
`roles.yml` distributed/copied to all other nodes in the cluster (either
manually or using a configuration management system such as Puppet or Chef).
==============================
The following snippet shows an example of the `roles.yml` file configuration:
[source,yaml]
-----------------------------------
click_admins:
run_as: [ 'clicks_watcher_1' ]
cluster: [ 'monitor' ]
indices:
- names: [ 'events-*' ]
privileges: [ 'read' ]
field_security:
grant: ['category', '@timestamp', 'message' ]
query: '{"match": {"category": "click"}}'
-----------------------------------
{security} continuously monitors the `roles.yml` file and automatically picks
up and applies any changes to it.
include::alias-privileges.asciidoc[] include::alias-privileges.asciidoc[]

View File

@ -1,10 +1,11 @@
[role="xpack"]
[[security-privileges]] [[security-privileges]]
=== Security Privileges === Security privileges
This section lists the privileges that you can assign to a role. This section lists the privileges that you can assign to a role.
[[privileges-list-cluster]] [[privileges-list-cluster]]
==== Cluster Privileges ==== Cluster privileges
[horizontal] [horizontal]
`all`:: `all`::
@ -66,7 +67,7 @@ All privileges necessary for a transport client to connect. Required by the rem
cluster to enable <<cross-cluster-configuring,Cross Cluster Search>>. cluster to enable <<cross-cluster-configuring,Cross Cluster Search>>.
[[privileges-list-indices]] [[privileges-list-indices]]
==== Indices Privileges ==== Indices privileges
[horizontal] [horizontal]
`all`:: `all`::
@ -125,7 +126,7 @@ Privilege to create an index. A create index request may contain aliases to be
added to the index once created. In that case the request requires the `manage` added to the index once created. In that case the request requires the `manage`
privilege as well, on both the index and the aliases names. privilege as well, on both the index and the aliases names.
==== Run As Privilege ==== Run as privilege
The `run_as` permission enables an authenticated user to submit requests on The `run_as` permission enables an authenticated user to submit requests on
behalf of another user. The value can be a user name or a comma-separated list behalf of another user. The value can be a user name or a comma-separated list

View File

@ -7,6 +7,4 @@
* {ref}/security-api.html[Security API] * {ref}/security-api.html[Security API]
* {ref}/xpack-commands.html[Security Commands] * {ref}/xpack-commands.html[Security Commands]
include::reference/privileges.asciidoc[]
include::reference/files.asciidoc[] include::reference/files.asciidoc[]

View File

@ -8,17 +8,11 @@ import java.nio.file.Path
import java.nio.file.StandardCopyOption import java.nio.file.StandardCopyOption
import org.elasticsearch.gradle.test.RunTask; import org.elasticsearch.gradle.test.RunTask;
apply plugin: 'elasticsearch.es-meta-plugin' apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
archivesBaseName = 'x-pack' archivesBaseName = 'x-pack'
es_meta_plugin {
name = 'x-pack'
description = 'Elasticsearch Expanded Pack Plugin'
plugins = ['core', 'deprecation', 'graph', 'logstash',
'ml', 'monitoring', 'security', 'upgrade', 'watcher', 'sql', 'rollup', 'index-lifecycle']
}
dependencies { dependencies {
testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
} }
View File
@ -5,4 +5,4 @@
# you may not use this file except in compliance with the Elastic License. # you may not use this file except in compliance with the Elastic License.
# include x-pack-core jars in classpath # include x-pack-core jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-core/*" ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-core/*"
View File
@ -2,4 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
rem or more contributor license agreements. Licensed under the Elastic License; rem or more contributor license agreements. Licensed under the Elastic License;
rem you may not use this file except in compliance with the Elastic License. rem you may not use this file except in compliance with the Elastic License.
set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-core/* set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-core/*
View File
@ -5,11 +5,11 @@
*/ */
package org.elasticsearch.xpack.core.ml.action; package org.elasticsearch.xpack.core.ml.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.Action; import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.action.support.tasks.BaseTasksResponse;
@ -297,7 +297,7 @@ public class GetJobsStatsAction extends Action<GetJobsStatsAction.Request, GetJo
this.jobsStats = jobsStats; this.jobsStats = jobsStats;
} }
public Response(List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures, public Response(List<TaskOperationFailure> taskFailures, List<? extends ElasticsearchException> nodeFailures,
QueryPage<JobStats> jobsStats) { QueryPage<JobStats> jobsStats) {
super(taskFailures, nodeFailures); super(taskFailures, nodeFailures);
this.jobsStats = jobsStats; this.jobsStats = jobsStats;
View File
@ -15,6 +15,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ClientHelper;
import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction;
import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.job.retention.ExpiredForecastsRemover; import org.elasticsearch.xpack.ml.job.retention.ExpiredForecastsRemover;
@ -40,7 +41,7 @@ public class TransportDeleteExpiredDataAction extends HandledTransportAction<Del
Client client, ClusterService clusterService) { Client client, ClusterService clusterService) {
super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
DeleteExpiredDataAction.Request::new); DeleteExpiredDataAction.Request::new);
this.client = client; this.client = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN);
this.clusterService = clusterService; this.clusterService = clusterService;
} }
View File
@ -45,6 +45,10 @@ import java.util.Objects;
* Removes up to {@link #MAX_FORECASTS} forecasts (stats + forecasts docs) that have expired. * Removes up to {@link #MAX_FORECASTS} forecasts (stats + forecasts docs) that have expired.
* A forecast is deleted if its expiration timestamp is earlier * A forecast is deleted if its expiration timestamp is earlier
* than the start of the current day (local time-zone). * than the start of the current day (local time-zone).
*
* This is expected to be used by actions requiring admin rights. Thus,
* it is also expected that the provided client will be a client with the
* ML origin so that permissions to manage ML indices are met.
*/ */
public class ExpiredForecastsRemover implements MlDataRemover { public class ExpiredForecastsRemover implements MlDataRemover {
View File
@ -34,6 +34,10 @@ import java.util.Objects;
* of their respective job with the exception of the currently used snapshot. * of their respective job with the exception of the currently used snapshot.
* A snapshot is deleted if its timestamp is earlier than the start of the * A snapshot is deleted if its timestamp is earlier than the start of the
* current day (local time-zone) minus the retention period. * current day (local time-zone) minus the retention period.
*
* This is expected to be used by actions requiring admin rights. Thus,
* it is also expected that the provided client will be a client with the
* ML origin so that permissions to manage ML indices are met.
*/ */
public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover { public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover {
View File
@ -33,14 +33,15 @@ import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatter;
import java.util.Objects; import java.util.Objects;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
/** /**
* Removes all results that have expired the configured retention time * Removes all results that have expired the configured retention time
* of their respective job. A result is deleted if its timestamp is earlier * of their respective job. A result is deleted if its timestamp is earlier
* than the start of the current day (local time-zone) minus the retention * than the start of the current day (local time-zone) minus the retention
* period. * period.
*
* This is expected to be used by actions requiring admin rights. Thus,
* it is also expected that the provided client will be a client with the
* ML origin so that permissions to manage ML indices are met.
*/ */
public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover {
@ -65,7 +66,7 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover {
LOGGER.debug("Removing results of job [{}] that have a timestamp before [{}]", job.getId(), cutoffEpochMs); LOGGER.debug("Removing results of job [{}] that have a timestamp before [{}]", job.getId(), cutoffEpochMs);
DeleteByQueryRequest request = createDBQRequest(job, cutoffEpochMs); DeleteByQueryRequest request = createDBQRequest(job, cutoffEpochMs);
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, new ActionListener<BulkByScrollResponse>() { client.execute(DeleteByQueryAction.INSTANCE, request, new ActionListener<BulkByScrollResponse>() {
@Override @Override
public void onResponse(BulkByScrollResponse bulkByScrollResponse) { public void onResponse(BulkByScrollResponse bulkByScrollResponse) {
try { try {
View File
@ -7,4 +7,4 @@
source "`dirname "$0"`"/x-pack-env source "`dirname "$0"`"/x-pack-env
# include x-pack-security jars in classpath # include x-pack-security jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-security/*" ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*"
View File
@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License.
call "%~dp0x-pack-env.bat" || exit /b 1 call "%~dp0x-pack-env.bat" || exit /b 1
set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-security/* set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-security/*
View File
@ -53,7 +53,8 @@ import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.notNullValue;
@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") @TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE,org.elasticsearch.action.search:TRACE," +
"org.elasticsearch.search:TRACE")
public class LicensingTests extends SecurityIntegTestCase { public class LicensingTests extends SecurityIntegTestCase {
public static final String ROLES = public static final String ROLES =
SecuritySettingsSource.TEST_ROLE + ":\n" + SecuritySettingsSource.TEST_ROLE + ":\n" +
View File
@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.jdbc.net.client;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.HttpClient;
import org.elasticsearch.xpack.sql.client.shared.Version;
import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo;
import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse;
@ -79,7 +80,8 @@ public class JdbcHttpClient {
private InfoResponse fetchServerInfo() throws SQLException { private InfoResponse fetchServerInfo() throws SQLException {
MainResponse mainResponse = httpClient.serverInfo(); MainResponse mainResponse = httpClient.serverInfo();
return new InfoResponse(mainResponse.getClusterName(), mainResponse.getVersion().major, mainResponse.getVersion().minor); Version version = Version.fromString(mainResponse.getVersion());
return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor);
} }
/** /**
View File
@ -65,8 +65,9 @@ public class CliSession {
} catch (SQLException ex) { } catch (SQLException ex) {
throw new ClientException(ex); throw new ClientException(ex);
} }
Version version = Version.fromString(response.getVersion());
// TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees // TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees
if (response.getVersion().major != Version.CURRENT.major || response.getVersion().minor != Version.CURRENT.minor) { if (version.major != Version.CURRENT.major || version.minor != Version.CURRENT.minor) {
throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " + throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " +
Version.CURRENT.toString()); Version.CURRENT.toString());
} }
View File
@ -31,7 +31,7 @@ public class ServerInfoCliCommand extends AbstractServerCliCommand {
terminal.line() terminal.line()
.text("Node:").em(info.getNodeName()) .text("Node:").em(info.getNodeName())
.text(" Cluster:").em(info.getClusterName()) .text(" Cluster:").em(info.getClusterName())
.text(" Version:").em(info.getVersion().toString()) .text(" Version:").em(info.getVersion())
.ln(); .ln();
return true; return true;
} }
View File
@ -27,7 +27,7 @@ public class CliSessionTests extends ESTestCase {
public void testProperConnection() throws Exception { public void testProperConnection() throws Exception {
HttpClient httpClient = mock(HttpClient.class); HttpClient httpClient = mock(HttpClient.class);
when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT.toString(),
ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT));
CliSession cliSession = new CliSession(httpClient); CliSession cliSession = new CliSession(httpClient);
cliSession.checkConnection(); cliSession.checkConnection();
@ -57,7 +57,7 @@ public class CliSessionTests extends ESTestCase {
} }
when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5),
org.elasticsearch.Version.fromString(major + "." + minor + ".23"), org.elasticsearch.Version.fromString(major + "." + minor + ".23").toString(),
ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT));
CliSession cliSession = new CliSession(httpClient); CliSession cliSession = new CliSession(httpClient);
expectThrows(ClientException.class, cliSession::checkConnection); expectThrows(ClientException.class, cliSession::checkConnection);
View File
@ -35,7 +35,7 @@ public class ServerInfoCliCommandTests extends ESTestCase {
TestTerminal testTerminal = new TestTerminal(); TestTerminal testTerminal = new TestTerminal();
HttpClient client = mock(HttpClient.class); HttpClient client = mock(HttpClient.class);
CliSession cliSession = new CliSession(client); CliSession cliSession = new CliSession(client);
when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), when(client.serverInfo()).thenReturn(new MainResponse("my_node", "1.2.3",
new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT)); new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT));
ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand();
assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); assertTrue(cliCommand.handle(testTerminal, cliSession, "info"));
View File
@ -7,7 +7,6 @@
package org.elasticsearch.xpack.sql.proto; package org.elasticsearch.xpack.sql.proto;
import org.elasticsearch.Build; import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
@ -19,8 +18,7 @@ import java.util.Objects;
*/ */
public class MainResponse { public class MainResponse {
private String nodeName; private String nodeName;
// TODO: Add parser for Version private String version;
private Version version;
private String clusterName; private String clusterName;
private String clusterUuid; private String clusterUuid;
// TODO: Add parser for Build // TODO: Add parser for Build
@ -29,7 +27,7 @@ public class MainResponse {
private MainResponse() { private MainResponse() {
} }
public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, Build build) { public MainResponse(String nodeName, String version, String clusterName, String clusterUuid, Build build) {
this.nodeName = nodeName; this.nodeName = nodeName;
this.version = version; this.version = version;
this.clusterName = clusterName; this.clusterName = clusterName;
@ -41,7 +39,7 @@ public class MainResponse {
return nodeName; return nodeName;
} }
public Version getVersion() { public String getVersion() {
return version; return version;
} }
@ -76,7 +74,7 @@ public class MainResponse {
(String) value.get("build_hash"), (String) value.get("build_hash"),
(String) value.get("build_date"), (String) value.get("build_date"),
(boolean) value.get("build_snapshot")); (boolean) value.get("build_snapshot"));
response.version = Version.fromString((String) value.get("number")); response.version = (String) value.get("number");
}, (parser, context) -> parser.map(), new ParseField("version")); }, (parser, context) -> parser.map(), new ParseField("version"));
} }
View File
@ -7,4 +7,4 @@
source "`dirname "$0"`"/x-pack-env source "`dirname "$0"`"/x-pack-env
# include x-pack-security jars in classpath # include x-pack-security jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-watcher/*" ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*"
View File
@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License.
call "%~dp0x-pack-env.bat" || exit /b 1 call "%~dp0x-pack-env.bat" || exit /b 1
set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-watcher/* set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-watcher/*
View File
@ -11,41 +11,6 @@ esvagrant {
} }
dependencies { dependencies {
// Packaging tests use the x-pack meta plugin
packaging project(path: xpackProject('plugin').path, configuration: 'zip')
// Inherit Bats test utils from :qa:vagrant project // Inherit Bats test utils from :qa:vagrant project
packaging project(path: ':qa:vagrant', configuration: 'packaging') packaging project(path: ':qa:vagrant', configuration: 'packaging')
} }
Map<String, List<String>> metaPlugins = [:]
for (Project metaPlugin : project.rootProject.subprojects) {
if (metaPlugin.plugins.hasPlugin(MetaPluginBuildPlugin)) {
MetaPluginPropertiesExtension extension = metaPlugin.extensions.findByName('es_meta_plugin')
if (extension != null) {
List<String> plugins = []
metaPlugin.subprojects.each {
if (extension.plugins.contains(it.name)) {
Project plugin = (Project) it
if (plugin.plugins.hasPlugin(PluginBuildPlugin)) {
PluginPropertiesExtension esplugin = plugin.extensions.findByName('esplugin')
if (esplugin != null) {
plugins.add(esplugin.name)
}
}
}
}
metaPlugins.put(extension.name, plugins.toSorted())
}
}
}
setupPackagingTest {
doLast {
metaPlugins.each{ name, plugins ->
File expectedMetaPlugins = file("build/plugins/${name}.expected")
expectedMetaPlugins.parentFile.mkdirs()
expectedMetaPlugins.setText(plugins.join('\n'), 'UTF-8')
}
}
}
Some files were not shown because too many files have changed in this diff.