Merge branch 'master' into feature/reindex

Nik Everett 2016-02-10 14:07:41 -05:00
commit 0da30d5eae
491 changed files with 18630 additions and 4569 deletions


@@ -82,4 +82,7 @@
(c-set-offset 'func-decl-cont '++)
))
(c-basic-offset . 4)
(c-comment-only-line-offset . (0 . 0)))))
(c-comment-only-line-offset . (0 . 0))
(fill-column . 140)
(fci-rule-column . 140)
(compile-command . "gradle compileTestJava"))))
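;; fill-column and fci-rule-column mirror the 140-character line limit this
;; same commit adds to checkstyle.xml below, and compile-command wires
;; M-x compile to the Gradle test-compile task.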

.gitignore

@@ -4,12 +4,13 @@
*.iml
*.ipr
*.iws
build-idea/
# eclipse files
.project
.classpath
eclipse-build
.settings
build-eclipse/
# netbeans files
nb-configuration.xml
@@ -18,7 +19,6 @@ nbactions.xml
# gradle stuff
.gradle/
build/
generated-resources/
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
@@ -38,5 +38,5 @@ html_docs
# random old stuff that we should look at the necessity of...
/tmp/
backwards/
eclipse-build


@@ -75,8 +75,9 @@ subprojects {
allprojects {
// injecting groovy property variables into all projects
project.ext {
// for eclipse hacks...
// for ide hacks...
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
}
}
@@ -170,12 +171,14 @@ gradle.projectsEvaluated {
allprojects {
apply plugin: 'idea'
if (isIdea) {
project.buildDir = file('build-idea')
}
idea {
module {
// same as for the IntelliJ Gradle tooling integration
inheritOutputDirs = false
outputDir = file('build/classes/main')
testOutputDir = file('build/classes/test')
outputDir = file('build-idea/classes/main')
testOutputDir = file('build-idea/classes/test')
iml {
// fix so that Gradle idea plugin properly generates support for resource folders
@@ -222,14 +225,19 @@ allprojects {
apply plugin: 'eclipse'
plugins.withType(JavaBasePlugin) {
eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse')
File eclipseBuild = project.file('build-eclipse')
eclipse.classpath.defaultOutputDir = eclipseBuild
if (isEclipse) {
// set this so generated dirs will be relative to eclipse build
project.buildDir = eclipseBuild
}
eclipse.classpath.file.whenMerged { classpath ->
// give each source folder a unique corresponding output folder
int i = 0;
classpath.entries.findAll { it instanceof SourceFolder }.each { folder ->
i++;
// this is *NOT* a path or a file.
folder.output = "build/eclipse/" + i
folder.output = "build-eclipse/" + i
}
}
}
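// Net effect of the eclipse/idea branches above: running `gradle eclipse` or
// `gradle idea` now writes IDE output to build-eclipse/ and build-idea/
// respectively, keeping IDE artifacts out of Gradle's own build/ directory
// (matching the new .gitignore entries earlier in this diff).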


@@ -76,9 +76,17 @@ extraArchive {
tests = false
}
idea {
module {
inheritOutputDirs = false
outputDir = file('build-idea/classes/main')
testOutputDir = file('build-idea/classes/test')
}
}
eclipse {
classpath {
defaultOutputDir = new File(file('build'), 'eclipse')
defaultOutputDir = file('build-eclipse')
}
}


@@ -112,6 +112,9 @@ public class PluginBuildPlugin extends BuildPlugin {
include 'config/**'
include 'bin/**'
}
if (project.path.startsWith(':modules:') == false) {
into('elasticsearch')
}
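// i.e. regular plugins get their files nested under an extra elasticsearch/
// directory inside the zip, while core modules (project paths under
// :modules:) keep their files at the archive root.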
}
project.assemble.dependsOn(bundle)


@@ -29,7 +29,7 @@ import org.gradle.api.tasks.Copy
class PluginPropertiesTask extends Copy {
PluginPropertiesExtension extension
File generatedResourcesDir = new File(project.projectDir, 'generated-resources')
File generatedResourcesDir = new File(project.buildDir, 'generated-resources')
PluginPropertiesTask() {
File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')


@@ -91,11 +91,12 @@ class PrecommitTasks {
// on them. But we want `precommit` to depend on `checkstyle` which depends on them so
// we have to swap them.
project.pluginManager.apply('checkstyle')
URL checkstyleSuppressions = PrecommitTasks.getResource('/checkstyle_suppressions.xml')
project.checkstyle {
config = project.resources.text.fromFile(
PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
configProperties = [
suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
suppressions: checkstyleSuppressions
]
}
for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
@@ -103,6 +104,7 @@ class PrecommitTasks {
if (task != null) {
project.tasks['check'].dependsOn.remove(task)
checkstyleTask.dependsOn(task)
task.inputs.file(checkstyleSuppressions)
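// Declaring the suppressions file as a task input makes Gradle re-run
// checkstyle when the suppressions change instead of treating the task
// as up-to-date.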
}
}
return checkstyleTask


@@ -340,7 +340,7 @@ class ClusterFormationTasks {
}
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
return configureExecTask(name, project, setup, node, args)
}


@@ -11,11 +11,12 @@
</module>
<module name="TreeWalker">
<!-- ~3500 violations
<!-- It's our official line length! See checkstyle_suppressions.xml for the files that don't pass this. For now we
suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
unfair. -->
<module name="LineLength">
<property name="max" value="140"/>
</module>
-->
<module name="AvoidStarImport" />
<!-- Doesn't pass but we could make it pass pretty quick.
@@ -36,6 +37,8 @@
hard to distinguish from the digit 1 (one). -->
<module name="UpperEll"/>
<module name="EqualsHashCode" />
<!-- We don't use Java's builtin serialization and we suppress all warnings
about it. The flip side of that coin is that we shouldn't _try_ to use
it. We can't outright ban it with ForbiddenApis because it complains about

File diff suppressed because it is too large


@@ -129,3 +129,5 @@ java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffl
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests


@@ -1,13 +1,14 @@
# Elasticsearch plugin descriptor file
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch`
# inside all plugins.
#
### example plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
# <arbitrary name1>.jar <-- classes, resources, dependencies
# <arbitrary nameN>.jar <-- any number of jars
# plugin-descriptor.properties <-- example contents below:
#|____elasticsearch/
#| |____ <arbitrary name1>.jar <-- classes, resources, dependencies
#| |____ <arbitrary nameN>.jar <-- any number of jars
#| |____ plugin-descriptor.properties <-- example contents below:
#
# classname=foo.bar.BazPlugin
# description=My cool plugin


@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
@@ -190,14 +192,11 @@ import org.elasticsearch.action.termvectors.TermVectorsAction;
import org.elasticsearch.action.termvectors.TransportMultiTermVectorsAction;
import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction;
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.NodeModule;
import java.util.ArrayList;
import java.util.HashMap;
@@ -271,6 +270,7 @@ public class ActionModule extends AbstractModule {
registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);
registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
@@ -323,8 +323,7 @@
registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
registerAction(GetAction.INSTANCE, TransportGetAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class,
TransportDfsOnlyAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);


@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Action for cancelling running tasks
*/
public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {
public static final CancelTasksAction INSTANCE = new CancelTasksAction();
public static final String NAME = "cluster:admin/tasks/cancel";
private CancelTasksAction() {
super(NAME);
}
@Override
public CancelTasksResponse newResponse() {
return new CancelTasksResponse();
}
@Override
public CancelTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new CancelTasksRequestBuilder(client, this);
}
}
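For orientation, a minimal usage sketch (not part of this commit) of invoking the new action through a client; the node id and reason are invented, and the standard Client entry points are assumed:

import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.client.Client;

public class CancelTasksExample {
    // Cancels every cancellable task on node "node-1" (a made-up id); with no
    // node ids, cancellable tasks on all nodes would be targeted.
    public static CancelTasksResponse cancelAll(Client client) {
        CancelTasksRequest request = new CancelTasksRequest("node-1").reason("by admin request");
        return client.admin().cluster().execute(CancelTasksAction.INSTANCE, request).actionGet();
    }
}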


@@ -17,46 +17,57 @@
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
/**
*
* A request to cancel tasks
*/
class ShardDfsOnlyResponse extends BroadcastShardResponse {
public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
private DfsSearchResult dfsSearchResult = new DfsSearchResult();
public static final String DEFAULT_REASON = "by user request";
ShardDfsOnlyResponse() {
private String reason = DEFAULT_REASON;
}
ShardDfsOnlyResponse(ShardId shardId, DfsSearchResult dfsSearchResult) {
super(shardId);
this.dfsSearchResult = dfsSearchResult;
}
public DfsSearchResult getDfsSearchResult() {
return dfsSearchResult;
/**
* Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
* all nodes will be cancelled.
*/
public CancelTasksRequest(String... nodesIds) {
super(nodesIds);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
dfsSearchResult.readFrom(in);
reason = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
dfsSearchResult.writeTo(out);
out.writeString(reason);
}
@Override
public boolean match(Task task) {
return super.match(task) && task instanceof CancellableTask;
}
public CancelTasksRequest reason(String reason) {
this.reason = reason;
return this;
}
public String reason() {
return reason;
}
}


@@ -0,0 +1,34 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Builder for the request to cancel tasks running on the specified nodes
*/
public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {
public CancelTasksRequestBuilder(ElasticsearchClient client, CancelTasksAction action) {
super(client, action, new CancelTasksRequest());
}
}


@@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import java.util.List;
/**
* Returns the list of tasks that were cancelled
*/
public class CancelTasksResponse extends ListTasksResponse {
public CancelTasksResponse() {
}
public CancelTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException>
nodeFailures) {
super(tasks, taskFailures, nodeFailures);
}
}


@@ -0,0 +1,285 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
/**
* Transport action that can be used to cancel currently running cancellable tasks.
* <p>
* For a task to be cancellable it has to return an instance of
* {@link CancellableTask} from {@link TransportRequest#createTask(long, String, String)}
*/
public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> {
public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban";
@Inject
public TransportCancelTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
super(settings, CancelTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, CancelTasksRequest::new, CancelTasksResponse::new, ThreadPool.Names.MANAGEMENT);
transportService.registerRequestHandler(BAN_PARENT_ACTION_NAME, BanParentTaskRequest::new, ThreadPool.Names.SAME, new
BanParentRequestHandler());
}
@Override
protected CancelTasksResponse newResponse(CancelTasksRequest request, List<TaskInfo> tasks, List<TaskOperationFailure>
taskOperationFailures, List<FailedNodeException> failedNodeExceptions) {
return new CancelTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
}
@Override
protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
return new TaskInfo(in);
}
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
// we are only checking one task, so we can optimize it
CancellableTask task = taskManager.getCancellableTask(request.taskId());
if (task != null) {
if (request.match(task)) {
operation.accept(task);
} else {
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
}
} else {
if (taskManager.getTask(request.taskId()) != null) {
// The task exists, but doesn't support cancellation
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
} else {
throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
}
}
} else {
for (CancellableTask task : taskManager.getCancellableTasks().values()) {
if (request.match(task)) {
operation.accept(task);
}
}
}
}
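// To summarize the branching above: a request that names a concrete task id
// takes the single-task fast path and fails loudly when that task is missing
// or not cancellable, while an ALL_TASKS request silently applies the
// request.match() filter to every cancellable task.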
@Override
protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
if (childNodes != null) {
if (childNodes.isEmpty()) {
logger.trace("cancelling task {} with no children", cancellableTask.getId());
return cancellableTask.taskInfo(clusterService.localNode(), false);
} else {
logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
return cancellableTask.taskInfo(clusterService.localNode(), false);
}
} else {
logger.trace("task {} is already cancelled", cancellableTask.getId());
throw new IllegalStateException("task with id " + cancellableTask.getId() + " is already cancelled");
}
}
@Override
protected boolean accumulateExceptions() {
return true;
}
private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
}
private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
}
private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
ClusterState clusterState = clusterService.state();
for (String node : nodes) {
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if the node is still in the cluster
logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]", request.parentNodeId, request
.parentTaskId, node, request.ban);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
banLock.onBanSet();
}
@Override
public void handleException(TransportException exp) {
banLock.onBanSet();
}
});
} else {
banLock.onBanSet();
logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node no longer in the cluster",
request.parentNodeId, request.parentTaskId, node);
}
}
}
private void sendRemoveBanRequest(Set<String> nodes, BanParentTaskRequest request) {
ClusterState clusterState = clusterService.state();
for (String node : nodes) {
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if the node is still in the cluster
logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
request.parentTaskId, node);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler
.INSTANCE_SAME);
} else {
logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node no longer in " +
"the cluster", request.parentNodeId, request.parentTaskId, node);
}
}
}
private static class BanLock {
private final Consumer<Set<String>> finish;
private final AtomicInteger counter;
private final AtomicReference<Set<String>> nodes = new AtomicReference<>();
public BanLock(Consumer<Set<String>> finish) {
counter = new AtomicInteger(0);
this.finish = finish;
}
public void onBanSet() {
if (counter.decrementAndGet() == 0) {
finish();
}
}
public void onTaskFinished(Set<String> nodes) {
this.nodes.set(nodes);
if (counter.addAndGet(nodes.size()) == 0) {
finish();
}
}
public void finish() {
finish.accept(nodes.get());
}
}
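// A note on BanLock's counting protocol: the counter starts at zero,
// onBanSet() decrements it once per contacted child node, and
// onTaskFinished(nodes) adds nodes.size() once the main task completes.
// Whichever side returns the counter to zero last triggers finish(), so the
// ban cleanup runs exactly once, and only after the task has finished and
// every ban request has been answered.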
private static class BanParentTaskRequest extends TransportRequest {
private String parentNodeId;
private long parentTaskId;
private boolean ban;
private String reason;
BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
this.parentNodeId = parentNodeId;
this.parentTaskId = parentTaskId;
this.ban = true;
this.reason = reason;
}
BanParentTaskRequest(String parentNodeId, long parentTaskId) {
this.parentNodeId = parentNodeId;
this.parentTaskId = parentTaskId;
this.ban = false;
}
public BanParentTaskRequest() {
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentNodeId = in.readString();
parentTaskId = in.readLong();
ban = in.readBoolean();
if (ban) {
reason = in.readString();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(parentNodeId);
out.writeLong(parentTaskId);
out.writeBoolean(ban);
if (ban) {
out.writeString(reason);
}
}
}
class BanParentRequestHandler implements TransportRequestHandler<BanParentTaskRequest> {
@Override
public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
if (request.ban) {
logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId, request
.parentTaskId, clusterService.localNode().getId(), request.reason);
taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
} else {
logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
clusterService.localNode().getId());
taskManager.removeBan(request.parentNodeId, request.parentTaskId);
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
}


@@ -105,7 +105,9 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
if (getTaskFailures() != null && getTaskFailures().size() > 0) {
builder.startArray("task_failures");
for (TaskOperationFailure ex : getTaskFailures()){
builder.startObject();
builder.value(ex);
builder.endObject();
}
builder.endArray();
}
@@ -113,7 +115,9 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
if (getNodeFailures() != null && getNodeFailures().size() > 0) {
builder.startArray("node_failures");
for (FailedNodeException ex : getNodeFailures()) {
builder.value(ex);
builder.startObject();
ex.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
}
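For context, the fix wraps each failure in its own object, so the failure arrays now serialize as arrays of JSON objects, schematically:

// "task_failures": [ { ...fields of one TaskOperationFailure... }, ... ]
// "node_failures": [ { ...fields of one FailedNodeException... }, ... ]
// Previously the elements were emitted without the enclosing
// startObject()/endObject() pair.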


@@ -30,17 +30,17 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
/**
*
*/
public class TransportListTasksAction extends TransportTasksAction<ListTasksRequest, ListTasksResponse, TaskInfo> {
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
@Inject
public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {


@@ -286,24 +286,25 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return addValidationError("Must specify at least one alias action", validationException);
}
for (AliasActions aliasAction : allAliasActions) {
if (aliasAction.aliases.length == 0) {
if (CollectionUtils.isEmpty(aliasAction.aliases)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: aliases may not be empty", validationException);
}
+ "]: Property [alias/aliases] is either missing or null", validationException);
} else {
for (String alias : aliasAction.aliases) {
if (!Strings.hasText(alias)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [alias] may not be empty string", validationException);
+ "]: [alias/aliases] may not be empty string", validationException);
}
}
}
if (CollectionUtils.isEmpty(aliasAction.indices)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: Property [index] was either missing or null", validationException);
+ "]: Property [index/indices] is either missing or null", validationException);
} else {
for (String index : aliasAction.indices) {
if (!Strings.hasText(index)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [index] may not be empty string", validationException);
+ "]: [index/indices] may not be empty string", validationException);
}
}
}


@@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.io.IOException;
import java.util.ArrayList;
@@ -55,7 +56,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
*/
public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
private DiscoveryNode node;
private long version;
private long legacyVersion;
private String allocationId;
private Throwable storeException;
private AllocationStatus allocationStatus;
@@ -116,9 +117,9 @@
private StoreStatus() {
}
public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
this.node = node;
this.version = version;
this.legacyVersion = legacyVersion;
this.allocationId = allocationId;
this.allocationStatus = allocationStatus;
this.storeException = storeException;
@@ -132,10 +133,10 @@
}
/**
* Version of the store
* Version of the store for pre-3.0 shards that have not yet been active
*/
public long getVersion() {
return version;
public long getLegacyVersion() {
return legacyVersion;
}
/**
@@ -173,7 +174,7 @@
@Override
public void readFrom(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
version = in.readLong();
legacyVersion = in.readLong();
allocationId = in.readOptionalString();
allocationStatus = AllocationStatus.readFrom(in);
if (in.readBoolean()) {
@@ -184,7 +185,7 @@
@Override
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(version);
out.writeLong(legacyVersion);
out.writeOptionalString(allocationId);
allocationStatus.writeTo(out);
if (storeException != null) {
@@ -198,8 +199,12 @@
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
node.toXContent(builder, params);
builder.field(Fields.VERSION, version);
if (legacyVersion != ShardStateMetaData.NO_VERSION) {
builder.field(Fields.LEGACY_VERSION, legacyVersion);
}
if (allocationId != null) {
builder.field(Fields.ALLOCATION_ID, allocationId);
}
builder.field(Fields.ALLOCATED, allocationStatus.value());
if (storeException != null) {
builder.startObject(Fields.STORE_EXCEPTION);
@@ -215,12 +220,23 @@
return 1;
} else if (other.storeException != null && storeException == null) {
return -1;
} else {
int compare = Long.compare(other.version, version);
}
if (allocationId != null && other.allocationId == null) {
return -1;
} else if (allocationId == null && other.allocationId != null) {
return 1;
} else if (allocationId == null && other.allocationId == null) {
int compare = Long.compare(other.legacyVersion, legacyVersion);
if (compare == 0) {
return Integer.compare(allocationStatus.id, other.allocationStatus.id);
}
return compare;
} else {
int compare = Integer.compare(allocationStatus.id, other.allocationStatus.id);
if (compare == 0) {
return allocationId.compareTo(other.allocationId);
}
return compare;
}
}
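// Resulting sort order: stores with a store exception sort last; among the
// rest, entries that carry an allocation id come before legacy
// (pre-allocation-id) entries; legacy entries order by descending
// legacyVersion, and allocation-id entries by allocation status, then by
// allocation id.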
}
@@ -390,7 +406,7 @@
static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
static final XContentBuilderString STORES = new XContentBuilderString("stores");
// StoreStatus fields
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString LEGACY_VERSION = new XContentBuilderString("legacy_version");
static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");


@@ -180,7 +180,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
for (NodeGatewayStartedShards response : fetchResponse.responses) {
if (shardExistsInNode(response)) {
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.legacyVersion(), response.allocationId(), allocationStatus, response.storeException()));
}
}
CollectionUtil.timSort(storeStatuses);
@@ -213,7 +213,7 @@
* A shard exists/existed in a node only if shard state file exists in the node
*/
private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
return response.storeException() != null || response.legacyVersion() != -1 || response.allocationId() != null;
}
@Override


@@ -164,7 +164,8 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
final QueryShardContext queryShardContext = indexShard.getQueryShardContext();
final QueryShardContext queryShardContext = indexService.newQueryShardContext();
queryShardContext.setTypes(request.types());
boolean valid;
String explanation = null;


@@ -121,7 +121,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
SearchContext.setCurrent(context);
try {
context.parsedQuery(indexShard.getQueryShardContext().toQuery(request.query()));
context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
context.preProcess();
int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);


@@ -88,7 +88,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
executionService.execute(indexRequest, t -> {
executionService.executeIndexRequest(indexRequest, t -> {
logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
listener.onFailure(t);
}, success -> {
@@ -102,7 +102,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.execute(() -> bulkRequestModifier, (indexRequest, throwable) -> {
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> {
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id(), throwable);
bulkRequestModifier.markCurrentItemAsFailed(throwable);
}, (throwable) -> {


@@ -34,9 +34,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.ingest.core.IngestDocument.MetaData;
public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {
@@ -140,7 +138,7 @@ public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineReque
static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactoryRegistry());
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorRegistry());
List<IngestDocument> ingestDocumentList = parseDocs(config);
return new Parsed(pipeline, ingestDocumentList, verbose);
}


@@ -69,8 +69,10 @@ final class WriteableIngestDocument implements Writeable<WriteableIngestDocument
builder.startObject("doc");
Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
for (Map.Entry<IngestDocument.MetaData, String> metadata : metadataMap.entrySet()) {
if (metadata.getValue() != null) {
builder.field(metadata.getKey().getFieldName(), metadata.getValue());
}
}
builder.field("_source", ingestDocument.getSourceAndMetadata());
builder.startObject("_ingest");
for (Map.Entry<String, String> ingestMetadata : ingestDocument.getIngestMetadata().entrySet()) {


@@ -44,20 +44,6 @@ public abstract class ChildTaskActionRequest<Request extends ActionRequest<Reque
this.parentTaskId = parentTaskId;
}
/**
* The node that owns the parent task.
*/
public String getParentTaskNode() {
return parentTaskNode;
}
/**
* The task id of the parent task on the parent node.
*/
public long getParentTaskId() {
return parentTaskId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -73,8 +59,12 @@
}
@Override
public Task createTask(long id, String type, String action) {
return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
}
}
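The net effect: the three-argument createTask is now a final entry point that funnels the parent-task coordinates into an overridable five-argument variant, so subclasses customize task creation in exactly one place. ReplicationRequest, further down in this diff, overrides it precisely this way:

@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
    return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
}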


@@ -58,7 +58,11 @@ public class ChildTaskRequest extends TransportRequest {
}
@Override
public Task createTask(long id, String type, String action) {
return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
}
}


@@ -87,6 +87,10 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
protected abstract ShardResponse shardOperation(ShardRequest request);
protected ShardResponse shardOperation(ShardRequest request, Task task) {
return shardOperation(request);
}
/**
* Determines the shards this operation will be executed on. The operation is executed once per shard iterator, typically
* on the first shard in it. If the operation fails, it will be retried on the next shard in the iterator.
@@ -172,6 +176,7 @@
// no node connected, act as failure
onOperation(shard, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
} else {
taskManager.registerChildTask(task, node.getId());
transportService.sendRequest(node, transportShardAction, shardRequest, new BaseTransportResponseHandler<ShardResponse>() {
@Override
public ShardResponse newInstance() {
@@ -278,8 +283,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
class ShardTransportHandler implements TransportRequestHandler<ShardRequest> {
@Override
public void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception {
public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
channel.sendResponse(shardOperation(request));
}
@Override
public final void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception {
throw new UnsupportedOperationException("the task parameter is required");
}
}
}


@@ -301,6 +301,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
if (task != null) {
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
taskManager.registerChildTask(task, node.getId());
}
transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
@Override


@@ -159,6 +159,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}
}
};
taskManager.registerChildTask(task, nodes.getLocalNodeId());
threadPool.executor(executor).execute(new ActionRunnable(delegate) {
@Override
protected void doRun() throws Exception {
@@ -171,6 +172,7 @@
logger.debug("no known master node, scheduling a retry");
retry(null, MasterNodeChangePredicate.INSTANCE);
} else {
taskManager.registerChildTask(task, nodes.masterNode().getId());
transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
@Override
public Response newInstance() {


@@ -95,6 +95,10 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
protected abstract NodeResponse nodeOperation(NodeRequest request);
protected NodeResponse nodeOperation(NodeRequest request, Task task) {
return nodeOperation(request);
}
protected abstract boolean accumulateExceptions();
protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
@@ -163,6 +167,7 @@
ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
if (task != null) {
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
taskManager.registerChildTask(task, node.getId());
}
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeResponse>() {
@@ -228,8 +233,14 @@
class NodeTransportHandler implements TransportRequestHandler<NodeRequest> {
@Override
public void messageReceived(final NodeRequest request, final TransportChannel channel) throws Exception {
public void messageReceived(NodeRequest request, TransportChannel channel, Task task) throws Exception {
channel.sendResponse(nodeOperation(request, task));
}
@Override
public void messageReceived(NodeRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(nodeOperation(request));
}
}
}


@@ -196,8 +196,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
}
@Override
public Task createTask(long id, String type, String action) {
return new ReplicationTask(id, type, action, this::getDescription, getParentTaskNode(), getParentTaskId());
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
}
/**
@@ -218,4 +218,9 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return index;
}
}
@Override
public String getDescription() {
return toString();
}
}


@@ -35,7 +35,7 @@ import static java.util.Objects.requireNonNull;
public class ReplicationTask extends Task {
private volatile String phase = "starting";
public ReplicationTask(long id, String type, String action, Provider<String> description, String parentNode, long parentId) {
public ReplicationTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
}


@@ -121,6 +121,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
protected void shardExecute(Task task, Request request, ShardId shardId, ActionListener<ShardResponse> shardActionListener) {
ShardRequest shardRequest = newShardRequest(request, shardId);
shardRequest.setParentTask(clusterService.localNode().getId(), task.getId());
taskManager.registerChildTask(task, clusterService.localNode().getId());
replicatedBroadcastShardAction.execute(shardRequest, shardActionListener);
}


@@ -415,7 +415,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public static class RetryOnPrimaryException extends ElasticsearchException {
public RetryOnPrimaryException(ShardId shardId, String msg) {
super(msg);
this(shardId, msg, null);
}
public RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
}
@@ -486,6 +490,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return;
}
final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
taskManager.registerChildTask(task, node.getId());
if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
@@ -800,6 +805,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
* relocating copies
*/
final class ReplicationPhase extends AbstractRunnable {
private final ReplicationTask task;
private final ReplicaRequest replicaRequest;
private final Response finalResponse;
@@ -981,11 +987,19 @@
}
@Override
public void onFailure(Throwable t) {
// TODO: handle catastrophic non-channel failures
public void onFailure(Throwable shardFailedError) {
if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
ShardRouting primaryShard = indexShardReference.routingEntry();
String message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard [%s] for [%s]", primaryShard, shard, exp);
// we are no longer the primary, fail ourselves and start over
indexShardReference.failShard(message, shardFailedError);
forceFinishAsFailed(new RetryOnPrimaryException(shardId, message, shardFailedError));
} else {
assert false : shardFailedError;
onReplicaFailure(nodeId, exp);
}
}
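// In short: a NoLongerPrimaryShardException while reporting a replica failure
// means this node was demoted from primary mid-operation, so the shard fails
// itself and the whole operation is retried via RetryOnPrimaryException; any
// other error here is unexpected (hence the assertion) and is treated as an
// ordinary replica failure.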
}
);
}
}
@@ -1069,7 +1083,7 @@
interface IndexShardReference extends Releasable {
boolean isRelocated();
void failShard(String reason, @Nullable Throwable e);
ShardRouting routingEntry();
}
@@ -1097,6 +1111,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return indexShard.state() == IndexShardState.RELOCATED;
}
@Override
public void failShard(String reason, @Nullable Throwable e) {
indexShard.failShard(reason, e);
}
@Override
public ShardRouting routingEntry() {
return indexShard.routingEntry();


@@ -35,7 +35,6 @@ import java.io.IOException;
*/
public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {
public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY;
public static final String[] ALL_NODES = Strings.EMPTY_ARRAY;
@@ -52,6 +51,8 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
private long parentTaskId = ALL_TASKS;
private long taskId = ALL_TASKS;
public BaseTasksRequest() {
}
@@ -94,6 +95,22 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
return (Request) this;
}
/**
* Returns the id of the task that should be processed.
*
* By default tasks with any ids are returned.
*/
public long taskId() {
return taskId;
}
@SuppressWarnings("unchecked")
public final Request taskId(long taskId) {
this.taskId = taskId;
return (Request) this;
}
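// Hypothetical usage, not part of this file: new ListTasksRequest().taskId(42L)
// narrows the request to the single task with id 42, while leaving taskId at
// the ALL_TASKS default matches every task, as before.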
/**
* Returns the parent node id that tasks should be filtered by
*/
@@ -141,6 +158,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodesIds = in.readStringArray();
taskId = in.readLong();
actions = in.readStringArray();
parentNode = in.readOptionalString();
parentTaskId = in.readLong();
@@ -153,6 +171,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(nodesIds);
out.writeLong(taskId);
out.writeStringArrayNullable(actions);
out.writeOptionalString(parentNode);
out.writeLong(parentTaskId);
@@ -163,12 +182,17 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
return false;
}
if (taskId() != ALL_TASKS) {
if(taskId() != task.getId()) {
return false;
}
}
if (parentNode() != null) {
if (parentNode().equals(task.getParentNode()) == false) {
return false;
}
}
if (parentTaskId() != BaseTasksRequest.ALL_TASKS) {
if (parentTaskId() != ALL_TASKS) {
if (parentTaskId() != task.getParentId()) {
return false;
}


@@ -19,6 +19,7 @@
package org.elasticsearch.action.support.tasks;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
@@ -53,12 +54,14 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
* The base class for transport actions that are interacting with currently running tasks.
*/
public abstract class TransportTasksAction<
OperationTask extends Task,
TasksRequest extends BaseTasksRequest<TasksRequest>,
TasksResponse extends BaseTasksResponse,
TaskResponse extends Writeable<TaskResponse>
@@ -103,16 +106,16 @@ public abstract class TransportTasksAction<
TasksRequest request = nodeTaskRequest.tasksRequest;
List<TaskResponse> results = new ArrayList<>();
List<TaskOperationFailure> exceptions = new ArrayList<>();
for (Task task : taskManager.getTasks().values()) {
// First check action and node filters
if (request.match(task)) {
processTasks(request, task -> {
try {
results.add(taskOperation(request, task));
TaskResponse response = taskOperation(request, task);
if (response != null) {
results.add(response);
}
} catch (Exception ex) {
exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
}
}
}
});
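// Note the null check introduced above: taskOperation() may now return null
// for a task that yields no response, and such tasks are simply skipped
// rather than contributing a null entry to the results list.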
return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
}
@@ -124,6 +127,28 @@ public abstract class TransportTasksAction<
return clusterState.nodes().resolveNodesIds(request.nodesIds());
}
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
// we are only checking one task, so we can optimize it
Task task = taskManager.getTask(request.taskId());
if (task != null) {
if (request.match(task)) {
operation.accept((OperationTask) task);
} else {
throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
}
} else {
throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
}
} else {
for (Task task : taskManager.getTasks().values()) {
if (request.match(task)) {
operation.accept((OperationTask)task);
}
}
}
}
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
@SuppressWarnings("unchecked")
@@ -150,7 +175,7 @@ public abstract class TransportTasksAction<
protected abstract TaskResponse readTaskResponse(StreamInput in) throws IOException;
protected abstract TaskResponse taskOperation(TasksRequest request, Task task);
protected abstract TaskResponse taskOperation(TasksRequest request, OperationTask task);
protected boolean transportCompress() {
return false;
@@ -213,6 +238,7 @@ public abstract class TransportTasksAction<
} else {
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
taskManager.registerChildTask(task, node.getId());
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
@Override
public NodeTasksResponse newInstance() {


@@ -373,22 +373,6 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
return this;
}
/**
* @return <code>true</code> if distributed frequencies should be returned. Otherwise
* <code>false</code>
*/
public boolean dfs() {
return flagsEnum.contains(Flag.Dfs);
}
/**
* Use distributed frequencies instead of shard statistics.
*/
public TermVectorsRequest dfs(boolean dfs) {
setFlag(Flag.Dfs, dfs);
return this;
}
/**
* Return only term vectors for special selected fields. Returns for term
* vectors for all fields if selectedFields == null
@@ -583,7 +567,7 @@
public static enum Flag {
// Do not change the order of these flags we use
// the ordinal for encoding! Only append to the end!
Positions, Offsets, Payloads, FieldStatistics, TermStatistics, Dfs
Positions, Offsets, Payloads, FieldStatistics, TermStatistics
}
/**
@@ -616,7 +600,7 @@
} else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
termVectorsRequest.fieldStatistics(parser.booleanValue());
} else if (currentFieldName.equals("dfs")) {
termVectorsRequest.dfs(parser.booleanValue());
throw new IllegalArgumentException("distributed frequencies is not supported anymore for term vectors");
} else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
} else if (currentFieldName.equals("filter")) {

View File

@ -149,14 +149,6 @@ public class TermVectorsRequestBuilder extends ActionRequestBuilder<TermVectorsR
return this;
}
/**
* Sets whether to use distributed frequencies instead of shard statistics.
*/
public TermVectorsRequestBuilder setDfs(boolean dfs) {
request.dfs(dfs);
return this;
}
/**
* Sets whether to return only term vectors for special selected fields. Returns the term
* vectors for all fields if selectedFields == null

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -80,7 +81,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
try {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.getShard(shardId.id());
TermVectorsResponse termVectorsResponse = indexShard.getTermVectors(termVectorsRequest);
TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
response.add(request.locations.get(i), termVectorsResponse);
} catch (Throwable t) {

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -56,6 +57,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
super(settings, TermVectorsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
TermVectorsRequest::new, ThreadPool.Names.GET);
this.indicesService = indicesService;
}
@Override
@ -83,7 +85,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
TermVectorsResponse response = indexShard.getTermVectors(request);
TermVectorsResponse response = TermVectorsService.getTermVectors(indexShard, request);
response.updateTookInMillis(request.startTime());
return response;
}

View File

@ -1,112 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Set;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
public class DfsOnlyRequest extends BroadcastRequest<DfsOnlyRequest> {
private SearchRequest searchRequest = new SearchRequest();
long nowInMillis;
public DfsOnlyRequest() {
}
public DfsOnlyRequest(Fields termVectorsFields, String[] indices, String[] types, Set<String> selectedFields) throws IOException {
super(indices);
// build a search request with a query of all the terms
final BoolQueryBuilder boolBuilder = boolQuery();
for (String fieldName : termVectorsFields) {
if ((selectedFields != null) && (!selectedFields.contains(fieldName))) {
continue;
}
Terms terms = termVectorsFields.terms(fieldName);
TermsEnum iterator = terms.iterator();
while (iterator.next() != null) {
String text = iterator.term().utf8ToString();
boolBuilder.should(QueryBuilders.termQuery(fieldName, text));
}
}
// wrap a search request object
this.searchRequest = new SearchRequest(indices).types(types).source(new SearchSourceBuilder().query(boolBuilder));
}
public SearchRequest getSearchRequest() {
return searchRequest;
}
@Override
public ActionRequestValidationException validate() {
return searchRequest.validate();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.searchRequest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
this.searchRequest.writeTo(out);
}
public String[] types() {
return this.searchRequest.types();
}
public String routing() {
return this.searchRequest.routing();
}
public String preference() {
return this.searchRequest.preference();
}
@Override
public String toString() {
String sSource = "_na_";
if (searchRequest.source() != null) {
sSource = searchRequest.source().toString();
}
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types()) + ", source[" + sSource + "]";
}
}

View File

@ -1,73 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.dfs.AggregatedDfs;
import java.io.IOException;
import java.util.List;
/**
* A response of a dfs only request.
*/
public class DfsOnlyResponse extends BroadcastResponse {
private AggregatedDfs dfs;
private long tookInMillis;
DfsOnlyResponse(AggregatedDfs dfs, int totalShards, int successfulShards, int failedShards,
List<ShardOperationFailedException> shardFailures, long tookInMillis) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.dfs = dfs;
this.tookInMillis = tookInMillis;
}
public AggregatedDfs getDfs() {
return dfs;
}
public TimeValue getTook() {
return new TimeValue(tookInMillis);
}
public long getTookInMillis() {
return tookInMillis;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
AggregatedDfs.readAggregatedDfs(in);
tookInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
dfs.writeTo(out);
out.writeVLong(tookInMillis);
}
}

View File

@ -1,62 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import java.io.IOException;
public class ShardDfsOnlyRequest extends BroadcastShardRequest {
private ShardSearchTransportRequest shardSearchRequest = new ShardSearchTransportRequest();
public ShardDfsOnlyRequest() {
}
ShardDfsOnlyRequest(ShardRouting shardRouting, int numberOfShards, @Nullable String[] filteringAliases, long nowInMillis, DfsOnlyRequest request) {
super(shardRouting.shardId(), request);
this.shardSearchRequest = new ShardSearchTransportRequest(request.getSearchRequest(), shardRouting, numberOfShards,
filteringAliases, nowInMillis);
}
public ShardSearchRequest getShardSearchRequest() {
return shardSearchRequest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardSearchRequest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardSearchRequest.writeTo(out);
}
}

View File

@ -1,146 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
/**
* Get the dfs only with no fetch phase. This is for internal use only.
*/
public class TransportDfsOnlyAction extends TransportBroadcastAction<DfsOnlyRequest, DfsOnlyResponse, ShardDfsOnlyRequest, ShardDfsOnlyResponse> {
public static final String NAME = "internal:index/termvectors/dfs";
private final SearchService searchService;
private final SearchPhaseController searchPhaseController;
@Inject
public TransportDfsOnlyAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SearchService searchService, SearchPhaseController searchPhaseController) {
super(settings, NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
DfsOnlyRequest::new, ShardDfsOnlyRequest::new, ThreadPool.Names.SEARCH);
this.searchService = searchService;
this.searchPhaseController = searchPhaseController;
}
@Override
protected void doExecute(Task task, DfsOnlyRequest request, ActionListener<DfsOnlyResponse> listener) {
request.nowInMillis = System.currentTimeMillis();
super.doExecute(task, request, listener);
}
@Override
protected ShardDfsOnlyRequest newShardRequest(int numShards, ShardRouting shard, DfsOnlyRequest request) {
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index().getName(), request.indices());
return new ShardDfsOnlyRequest(shard, numShards, filteringAliases, request.nowInMillis, request);
}
@Override
protected ShardDfsOnlyResponse newShardResponse() {
return new ShardDfsOnlyResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, DfsOnlyRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, DfsOnlyRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, DfsOnlyRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected DfsOnlyResponse newResponse(DfsOnlyRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
AtomicArray<DfsSearchResult> dfsResults = new AtomicArray<>(shardsResponses.length());
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
dfsResults.set(i, ((ShardDfsOnlyResponse) shardResponse).getDfsSearchResult());
successfulShards++;
}
}
AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults);
return new DfsOnlyResponse(dfs, shardsResponses.length(), successfulShards, failedShards, shardFailures, buildTookInMillis(request));
}
@Override
protected ShardDfsOnlyResponse shardOperation(ShardDfsOnlyRequest request) {
DfsSearchResult dfsSearchResult = searchService.executeDfsPhase(request.getShardSearchRequest());
searchService.freeContext(dfsSearchResult.id());
return new ShardDfsOnlyResponse(request.shardId(), dfsSearchResult);
}
/**
* Builds how long it took to execute the dfs request.
*/
protected final long buildTookInMillis(DfsOnlyRequest request) {
// protect ourselves against time going backwards
// negative values don't make sense and we want to be able to serialize that thing as a vLong
return Math.max(1, System.currentTimeMillis() - request.nowInMillis);
}
}

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
@ -148,10 +149,11 @@ final class Bootstrap {
}
private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
initializeNatives(environment.tmpFile(),
settings.getAsBoolean("bootstrap.mlockall", false),
settings.getAsBoolean("bootstrap.seccomp", true),
settings.getAsBoolean("bootstrap.ctrlhandler", true));
initializeNatives(
environment.tmpFile(),
BootstrapSettings.MLOCKALL_SETTING.get(settings),
BootstrapSettings.SECCOMP_SETTING.get(settings),
BootstrapSettings.CTRLHANDLER_SETTING.get(settings));
// initialize probes before the security manager is installed
initializeProbes();
@ -186,22 +188,11 @@ final class Bootstrap {
node = new Node(nodeSettings);
}
/**
* option for elasticsearch.yml etc to turn off our security manager completely,
* for example if you want to have your own configuration or just disable.
*/
// TODO: remove this: http://www.openbsd.org/papers/hackfest2015-pledge/mgp00005.jpg
static final String SECURITY_SETTING = "security.manager.enabled";
/**
* option for elasticsearch.yml to fully respect the system policy, including bad defaults
* from java.
*/
// TODO: remove this hack when insecure defaults are removed from java
static final String SECURITY_FILTER_BAD_DEFAULTS_SETTING = "security.manager.filter_bad_defaults";
private void setupSecurity(Settings settings, Environment environment) throws Exception {
if (settings.getAsBoolean(SECURITY_SETTING, true)) {
Security.configure(environment, settings.getAsBoolean(SECURITY_FILTER_BAD_DEFAULTS_SETTING, true));
if (BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(settings)) {
Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
}
}
@ -263,10 +254,6 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground")));
// handle the wrapper system property, if its a service, don't run as a service
if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) {
foreground = false;
}
Environment environment = initialSettings(foreground);
Settings settings = environment.settings();

View File

@ -32,10 +32,12 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
@ -131,6 +133,7 @@ final class BootstrapCLIParser extends CliTool {
// hacky way to extract all the fancy extra args, there is no CLI tool helper for this
Iterator<String> iterator = cli.getArgList().iterator();
final Map<String, String> properties = new HashMap<>();
while (iterator.hasNext()) {
String arg = iterator.next();
if (!arg.startsWith("--")) {
@ -148,20 +151,22 @@ final class BootstrapCLIParser extends CliTool {
String[] splitArg = arg.split("=", 2);
String key = splitArg[0];
String value = splitArg[1];
System.setProperty("es." + key, value);
properties.put("es." + key, value);
} else {
if (iterator.hasNext()) {
String value = iterator.next();
if (value.startsWith("--")) {
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
System.setProperty("es." + arg, value);
properties.put("es." + arg, value);
} else {
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
}
}
for (Map.Entry<String, String> entry : properties.entrySet()) {
System.setProperty(entry.getKey(), entry.getValue());
}
return new Start(terminal);
}
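The staging map defers System.setProperty until the whole command line has parsed, so an error thrown mid-way leaves the JVM's properties untouched. A sketch of the same collect-then-commit idea in isolation (parseKey and parseValue are hypothetical helpers, not part of this class):

    Map<String, String> staged = new HashMap<>();
    for (String arg : args) {
        // may throw before anything has been committed
        staged.put(parseKey(arg), parseValue(arg));
    }
    // commit only once the whole command line parsed successfully
    for (Map.Entry<String, String> entry : staged.entrySet()) {
        System.setProperty(entry.getKey(), entry.getValue());
    }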

View File

@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
public final class BootstrapSettings {
private BootstrapSettings() {
}
// TODO: remove this: http://www.openbsd.org/papers/hackfest2015-pledge/mgp00005.jpg
/**
* option to turn off our security manager completely, for example
* if you want to have your own configuration or just disable
*/
public static final Setting<Boolean> SECURITY_MANAGER_ENABLED_SETTING =
Setting.boolSetting("security.manager.enabled", true, false, Scope.CLUSTER);
// TODO: remove this hack when insecure defaults are removed from java
public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
public static final Setting<Boolean> MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER);
public static final Setting<Boolean> CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER);
}
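These Setting constants replace the raw settings.getAsBoolean(...) lookups shown in the Bootstrap diff above. A minimal sketch of reading them (assuming settings holds the node's parsed Settings; the defaults come from the declarations):

    boolean mlockall    = BootstrapSettings.MLOCKALL_SETTING.get(settings);    // false by default
    boolean seccomp     = BootstrapSettings.SECCOMP_SETTING.get(settings);     // true by default
    boolean ctrlHandler = BootstrapSettings.CTRLHANDLER_SETTING.get(settings); // true by default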

View File

@ -19,28 +19,26 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.Strings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
class JavaVersion implements Comparable<JavaVersion> {
private final List<Integer> version;
public List<Integer> getVersion() {
return Collections.unmodifiableList(version);
return version;
}
private JavaVersion(List<Integer> version) {
this.version = version;
this.version = Collections.unmodifiableList(version);
}
public static JavaVersion parse(String value) {
if (value == null) {
throw new NullPointerException("value");
}
if ("".equals(value)) {
Objects.requireNonNull(value);
if (!isValid(value)) {
throw new IllegalArgumentException("value");
}
@ -79,6 +77,6 @@ class JavaVersion implements Comparable<JavaVersion> {
@Override
public String toString() {
return Strings.collectionToDelimitedString(version, ".");
return version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining("."));
}
}

View File

@ -33,6 +33,9 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
@ -287,6 +290,29 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*/
ListTasksRequestBuilder prepareListTasks(String... nodesIds);
/**
* Cancel tasks
*
* @param request The nodes tasks request
* @return The result future
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
*/
ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request);
/**
* Cancel active tasks
*
* @param request The nodes tasks request
* @param listener A listener to be notified with a result
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
*/
void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener);
/**
* Cancel active tasks
*/
CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds);
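A minimal usage sketch of the new cancel API via the builder (assuming a connected Client named client; the node id is illustrative):

    // pass no ids, null, or an empty array to target all nodes
    CancelTasksResponse response = client.admin().cluster()
            .prepareCancelTasks("node_1")
            .get();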
/**
* Returns list of shards the given search would be executed on.
*/

View File

@ -22,6 +22,7 @@ package org.elasticsearch.client;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
@ -420,12 +421,23 @@ public class Requests {
*
* @param nodesIds The nodes ids to get the tasks for
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#nodesStats(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest)
* @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
*/
public static ListTasksRequest listTasksRequest(String... nodesIds) {
return new ListTasksRequest(nodesIds);
}
/**
* Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to cancel the tasks on
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#cancelTasks(CancelTasksRequest)
*/
public static CancelTasksRequest cancelTasksRequest(String... nodesIds) {
return new CancelTasksRequest(nodesIds);
}
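A sketch of the request-plus-listener form (again assuming a connected Client named client; the handler bodies are placeholders):

    CancelTasksRequest request = Requests.cancelTasksRequest(); // no ids: all nodes
    client.admin().cluster().cancelTasks(request, new ActionListener<CancelTasksResponse>() {
        @Override
        public void onResponse(CancelTasksResponse response) {
            // inspect the cancelled tasks and any failures here
        }

        @Override
        public void onFailure(Throwable e) {
            // handle a transport-level failure here
        }
    });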
/**
* Registers snapshot repository
*

View File

@ -41,6 +41,10 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
@ -992,6 +996,22 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new ListTasksRequestBuilder(this, ListTasksAction.INSTANCE).setNodesIds(nodesIds);
}
@Override
public ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request) {
return execute(CancelTasksAction.INSTANCE, request);
}
@Override
public void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener) {
execute(CancelTasksAction.INSTANCE, request, listener);
}
@Override
public CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds) {
return new CancelTasksRequestBuilder(this, CancelTasksAction.INSTANCE).setNodesIds(nodesIds);
}
@Override
public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) {
return execute(ClusterSearchShardsAction.INSTANCE, request);

View File

@ -53,7 +53,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
@ -336,7 +335,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
}
QueryShardContext queryShardContext = indexService.getQueryShardContext();
final QueryShardContext queryShardContext = indexService.newQueryShardContext();
for (Alias alias : request.aliases()) {
if (Strings.hasLength(alias.filter())) {
aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext);

View File

@ -117,7 +117,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
indices.put(indexMetaData.getIndex().getName(), indexService);
}
aliasValidator.validateAliasFilter(aliasAction.alias(), filter, indexService.getQueryShardContext());
aliasValidator.validateAliasFilter(aliasAction.alias(), filter, indexService.newQueryShardContext());
}
AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(
aliasAction.alias())

View File

@ -290,7 +290,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
final Map<String, MappingMetaData> mappings = new HashMap<>();
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String index : request.indices()) {
// do the actual merge here on the master, and update the mapping source
IndexService indexService = indicesService.indexService(index);
@ -311,7 +311,6 @@ public class MetaDataMappingService extends AbstractComponent {
// same source, no changes, ignore it
} else {
// use the merged mapping source
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
@ -320,28 +319,24 @@ public class MetaDataMappingService extends AbstractComponent {
}
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, mappingType);
}
}
}
if (mappings.isEmpty()) {
// no changes, return
return currentState;
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(indexName);
throw new IndexNotFoundException(index);
}
MappingMetaData mappingMd = mappings.get(indexName);
if (mappingMd != null) {
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
// Mapping updates on a single type may have side-effects on other types so we need to
// update mapping metadata on all types
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
}
builder.put(indexMetaDataBuilder);
}
return ClusterState.builder(currentState).metaData(builder).build();

View File

@ -94,19 +94,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
return index;
}
/**
* creates a new {@link IndexRoutingTable} with all shard versions normalized
*
* @return new {@link IndexRoutingTable}
*/
public IndexRoutingTable normalizeVersions() {
IndexRoutingTable.Builder builder = new Builder(this.index);
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
builder.addIndexShard(cursor.value.normalizeVersions());
}
return builder.build();
}
public void validate(RoutingTableValidation validation, MetaData metaData) {
if (!metaData.hasIndex(index.getName())) {
validation.addIndexFailure(index.getName(), "Exists in routing does not exists in metadata");

View File

@ -119,40 +119,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
this.allInitializingShards = Collections.unmodifiableList(allInitializingShards);
}
/**
* Normalizes all shard routings to the same version.
*/
public IndexShardRoutingTable normalizeVersions() {
if (shards.isEmpty()) {
return this;
}
if (shards.size() == 1) {
return this;
}
long highestVersion = shards.get(0).version();
boolean requiresNormalization = false;
for (int i = 1; i < shards.size(); i++) {
if (shards.get(i).version() != highestVersion) {
requiresNormalization = true;
}
if (shards.get(i).version() > highestVersion) {
highestVersion = shards.get(i).version();
}
}
if (!requiresNormalization) {
return this;
}
List<ShardRouting> shardRoutings = new ArrayList<>(shards.size());
for (int i = 0; i < shards.size(); i++) {
if (shards.get(i).version() == highestVersion) {
shardRoutings.add(shards.get(i));
} else {
shardRoutings.add(new ShardRouting(shards.get(i), highestVersion));
}
}
return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings));
}
/**
* Returns the shards id
*

View File

@ -693,9 +693,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*/
public void initialize(String nodeId, long version, long expectedShardSize) {
public void initialize(String nodeId, long expectedShardSize) {
innerRemove();
nodes.initialize(new ShardRouting(current, version), nodeId, expectedShardSize);
nodes.initialize(new ShardRouting(current), nodeId, expectedShardSize);
}
/**
@ -711,7 +711,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
* {@link #initialize(String, long, long)}.
* {@link #initialize(String, long)}.
*/
@Override
public void remove() {

View File

@ -588,7 +588,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
// normalize the versions right before we build it...
for (ObjectCursor<IndexRoutingTable> indexRoutingTable : indicesRouting.values()) {
indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value.normalizeVersions());
indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value);
}
RoutingTable table = new RoutingTable(version, indicesRouting.build());
indicesRouting = null;

View File

@ -51,7 +51,6 @@ public final class ShardRouting implements Streamable, ToXContent {
private String relocatingNodeId;
private boolean primary;
private ShardRoutingState state;
private long version;
private RestoreSource restoreSource;
private UnassignedInfo unassignedInfo;
private AllocationId allocationId;
@ -65,11 +64,7 @@ public final class ShardRouting implements Streamable, ToXContent {
}
public ShardRouting(ShardRouting copy) {
this(copy, copy.version());
}
public ShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
}
/**
@ -77,7 +72,7 @@ public final class ShardRouting implements Streamable, ToXContent {
* by either this class or tests. Visible for testing.
*/
ShardRouting(Index index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
this.index = index;
this.shardId = shardId;
@ -86,7 +81,6 @@ public final class ShardRouting implements Streamable, ToXContent {
this.primary = primary;
this.state = state;
this.asList = Collections.singletonList(this);
this.version = version;
this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId;
@ -107,7 +101,7 @@ public final class ShardRouting implements Streamable, ToXContent {
* Creates a new unassigned shard.
*/
public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
public Index index() {
@ -136,13 +130,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
/**
* The routing version associated with the shard.
*/
public long version() {
return this.version;
}
/**
* The shard is unassigned (not allocated to any node).
*/
@ -214,7 +201,7 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
public ShardRouting buildTargetRelocatingShard() {
assert relocating();
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo,
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
}
@ -313,7 +300,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
public void readFromThin(StreamInput in) throws IOException {
version = in.readLong();
if (in.readBoolean()) {
currentNodeId = in.readString();
}
@ -352,7 +338,6 @@ public final class ShardRouting implements Streamable, ToXContent {
* @throws IOException if something happens during write
*/
public void writeToThin(StreamOutput out) throws IOException {
out.writeLong(version);
if (currentNodeId != null) {
out.writeBoolean(true);
out.writeString(currentNodeId);
@ -414,7 +399,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToUnassigned(UnassignedInfo unassignedInfo) {
ensureNotFrozen();
version++;
assert state != ShardRoutingState.UNASSIGNED : this;
state = ShardRoutingState.UNASSIGNED;
currentNodeId = null;
@ -429,7 +413,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void initialize(String nodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.UNASSIGNED : this;
assert relocatingNodeId == null : this;
state = ShardRoutingState.INITIALIZING;
@ -445,7 +428,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void relocate(String relocatingNodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId;
@ -459,7 +441,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void cancelRelocation() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
@ -470,12 +451,11 @@ public final class ShardRouting implements Streamable, ToXContent {
}
/**
* Moves the shard from started to initializing and bumps the version
* Moves the shard from started to initializing
*/
void reinitializeShard() {
ensureNotFrozen();
assert state == ShardRoutingState.STARTED;
version++;
state = ShardRoutingState.INITIALIZING;
allocationId = AllocationId.newInitializing();
this.unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null);
@ -488,7 +468,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToStarted() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this;
relocatingNodeId = null;
restoreSource = null;
@ -507,7 +486,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToPrimary() {
ensureNotFrozen();
version++;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
@ -519,7 +497,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveFromPrimary() {
ensureNotFrozen();
version++;
if (!primary) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
@ -638,26 +615,22 @@ public final class ShardRouting implements Streamable, ToXContent {
if (this == o) {
return true;
}
// we check on instanceof so we also handle the ImmutableShardRouting case as well
if (o == null || !(o instanceof ShardRouting)) {
return false;
}
ShardRouting that = (ShardRouting) o;
if (version != that.version) {
return false;
}
if (unassignedInfo != null ? !unassignedInfo.equals(that.unassignedInfo) : that.unassignedInfo != null) {
return false;
}
return equalsIgnoringMetaData(that);
}
private long hashVersion = version - 1;
private boolean usePreComputedHashCode = false;
private int hashCode = 0;
@Override
public int hashCode() {
if (hashVersion == version) {
if (frozen && usePreComputedHashCode) {
return hashCode;
}
int result = index != null ? index.hashCode() : 0;
@ -666,10 +639,12 @@ public final class ShardRouting implements Streamable, ToXContent {
result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + (state != null ? state.hashCode() : 0);
result = 31 * result + Long.hashCode(version);
result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
result = 31 * result + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
if (frozen) {
usePreComputedHashCode = true;
}
return hashCode = result;
}
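The removed version-based hash caching is replaced by a freeze-then-cache scheme: once a routing is frozen its fields can no longer change, so the hash computed after freezing can be reused on every call. The same idea as a self-contained sketch (all names are illustrative, not part of ShardRouting):

    final class FrozenPoint {
        private int x, y;
        private boolean frozen = false;
        private boolean hashCached = false;
        private int cachedHash;

        void freeze() { frozen = true; }

        void setX(int x) {
            if (frozen) throw new IllegalStateException("frozen");
            this.x = x;
        }

        @Override
        public int hashCode() {
            if (frozen && hashCached) {
                return cachedHash;    // fields are immutable from here on
            }
            int result = 31 * x + y;
            if (frozen) {
                hashCached = true;    // first call after freeze() caches
            }
            return cachedHash = result;
        }
    }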
@ -693,7 +668,6 @@ public final class ShardRouting implements Streamable, ToXContent {
} else {
sb.append("[R]");
}
sb.append(", v[").append(version).append("]");
if (this.restoreSource != null) {
sb.append(", restoring[" + restoreSource + "]");
}
@ -718,8 +692,7 @@ public final class ShardRouting implements Streamable, ToXContent {
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", shardId().id())
.field("index", shardId().getIndex().getName())
.field("version", version);
.field("index", shardId().getIndex().getName());
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}

View File

@ -242,8 +242,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
if (shardRoutingChanges != null) {
shardRoutingChanges.accept(unassigned);
}
it.initialize(routingNode.nodeId(), unassigned.version(),
allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
it.initialize(routingNode.nodeId(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return;
}
assert false : "shard to initialize not found in list of unassigned shards";

View File

@ -189,6 +189,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
@Override
protected void doStart() {
add(localNodeMasterListeners);
add(taskManager);
this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());

View File

@ -93,7 +93,7 @@ import java.util.Locale;
* <a href="http://www.faqs.org/rfcs/rfc3548.html">RFC3548</a>.</li>
* <li><em>Throws exceptions instead of returning null values.</em> Because some operations
* (especially those that may permit the GZIP option) use IO streams, there
* is a possiblity of an java.io.IOException being thrown. After some discussion and
* is a possibility of a java.io.IOException being thrown. After some discussion and
* thought, I've changed the behavior of the methods to throw java.io.IOExceptions
* rather than return null if ever there's an error. I think this is more
* appropriate, though it will require some changes to your code. Sorry,
@ -1511,7 +1511,7 @@ public class Base64 {
if (suspendEncoding) {
this.out.write(theByte);
return;
} // end if: supsended
} // end if: suspended
// Encode?
if (encode) {
@ -1565,7 +1565,7 @@ public class Base64 {
if (suspendEncoding) {
this.out.write(theBytes, off, len);
return;
} // end if: supsended
} // end if: suspended
for (int i = 0; i < len; i++) {
write(theBytes[off + i]);

View File

@ -147,7 +147,7 @@ public class PagedBytesReference implements BytesReference {
bytearray.get(offset, length, ref);
// undo the single-page optimization by ByteArray.get(), otherwise
// a materialized stream will contain traling garbage/zeros
// a materialized stream will contain trailing garbage/zeros
byte[] result = ref.bytes;
if (result.length != length || ref.offset != 0) {
result = Arrays.copyOfRange(result, ref.offset, ref.offset + length);
@ -403,7 +403,7 @@ public class PagedBytesReference implements BytesReference {
return -1;
}
final int numBytesToCopy = Math.min(len, length - pos); // copy the full lenth or the remaining part
final int numBytesToCopy = Math.min(len, length - pos); // copy the full length or the remaining part
// current offset into the underlying ByteArray
long byteArrayOffset = offset + pos;

View File

@ -33,7 +33,7 @@ package org.elasticsearch.common.component;
* following logic can be applied:
* <pre>
* public void stop() {
* if (!lifeccycleState.moveToStopped()) {
* if (!lifecycleState.moveToStopped()) {
* return;
* }
* // continue with stop logic
@ -50,7 +50,7 @@ package org.elasticsearch.common.component;
* if (!lifecycleState.moveToClosed()) {
* return;
* }
* // perofrm close logic here
* // perform close logic here
* }
* </pre>
*/

View File

@ -124,7 +124,7 @@ public abstract class CompressedIndexInput extends IndexInput {
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
int result = read(b, offset, len, true /* we want to have full reads, thats the contract... */);
int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
if (result < len) {
throw new EOFException();
}

View File

@ -121,7 +121,7 @@ public abstract class CompressedStreamInput extends StreamInput {
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
int result = read(b, offset, len, true /* we want to have full reads, thats the contract... */);
int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
if (result < len) {
throw new EOFException();
}

View File

@ -52,7 +52,7 @@ public class DeflateCompressor implements Compressor {
private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' };
// 3 is a good trade-off between speed and compression ratio
private static final int LEVEL = 3;
// We use buffering on the input and ouput of in/def-laters in order to
// We use buffering on the input and output of in/def-laters in order to
// limit the number of JNI calls
private static final int BUFFER_SIZE = 4096;

View File

@ -19,52 +19,83 @@
package org.elasticsearch.common.hash;
import org.elasticsearch.ElasticsearchException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Objects;
public class MessageDigests {
/**
* This MessageDigests class provides convenience methods for obtaining
* thread local {@link MessageDigest} instances for MD5, SHA-1, and
* SHA-256 message digests.
*/
public final class MessageDigests {
private static final MessageDigest MD5_DIGEST;
private static final MessageDigest SHA_1_DIGEST;
private static final MessageDigest SHA_256_DIGEST;
static {
private static ThreadLocal<MessageDigest> createThreadLocalMessageDigest(String digest) {
return ThreadLocal.withInitial(() -> {
try {
MD5_DIGEST = MessageDigest.getInstance("MD5");
SHA_1_DIGEST = MessageDigest.getInstance("SHA-1");
SHA_256_DIGEST = MessageDigest.getInstance("SHA-256");
return MessageDigest.getInstance(digest);
} catch (NoSuchAlgorithmException e) {
throw new ElasticsearchException("Unexpected exception creating MessageDigest instance", e);
throw new IllegalStateException("unexpected exception creating MessageDigest instance for [" + digest + "]", e);
}
});
}
private static final ThreadLocal<MessageDigest> MD5_DIGEST = createThreadLocalMessageDigest("MD5");
private static final ThreadLocal<MessageDigest> SHA_1_DIGEST = createThreadLocalMessageDigest("SHA-1");
private static final ThreadLocal<MessageDigest> SHA_256_DIGEST = createThreadLocalMessageDigest("SHA-256");
/**
* Returns a {@link MessageDigest} instance for MD5 digests; note
* that the instance returned is thread local and must not be
* shared amongst threads.
*
* @return a thread local {@link MessageDigest} instance that
* provides MD5 message digest functionality.
*/
public static MessageDigest md5() {
return clone(MD5_DIGEST);
return get(MD5_DIGEST);
}
/**
* Returns a {@link MessageDigest} instance for SHA-1 digests; note
* that the instance returned is thread local and must not be
* shared amongst threads.
*
* @return a thread local {@link MessageDigest} instance that
* provides SHA-1 message digest functionality.
*/
public static MessageDigest sha1() {
return clone(SHA_1_DIGEST);
return get(SHA_1_DIGEST);
}
/**
* Returns a {@link MessageDigest} instance for SHA-256 digests;
* note that the instance returned is thread local and must not be
* shared amongst threads.
*
* @return a thread local {@link MessageDigest} instance that
* provides SHA-256 message digest functionality.
*/
public static MessageDigest sha256() {
return clone(SHA_256_DIGEST);
return get(SHA_256_DIGEST);
}
private static MessageDigest clone(MessageDigest messageDigest) {
try {
return (MessageDigest) messageDigest.clone();
} catch (CloneNotSupportedException e) {
throw new ElasticsearchException("Unexpected exception cloning MessageDigest instance", e);
}
private static MessageDigest get(ThreadLocal<MessageDigest> messageDigest) {
MessageDigest instance = messageDigest.get();
instance.reset();
return instance;
}
private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();
/**
* Format a byte array as a hex string.
*
* @param bytes the input to be represented as hex.
* @return a hex representation of the input as a String.
*/
public static String toHexString(byte[] bytes) {
if (bytes == null) {
throw new NullPointerException("bytes");
}
Objects.requireNonNull(bytes);
StringBuilder sb = new StringBuilder(2 * bytes.length);
for (int i = 0; i < bytes.length; i++) {
@ -74,4 +105,5 @@ public class MessageDigests {
return sb.toString();
}
}
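A minimal usage sketch (the input bytes are illustrative; the returned digest is thread local and must not be shared across threads):

    MessageDigest sha256 = MessageDigests.sha256();  // reset and ready to use
    byte[] hash = sha256.digest("content".getBytes(StandardCharsets.UTF_8));
    String hex = MessageDigests.toHexString(hash);   // lowercase hex, two chars per byte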

View File

@ -24,7 +24,7 @@ import java.lang.annotation.Target;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* Acccompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a
* Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a
* private module to indicate that the provided binding is exposed.
*
* @author jessewilson@google.com (Jesse Wilson)

View File

@ -489,7 +489,7 @@ class InjectorImpl implements Injector, Lookups {
ParameterizedType parameterizedType = (ParameterizedType) typeLiteralType;
Type innerType = parameterizedType.getActualTypeArguments()[0];
// this is unforunate. We don't support building TypeLiterals for type variable like 'T'. If
// this is unfortunate. We don't support building TypeLiterals for type variable like 'T'. If
// this proves problematic, we can probably fix TypeLiteral to support type variables
if (!(innerType instanceof Class)
&& !(innerType instanceof GenericArrayType)

View File

@ -41,7 +41,7 @@ import java.util.Objects;
* TypeLiteral}.
* <p>
* Keys do not differentiate between primitive types (int, char, etc.) and
* their correpsonding wrapper types (Integer, Character, etc.). Primitive
* their corresponding wrapper types (Integer, Character, etc.). Primitive
* types will be replaced with their wrapper types when keys are created.
*
* @author crazybob@google.com (Bob Lee)

View File

@ -30,7 +30,7 @@ import java.util.List;
import java.util.Set;
/**
* Internal respresentation of a constructor annotated with
* Internal representation of a constructor annotated with
* {@link AssistedInject}
*
* @author jmourits@google.com (Jerome Mourits)

View File

@ -24,7 +24,7 @@ import java.util.Arrays;
import java.util.List;
/**
* A list of {@link TypeLiteral}s to match an injectable Constructor's assited
* A list of {@link TypeLiteral}s to match an injectable Constructor's assisted
* parameter types to the corresponding factory method.
*
* @author jmourits@google.com (Jerome Mourits)

View File

@ -91,7 +91,7 @@ public final class Dependency<T> {
/**
* Returns the index of this dependency in the injection point's parameter list, or {@code -1} if
* this dependency does not belong to a parameter list. Only method and constuctor dependencies
* this dependency does not belong to a parameter list. Only method and constructor dependencies
* are elements in a parameter list.
*/
public int getParameterIndex() {

View File

@ -125,7 +125,7 @@ public final class InjectionPoint {
return Collections.unmodifiableList(dependencies);
}
// This metohd is necessary to create a Dependency<T> with proper generic type information
// This method is necessary to create a Dependency<T> with proper generic type information
private <T> Dependency<T> newDependency(Key<T> key, boolean allowsNull, int parameterIndex) {
return new Dependency<>(this, key, allowsNull, parameterIndex);
}

View File

@ -34,7 +34,7 @@ import java.util.Objects;
*/
public final class ProviderLookup<T> implements Element {
// NOTE: this class is not part of guice and was added so the provder lookup's key can be acessible for tests
// NOTE: this class is not part of guice and was added so the provider lookup's key can be accessible for tests
public static class ProviderImpl<T> implements Provider<T> {
private ProviderLookup<T> lookup;

View File

@ -28,7 +28,7 @@ import java.io.IOException;
/**
* This exception can be used to wrap a given, not serializable exception
* to serialize via {@link StreamOutput#writeThrowable(Throwable)}.
* This class will perserve the stacktrace as well as the suppressed exceptions of
* This class will preserve the stacktrace as well as the suppressed exceptions of
* the throwable it was created with instead of it's own. The stacktrace has no indication
* of where this exception was created.
*/

View File

@ -627,7 +627,7 @@ public class Lucene {
}
/**
* Parses the version string lenient and returns the the default value if the given string is null or emtpy
* Parses the version string leniently and returns the default value if the given string is null or empty
*/
public static Version parseVersionLenient(String toParse, Version defaultValue) {
return LenientParser.parse(toParse, defaultValue);

View File

@ -116,6 +116,11 @@ public class FieldValueFactorFunction extends ScoreFunction {
Objects.equals(this.modifier, fieldValueFactorFunction.modifier);
}
@Override
protected int doHashCode() {
return Objects.hash(boostFactor, field, modifier);
}
/**
* The Type class encapsulates the modification types that can be applied
* to the score/value product.

View File

@ -25,6 +25,8 @@ import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import java.util.Objects;
/**
* Pseudo randomly generate a score for each {@link LeafScoreFunction#score}.
*/
@ -92,4 +94,9 @@ public class RandomScoreFunction extends ScoreFunction {
return this.originalSeed == randomScoreFunction.originalSeed &&
this.saltedSeed == randomScoreFunction.saltedSeed;
}
@Override
protected int doHashCode() {
return Objects.hash(originalSeed, saltedSeed);
}
}

View File

@ -66,4 +66,15 @@ public abstract class ScoreFunction {
* Indicates whether some other {@link ScoreFunction} object of the same type is "equal to" this one.
*/
protected abstract boolean doEquals(ScoreFunction other);
@Override
public final int hashCode() {
/*
* Override hashCode here and forward to an abstract method to force extensions of this class to override hashCode in the same
* way that we force them to override equals. This also prevents false positives in CheckStyle's EqualsHashCode check.
*/
return Objects.hash(scoreCombiner, doHashCode());
}
protected abstract int doHashCode();
}
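The doHashCode additions in the surrounding hunks all follow the one pattern that this final hashCode() fixes in place. As a minimal sketch of that contract, here is a hypothetical subclass; the class name and field are invented, the super(scoreCombiner) call is assumed from the ScoreOne constructor later in this diff, and ScoreFunction's unrelated abstract methods are elided:

// Hypothetical subclass; only the equals/hashCode-related members are shown.
public class ConstantScoreFunction extends ScoreFunction {
    private final float constant;

    protected ConstantScoreFunction(CombineFunction scoreCombiner, float constant) {
        super(scoreCombiner);
        this.constant = constant;
    }

    @Override
    protected boolean doEquals(ScoreFunction other) {
        // per the doEquals javadoc, other is already known to be of the same type
        return this.constant == ((ConstantScoreFunction) other).constant;
    }

    @Override
    protected int doHashCode() {
        // combined with scoreCombiner by the final hashCode() in ScoreFunction
        return Float.hashCode(constant);
    }
}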

View File

@ -133,4 +133,9 @@ public class ScriptScoreFunction extends ScoreFunction {
ScriptScoreFunction scriptScoreFunction = (ScriptScoreFunction) other;
return Objects.equals(this.sScript, scriptScoreFunction.sScript);
}
@Override
protected int doHashCode() {
return Objects.hash(sScript);
}
}

View File

@ -93,6 +93,11 @@ public class WeightFactorFunction extends ScoreFunction {
Objects.equals(this.scoreFunction, weightFactorFunction.scoreFunction);
}
@Override
protected int doHashCode() {
return Objects.hash(weight, scoreFunction);
}
private static class ScoreOne extends ScoreFunction {
protected ScoreOne(CombineFunction scoreCombiner) {
@ -123,5 +128,10 @@ public class WeightFactorFunction extends ScoreFunction {
protected boolean doEquals(ScoreFunction other) {
return true;
}
@Override
protected int doHashCode() {
return 0;
}
}
}

View File

@ -40,6 +40,7 @@ import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthActio
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestCancelTasksAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
@ -265,6 +266,7 @@ public class NetworkModule extends AbstractModule {
// Tasks API
RestListTasksAction.class,
RestCancelTasksAction.class,
// Ingest API
RestPutPipelineAction.class,

View File

@ -21,7 +21,7 @@ package org.elasticsearch.common.recycler;
abstract class FilterRecycler<T> implements Recycler<T> {
/** Get the delegate instance to foward calls to. */
/** Get the delegate instance to forward calls to. */
protected abstract Recycler<T> getDelegate();
/** Wrap a recycled reference. */

View File

@ -60,6 +60,7 @@ import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
@ -87,6 +88,7 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.transport.netty.NettyTransport;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.bootstrap.BootstrapSettings;
import java.util.Arrays;
import java.util.Collections;
@ -290,7 +292,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
@ -377,6 +379,11 @@ public final class ClusterSettings extends AbstractScopedSettings {
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING,
PluginsService.MANDATORY_SETTING
PluginsService.MANDATORY_SETTING,
BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING,
BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
BootstrapSettings.MLOCKALL_SETTING,
BootstrapSettings.SECCOMP_SETTING,
BootstrapSettings.CTRLHANDLER_SETTING
)));
}

View File

@ -38,8 +38,8 @@ import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.search.SearchService;
import java.util.Arrays;
import java.util.Collections;
@ -117,7 +117,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING,
EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING,
EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTTING,
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING,
IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
FieldMapper.IGNORE_MALFORMED_SETTING,
FieldMapper.COERCE_SETTING,
@ -132,7 +132,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
SearchService.INDEX_NORMS_LOADING_SETTING,
IndexWarmer.INDEX_NORMS_LOADING_SETTING,
// this sucks but we can't really validate all the analyzers/similarity in here
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed

View File

@ -47,7 +47,7 @@ import java.util.stream.Collectors;
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (dynamic=true) can be modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several untility methods that makes it straight forward
* together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward
* to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this:
* <pre>{@code
* public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
@ -256,8 +256,8 @@ public class Setting<T> extends ToXContentToBytes {
* this is used for settings that depend on each other... see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its
* usage for details.
*/
static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A,B> consumer, final Setting<A> aSettting, final Setting<B> bSetting, ESLogger logger) {
final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSettting.newUpdater(null, logger);
static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A,B> consumer, final Setting<A> aSetting, final Setting<B> bSetting, ESLogger logger) {
final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSetting.newUpdater(null, logger);
final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger);
return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() {
@Override

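A hedged sketch of how this compound updater is reached through the addSettingsUpdateConsumer(Setting, Setting, BiConsumer) API named in the comment above; the setting names, the consumer body, and the clusterSettings instance are invented, and the boolSetting signature follows the Setting javadoc earlier in this diff:

// Two hypothetical interdependent settings.
Setting<Boolean> featureEnabled = Setting.boolSetting("my.feature.enabled", true, false, Setting.Scope.CLUSTER);
Setting<Boolean> featureStrict = Setting.boolSetting("my.feature.strict", false, false, Setting.Scope.CLUSTER);

// The compound updater delivers both new values to a single consumer in one call,
// so the callback always observes a consistent pair.
clusterSettings.addSettingsUpdateConsumer(featureEnabled, featureStrict, (enabled, strict) -> {
    if (strict && enabled == false) {
        throw new IllegalArgumentException("my.feature.strict requires my.feature.enabled");
    }
});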
View File

@ -471,7 +471,7 @@ public final class Settings implements ToXContent {
/**
* Returns the setting value (as size) associated with the setting key. Provided values can either be
* absolute values (intepreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
* absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
* (eg. 12%). If it does not exist, parses the default value provided.
*/
public ByteSizeValue getAsMemory(String setting, String defaultValue) throws SettingsException {
@ -480,7 +480,7 @@ public final class Settings implements ToXContent {
/**
* Returns the setting value (as size) associated with the setting key. Provided values can either be
* absolute values (intepreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
* absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
* (eg. 12%). If it does not exist, parses the default value provided.
*/
public ByteSizeValue getAsMemory(String[] settings, String defaultValue) throws SettingsException {

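A brief hedged sketch of the three value formats this javadoc describes; the key is invented and a populated Settings instance named settings is assumed:

ByteSizeValue a = settings.getAsMemory("my.buffer.size", "1048576"); // absolute value: a number of bytes
ByteSizeValue b = settings.getAsMemory("my.buffer.size", "1mb");     // byte size with units
ByteSizeValue c = settings.getAsMemory("my.buffer.size", "12%");     // percentage of the heap size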
View File

@ -112,7 +112,7 @@ abstract class AbstractPagedHashMap implements Releasable {
}
}
// The only entries which have not been put in their final position in the previous loop are those that were stored in a slot that
// is < slot(key, mask). This only happens when slot(key, mask) returned a slot that was close to the end of the array and colision
// is < slot(key, mask). This only happens when slot(key, mask) returned a slot that was close to the end of the array and collision
// resolution has put it back in the first slots. This time, collision resolution will have put them at the beginning of the newly
// allocated slots. Let's re-add them to make sure they are in the right slot. This 2nd loop will typically exit very early.
for (long i = buckets; i < newBuckets; ++i) {

View File

@ -78,7 +78,7 @@ public abstract class ExtensionPoint {
/**
* Creates a new {@link ClassMap}
*
* @param name the human readable underscore case name of the extension poing. This is used in error messages etc.
* @param name the human readable underscore case name of the extension point. This is used in error messages etc.
* @param extensionClass the base class that should be extended
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
* @param reservedKeys a set of reserved keys by internal implementations
@ -120,7 +120,7 @@ public abstract class ExtensionPoint {
}
/**
* A Type extension point which basically allows to registerd keyed extensions like {@link ClassMap}
* A Type extension point which basically allows registering keyed extensions like {@link ClassMap}
* but doesn't instantiate and bind all the registered key value pairs but instead replaces a singleton based on a given setting via {@link #bindType(Binder, Settings, String, String)}
* Note: {@link #bind(Binder)} is not supported by this class
*/
@ -169,7 +169,7 @@ public abstract class ExtensionPoint {
/**
* Creates a new {@link ClassSet}
*
* @param name the human readable underscore case name of the extension poing. This is used in error messages etc.
* @param name the human readable underscore case name of the extension point. This is used in error messages etc.
* @param extensionClass the base class that should be extended
* @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
*/

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.smile.SmileXContent;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;
import java.io.IOException;
import java.util.Locale;
/**
* The content type of {@link org.elasticsearch.common.xcontent.XContent}.
@ -38,7 +39,12 @@ public enum XContentType {
*/
JSON(0) {
@Override
public String restContentType() {
protected String mediaTypeWithoutParameters() {
return "application/json";
}
@Override
public String mediaType() {
return "application/json; charset=UTF-8";
}
@ -57,7 +63,7 @@ public enum XContentType {
*/
SMILE(1) {
@Override
public String restContentType() {
protected String mediaTypeWithoutParameters() {
return "application/smile";
}
@ -76,7 +82,7 @@ public enum XContentType {
*/
YAML(2) {
@Override
public String restContentType() {
protected String mediaTypeWithoutParameters() {
return "application/yaml";
}
@ -95,7 +101,7 @@ public enum XContentType {
*/
CBOR(3) {
@Override
public String restContentType() {
protected String mediaTypeWithoutParameters() {
return "application/cbor";
}
@ -108,31 +114,30 @@ public enum XContentType {
public XContent xContent() {
return CborXContent.cborXContent;
}
},;
};
public static XContentType fromRestContentType(String contentType) {
if (contentType == null) {
public static XContentType fromMediaTypeOrFormat(String mediaType) {
if (mediaType == null) {
return null;
}
if ("application/json".equals(contentType) || "json".equalsIgnoreCase(contentType)) {
for (XContentType type : values()) {
if (isSameMediaTypeAs(mediaType, type)) {
return type;
}
}
if (mediaType.toLowerCase(Locale.ROOT).startsWith("application/*")) {
return JSON;
}
if ("application/smile".equals(contentType) || "smile".equalsIgnoreCase(contentType)) {
return SMILE;
}
if ("application/yaml".equals(contentType) || "yaml".equalsIgnoreCase(contentType)) {
return YAML;
}
if ("application/cbor".equals(contentType) || "cbor".equalsIgnoreCase(contentType)) {
return CBOR;
}
return null;
}
private static boolean isSameMediaTypeAs(String stringType, XContentType type) {
return type.mediaTypeWithoutParameters().equalsIgnoreCase(stringType) ||
stringType.toLowerCase(Locale.ROOT).startsWith(type.mediaTypeWithoutParameters().toLowerCase(Locale.ROOT) + ";") ||
type.shortName().equalsIgnoreCase(stringType);
}
private int index;
XContentType(int index) {
@ -143,12 +148,16 @@ public enum XContentType {
return index;
}
public abstract String restContentType();
public String mediaType() {
return mediaTypeWithoutParameters();
}
public abstract String shortName();
public abstract XContent xContent();
protected abstract String mediaTypeWithoutParameters();
public static XContentType readFrom(StreamInput in) throws IOException {
int index = in.readVInt();
for (XContentType contentType : values()) {

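A hedged sketch of how the matching rules above resolve a few representative inputs; the expected results are read directly from fromMediaTypeOrFormat and isSameMediaTypeAs:

XContentType a = XContentType.fromMediaTypeOrFormat("application/json");                 // JSON: exact media type match
XContentType b = XContentType.fromMediaTypeOrFormat("Application/JSON; charset=UTF-8"); // JSON: case-insensitive, parameters after ';' allowed
XContentType c = XContentType.fromMediaTypeOrFormat("yaml");                             // YAML: short-name match
XContentType d = XContentType.fromMediaTypeOrFormat("application/*");                    // JSON: wildcard falls back to JSON
XContentType e = XContentType.fromMediaTypeOrFormat("text/plain");                       // null: no match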
View File

@ -38,16 +38,16 @@ public abstract class AbstractXContentParser implements XContentParser {
private ParseFieldMatcher matcher = ParseFieldMatcher.STRICT;
//Currently this is not a setting that can be changed and is a policy
// Currently this is not a setting that can be changed and is a policy
// that relates to how parsing of things like "boost" is done across
// the whole of Elasticsearch (eg if String "1.0" is a valid float).
// The idea behind keeping it as a constant is that we can track
// references to this policy decision throughout the codebase and find
// and change any code that needs to apply an alternative policy.
public static final boolean DEFAULT_NUMBER_COEERCE_POLICY = true;
public static final boolean DEFAULT_NUMBER_COERCE_POLICY = true;
private static void checkCoerceString(boolean coeerce, Class<? extends Number> clazz) {
if (!coeerce) {
private static void checkCoerceString(boolean coerce, Class<? extends Number> clazz) {
if (!coerce) {
//Need to throw type IllegalArgumentException as current catch logic in
//NumberFieldMapper.parseCreateField relies on this for "malformed" value detection
throw new IllegalArgumentException(clazz.getSimpleName() + " value passed as String");
@ -102,7 +102,7 @@ public abstract class AbstractXContentParser implements XContentParser {
@Override
public short shortValue() throws IOException {
return shortValue(DEFAULT_NUMBER_COEERCE_POLICY);
return shortValue(DEFAULT_NUMBER_COERCE_POLICY);
}
@Override
@ -121,7 +121,7 @@ public abstract class AbstractXContentParser implements XContentParser {
@Override
public int intValue() throws IOException {
return intValue(DEFAULT_NUMBER_COEERCE_POLICY);
return intValue(DEFAULT_NUMBER_COERCE_POLICY);
}
@ -141,7 +141,7 @@ public abstract class AbstractXContentParser implements XContentParser {
@Override
public long longValue() throws IOException {
return longValue(DEFAULT_NUMBER_COEERCE_POLICY);
return longValue(DEFAULT_NUMBER_COERCE_POLICY);
}
@Override
@ -160,7 +160,7 @@ public abstract class AbstractXContentParser implements XContentParser {
@Override
public float floatValue() throws IOException {
return floatValue(DEFAULT_NUMBER_COEERCE_POLICY);
return floatValue(DEFAULT_NUMBER_COERCE_POLICY);
}
@Override
@ -178,7 +178,7 @@ public abstract class AbstractXContentParser implements XContentParser {
@Override
public double doubleValue() throws IOException {
return doubleValue(DEFAULT_NUMBER_COEERCE_POLICY);
return doubleValue(DEFAULT_NUMBER_COERCE_POLICY);
}
@Override

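A hedged sketch of what the coerce policy means for callers of the no-argument accessors above; the parser setup is elided, and the failure message mirrors checkCoerceString:

// parser is assumed to be positioned on the JSON string token "1.0"
float lenient = parser.floatValue();      // equivalent to floatValue(true): the string is coerced to 1.0f
float strict = parser.floatValue(false);  // throws IllegalArgumentException("Float value passed as String")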
View File

@ -910,7 +910,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
activeNodes.add(localNode);
long joinsCounter = clusterJoinsCounter.get();
if (joinsCounter > 0) {
logger.trace("adding local node to the list of active nodes who has previously joined the cluster (joins counter is [{}})", joinsCounter);
logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
joinedOnceActiveNodes.add(localNode);
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.env;
import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;

View File

@ -47,6 +47,7 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import java.io.Closeable;
import java.io.IOException;
@ -221,6 +222,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
maybeLogPathDetails();
maybeLogHeapDetails();
maybeWarnFileDescriptors();
applySegmentInfosTrace(settings);
}
@ -313,6 +315,20 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
}
private void maybeWarnFileDescriptors() {
long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
if (maxFileDescriptorCount == -1) {
return;
}
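// 1 << 16 == 65536 file descriptors, a commonly recommended minimum for Elasticsearch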
int fileDescriptorCountThreshold = (1 << 16);
if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
logger.warn(
"max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
maxFileDescriptorCount,
fileDescriptorCountThreshold);
}
}
@SuppressForbidden(reason = "System.out.*")
static void applySegmentInfosTrace(Settings settings) {
if (ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.get(settings)) {

Some files were not shown because too many files have changed in this diff