Merge branch 'master' into feature/aggs-refactoring

# Conflicts:
#	core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
#	core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
Colin Goodheart-Smithe 2016-02-05 09:22:14 +00:00
commit f06f17f328
202 changed files with 4917 additions and 2096 deletions

@@ -82,4 +82,7 @@
(c-set-offset 'func-decl-cont '++)
))
(c-basic-offset . 4)
(c-comment-only-line-offset . (0 . 0)))))
(c-comment-only-line-offset . (0 . 0))
(fill-column . 140)
(fci-rule-column . 140)
(compile-command . "gradle compileTestJava"))))

.gitignore
@@ -4,12 +4,13 @@
*.iml
*.ipr
*.iws
build-idea/
# eclipse files
.project
.classpath
eclipse-build
.settings
build-eclipse/
# netbeans files
nb-configuration.xml
@@ -18,7 +19,6 @@ nbactions.xml
# gradle stuff
.gradle/
build/
generated-resources/
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
@@ -38,5 +38,5 @@ html_docs
# random old stuff that we should look at the necessity of...
/tmp/
backwards/
eclipse-build

@@ -75,8 +75,9 @@ subprojects {
allprojects {
// injecting groovy property variables into all projects
project.ext {
// for eclipse hacks...
// for ide hacks...
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
}
}
@@ -170,12 +171,14 @@ gradle.projectsEvaluated {
allprojects {
apply plugin: 'idea'
if (isIdea) {
project.buildDir = file('build-idea')
}
idea {
module {
// same as for the IntelliJ Gradle tooling integration
inheritOutputDirs = false
outputDir = file('build/classes/main')
testOutputDir = file('build/classes/test')
outputDir = file('build-idea/classes/main')
testOutputDir = file('build-idea/classes/test')
iml {
// fix so that Gradle idea plugin properly generates support for resource folders
@@ -222,14 +225,19 @@ allprojects {
apply plugin: 'eclipse'
plugins.withType(JavaBasePlugin) {
eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse')
File eclipseBuild = project.file('build-eclipse')
eclipse.classpath.defaultOutputDir = eclipseBuild
if (isEclipse) {
// set this so generated dirs will be relative to eclipse build
project.buildDir = eclipseBuild
}
eclipse.classpath.file.whenMerged { classpath ->
// give each source folder a unique corresponding output folder
int i = 0;
classpath.entries.findAll { it instanceof SourceFolder }.each { folder ->
i++;
// this is *NOT* a path or a file.
folder.output = "build/eclipse/" + i
folder.output = "build-eclipse/" + i
}
}
}

@@ -76,9 +76,17 @@ extraArchive {
tests = false
}
idea {
module {
inheritOutputDirs = false
outputDir = file('build-idea/classes/main')
testOutputDir = file('build-idea/classes/test')
}
}
eclipse {
classpath {
defaultOutputDir = new File(file('build'), 'eclipse')
defaultOutputDir = file('build-eclipse')
}
}

@@ -78,15 +78,17 @@ class BuildPlugin implements Plugin<Project> {
if (project.rootProject.ext.has('buildChecksDone') == false) {
String javaHome = findJavaHome()
File gradleJavaHome = Jvm.current().javaHome
String gradleJavaVersionDetails = "${System.getProperty('java.vendor')} ${System.getProperty('java.version')}" +
String javaVendor = System.getProperty('java.vendor')
String javaVersion = System.getProperty('java.version')
String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" +
" [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]"
String javaVersionDetails = gradleJavaVersionDetails
String javaVersion = System.getProperty('java.version')
JavaVersion javaVersionEnum = JavaVersion.current()
if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) {
javaVersionDetails = findJavaVersionDetails(project, javaHome)
javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
javaVendor = findJavaVendor(project, javaHome)
javaVersion = findJavaVersion(project, javaHome)
}
@@ -114,6 +116,25 @@ class BuildPlugin implements Plugin<Project> {
throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")
}
// this block of code detecting buggy JDK 8 compiler versions can be removed when minimum Java version is incremented
assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8"
if (javaVersionEnum == JavaVersion.VERSION_1_8) {
if (Objects.equals("Oracle Corporation", javaVendor)) {
def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/
if (matcher.matches()) {
int update;
if (matcher.group(1) == null) {
update = 0
} else {
update = matcher.group(1).toInteger()
}
if (update < 40) {
throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40")
}
}
}
}
project.rootProject.ext.javaHome = javaHome
project.rootProject.ext.javaVersion = javaVersion
project.rootProject.ext.buildChecksDone = true
@@ -153,6 +174,11 @@ class BuildPlugin implements Plugin<Project> {
return runJavascript(project, javaHome, versionScript)
}
private static String findJavaVendor(Project project, String javaHome) {
String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));'
return runJavascript(project, javaHome, vendorScript)
}
/** Finds the parsable java specification version */
private static String findJavaVersion(Project project, String javaHome) {
String versionScript = 'print(java.lang.System.getProperty("java.version"));'

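The compiler-bug gate added above comes down to parsing the update number out of an Oracle "1.8.0_NN" version string. A minimal standalone sketch of that parse, using the same regex; the class name is hypothetical and not part of this commit:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of the JDK-8052388 gate: extract the update number from a
// "1.8.0_NN" version string, treating a missing suffix as update 0.
class Jdk8UpdateCheck {
    private static final Pattern JAVA_8 = Pattern.compile("1\\.8\\.0(?:_(\\d+))?");

    static boolean hasCompilerBug(String vendor, String version) {
        if (!"Oracle Corporation".equals(vendor)) {
            return false;
        }
        Matcher matcher = JAVA_8.matcher(version);
        if (!matcher.matches()) {
            return false;
        }
        int update = matcher.group(1) == null ? 0 : Integer.parseInt(matcher.group(1));
        return update < 40; // JDK-8052388 was fixed in 8u40
    }

    public static void main(String[] args) {
        System.out.println(hasCompilerBug("Oracle Corporation", "1.8.0_31")); // true
        System.out.println(hasCompilerBug("Oracle Corporation", "1.8.0_40")); // false
    }
}
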
@@ -29,7 +29,7 @@ import org.gradle.api.tasks.Copy
class PluginPropertiesTask extends Copy {
PluginPropertiesExtension extension
File generatedResourcesDir = new File(project.projectDir, 'generated-resources')
File generatedResourcesDir = new File(project.buildDir, 'generated-resources')
PluginPropertiesTask() {
File templateFile = new File(project.buildDir, 'templates/plugin-descriptor.properties')

@@ -11,11 +11,12 @@
</module>
<module name="TreeWalker">
<!-- ~3500 violations
<!-- It's our official line length! See checkstyle_suppressions.xml for the files that don't pass this. For now we
suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
unfair. -->
<module name="LineLength">
<property name="max" value="140"/>
</module>
-->
<module name="AvoidStarImport" />
<!-- Doesn't pass but we could make it pass pretty quick.

File diff suppressed because it is too large

@@ -190,14 +190,11 @@ import org.elasticsearch.action.termvectors.TermVectorsAction;
import org.elasticsearch.action.termvectors.TransportMultiTermVectorsAction;
import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction;
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.NodeModule;
import java.util.ArrayList;
import java.util.HashMap;
@@ -323,8 +320,7 @@ public class ActionModule extends AbstractModule {
registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
registerAction(GetAction.INSTANCE, TransportGetAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class,
TransportDfsOnlyAction.class);
registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
@@ -111,7 +112,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
if (getNodeFailures() != null && getNodeFailures().size() > 0) {
builder.startArray("node_failures");
for (FailedNodeException ex : getNodeFailures()){
for (FailedNodeException ex : getNodeFailures()) {
builder.value(ex);
}
builder.endArray();

@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
@@ -48,20 +49,23 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
private final String description;
private final Task.Status status;
private final String parentNode;
private final long parentId;
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description) {
this(node, id, type, action, description, null, -1L);
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
this(node, id, type, action, description, status, null, -1L);
}
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, String parentNode, long parentId) {
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
this.node = node;
this.id = id;
this.type = type;
this.action = action;
this.description = description;
this.status = status;
this.parentNode = parentNode;
this.parentId = parentId;
}
@@ -72,6 +76,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
type = in.readString();
action = in.readString();
description = in.readOptionalString();
if (in.readBoolean()) {
status = in.readTaskStatus();
} else {
status = null;
}
parentNode = in.readOptionalString();
parentId = in.readLong();
}
@@ -96,6 +105,14 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
return description;
}
/**
* The status of the running task. Only available if TaskInfos were built
* with the detailed flag.
*/
public Task.Status getStatus() {
return status;
}
public String getParentNode() {
return parentNode;
}
@@ -116,6 +133,12 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
out.writeString(type);
out.writeString(action);
out.writeOptionalString(description);
if (status != null) {
out.writeBoolean(true);
out.writeTaskStatus(status);
} else {
out.writeBoolean(false);
}
out.writeOptionalString(parentNode);
out.writeLong(parentId);
}
@@ -127,6 +150,9 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
builder.field("id", id);
builder.field("type", type);
builder.field("action", action);
if (status != null) {
builder.field("status", status, params);
}
if (description != null) {
builder.field("description", description);
}

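The readBoolean/readTaskStatus and writeBoolean/writeTaskStatus pairs above follow the usual optional-field wire idiom: a boolean presence marker, then the value only when present. A self-contained sketch of the same idiom using plain java.io streams in place of StreamInput/StreamOutput; the demo class is hypothetical:

import java.io.*;

// Optional-field idiom: write a presence marker, then the value only when
// it is non-null; the reader mirrors the check.
class OptionalFieldDemo {
    static void writeOptional(DataOutput out, String value) throws IOException {
        if (value != null) {
            out.writeBoolean(true);
            out.writeUTF(value);
        } else {
            out.writeBoolean(false);
        }
    }

    static String readOptional(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeOptional(new DataOutputStream(bytes), "replication");
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptional(in)); // prints "replication"
    }
}
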
@@ -80,7 +80,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
continue;
}
Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.getSettings());
Settings settings = settingsFilter.filter(indexMetaData.getSettings());
if (request.humanReadable()) {
settings = IndexMetaData.addHumanReadableSettings(settings);
}

@@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.io.IOException;
import java.util.ArrayList;
@@ -55,7 +56,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
*/
public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
private DiscoveryNode node;
private long version;
private long legacyVersion;
private String allocationId;
private Throwable storeException;
private AllocationStatus allocationStatus;
@@ -116,9 +117,9 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private StoreStatus() {
}
public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
this.node = node;
this.version = version;
this.legacyVersion = legacyVersion;
this.allocationId = allocationId;
this.allocationStatus = allocationStatus;
this.storeException = storeException;
@@ -132,10 +133,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}
/**
* Version of the store
* Version of the store for pre-3.0 shards that have not yet been active
*/
public long getVersion() {
return version;
public long getLegacyVersion() {
return legacyVersion;
}
/**
@@ -173,7 +174,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
@Override
public void readFrom(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
version = in.readLong();
legacyVersion = in.readLong();
allocationId = in.readOptionalString();
allocationStatus = AllocationStatus.readFrom(in);
if (in.readBoolean()) {
@@ -184,7 +185,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
@Override
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(version);
out.writeLong(legacyVersion);
out.writeOptionalString(allocationId);
allocationStatus.writeTo(out);
if (storeException != null) {
@@ -198,8 +199,12 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
node.toXContent(builder, params);
builder.field(Fields.VERSION, version);
builder.field(Fields.ALLOCATION_ID, allocationId);
if (legacyVersion != ShardStateMetaData.NO_VERSION) {
builder.field(Fields.LEGACY_VERSION, legacyVersion);
}
if (allocationId != null) {
builder.field(Fields.ALLOCATION_ID, allocationId);
}
builder.field(Fields.ALLOCATED, allocationStatus.value());
if (storeException != null) {
builder.startObject(Fields.STORE_EXCEPTION);
@@ -215,12 +220,23 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
return 1;
} else if (other.storeException != null && storeException == null) {
return -1;
} else {
int compare = Long.compare(other.version, version);
}
if (allocationId != null && other.allocationId == null) {
return -1;
} else if (allocationId == null && other.allocationId != null) {
return 1;
} else if (allocationId == null && other.allocationId == null) {
int compare = Long.compare(other.legacyVersion, legacyVersion);
if (compare == 0) {
return Integer.compare(allocationStatus.id, other.allocationStatus.id);
}
return compare;
} else {
int compare = Integer.compare(allocationStatus.id, other.allocationStatus.id);
if (compare == 0) {
return allocationId.compareTo(other.allocationId);
}
return compare;
}
}
}
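
The revised compareTo above is effectively a three-key sort: statuses carrying a store exception sort last, statuses with an allocation id sort ahead of legacy ones, and ties break on allocation status id and then allocation id (or, for legacy stores, on legacy version in descending order). A compilable sketch of the same precedence; the stand-in class is hypothetical:

// Hypothetical stand-in for StoreStatus, reduced to its sort keys.
class StoreOrderDemo {
    String allocationId;   // null for pre-3.0 "legacy" stores
    long legacyVersion;
    int allocationStatusId;
    boolean hasException;

    static int compare(StoreOrderDemo a, StoreOrderDemo b) {
        if (a.hasException != b.hasException) {
            return a.hasException ? 1 : -1;          // exceptions sort last
        }
        if ((a.allocationId != null) != (b.allocationId != null)) {
            return a.allocationId != null ? -1 : 1;  // allocation ids sort first
        }
        if (a.allocationId == null) {                // both legacy stores
            int cmp = Long.compare(b.legacyVersion, a.legacyVersion); // newer first
            return cmp != 0 ? cmp : Integer.compare(a.allocationStatusId, b.allocationStatusId);
        }
        int cmp = Integer.compare(a.allocationStatusId, b.allocationStatusId);
        return cmp != 0 ? cmp : a.allocationId.compareTo(b.allocationId);
    }
}
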
@@ -390,7 +406,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
static final XContentBuilderString STORES = new XContentBuilderString("stores");
// StoreStatus fields
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString LEGACY_VERSION = new XContentBuilderString("legacy_version");
static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");

@@ -180,7 +180,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
for (NodeGatewayStartedShards response : fetchResponse.responses) {
if (shardExistsInNode(response)) {
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.legacyVersion(), response.allocationId(), allocationStatus, response.storeException()));
}
}
CollectionUtil.timSort(storeStatuses);
@@ -213,7 +213,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
* A shard exists/existed in a node only if shard state file exists in the node
*/
private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
return response.storeException() != null || response.legacyVersion() != -1 || response.allocationId() != null;
}
@Override

@@ -34,9 +34,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.ingest.core.IngestDocument.MetaData;
public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {
@@ -140,7 +138,7 @@ public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineReque
static Parsed parse(Map<String, Object> config, boolean verbose, PipelineStore pipelineStore) throws Exception {
Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactoryRegistry());
Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorRegistry());
List<IngestDocument> ingestDocumentList = parseDocs(config);
return new Parsed(pipeline, ingestDocumentList, verbose);
}

@@ -44,6 +44,20 @@ public abstract class ChildTaskActionRequest<Request extends ActionRequest<Reque
this.parentTaskId = parentTaskId;
}
/**
* The node that owns the parent task.
*/
public String getParentTaskNode() {
return parentTaskNode;
}
/**
* The task id of the parent task on the parent node.
*/
public long getParentTaskId() {
return parentTaskId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@@ -19,7 +19,6 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.WriteConsistencyLevel;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@@ -195,6 +195,11 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
out.writeVLong(routedBasedOnClusterVersion);
}
@Override
public Task createTask(long id, String type, String action) {
return new ReplicationTask(id, type, action, this::getDescription, getParentTaskNode(), getParentTaskId());
}
/**
* Sets the target shard id for the request. The shard id is set when a
* index/delete request is resolved by the transport action

@@ -0,0 +1,97 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.replication;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import java.io.IOException;
import static java.util.Objects.requireNonNull;
/**
* Task that tracks replication actions.
*/
public class ReplicationTask extends Task {
private volatile String phase = "starting";
public ReplicationTask(long id, String type, String action, Provider<String> description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
}
/**
* Set the current phase of the task.
*/
public void setPhase(String phase) {
this.phase = phase;
}
/**
* Get the current phase of the task.
*/
public String getPhase() {
return phase;
}
@Override
public Status getStatus() {
return new Status(phase);
}
public static class Status implements Task.Status {
public static final Status PROTOTYPE = new Status("prototype");
private final String phase;
public Status(String phase) {
this.phase = requireNonNull(phase, "Phase cannot be null");
}
public Status(StreamInput in) throws IOException {
phase = in.readString();
}
@Override
public String getWriteableName() {
return "replication";
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("phase", phase);
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
}
@Override
public Status readFrom(StreamInput in) throws IOException {
return new Status(in);
}
}
}

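A hedged usage sketch of the new task class, assuming this commit's classes on the classpath; the phase string comes from the setPhase calls that TransportReplicationAction makes below, and the demo class is hypothetical:

import org.elasticsearch.action.support.replication.ReplicationTask;

// Sketch: the phase set on a ReplicationTask surfaces through getStatus(),
// whose toXContent renders as {"phase": "..."} in task listings.
class ReplicationTaskDemo {
    static void demo() {
        ReplicationTask task = new ReplicationTask(42, "transport",
                "indices:data/write/index", () -> "demo request", null, -1L);
        task.setPhase("primary");
        ReplicationTask.Status status = task.getStatus();
        // status now serializes as {"phase":"primary"}
    }
}
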
@@ -142,7 +142,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
new ReroutePhase(task, request, listener).run();
new ReroutePhase((ReplicationTask) task, request, listener).run();
}
protected abstract Response newResponseInstance();
@@ -283,14 +283,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
new PrimaryPhase(request, channel).run();
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
public void messageReceived(Request request, TransportChannel channel, Task task) throws Exception {
new PrimaryPhase((ReplicationTask) task, request, channel).run();
}
}
class ReplicaOperationTransportHandler implements TransportRequestHandler<ReplicaRequest> {
@Override
public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
new AsyncReplicaAction(request, channel).run();
throw new UnsupportedOperationException("the task parameter is required for this operation");
}
@Override
public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception {
new AsyncReplicaAction(request, channel, (ReplicationTask) task).run();
}
}
@@ -309,13 +319,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final class AsyncReplicaAction extends AbstractRunnable {
private final ReplicaRequest request;
private final TransportChannel channel;
/**
* The task on the node with the replica shard.
*/
private final ReplicationTask task;
// important: we pass null as a timeout as failing a replica is
// something we want to avoid at all costs
private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
AsyncReplicaAction(ReplicaRequest request, TransportChannel channel) {
AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) {
this.request = request;
this.channel = channel;
this.task = task;
}
@Override
@@ -385,6 +400,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doRun() throws Exception {
setPhase(task, "replica");
assert request.shardId() != null : "request shardId must be set";
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
shardOperationOnReplica(request);
@@ -392,6 +408,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
}
}
setPhase(task, "finished");
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
@@ -417,15 +434,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
final class ReroutePhase extends AbstractRunnable {
private final ActionListener<Response> listener;
private final Request request;
private final ReplicationTask task;
private final ClusterStateObserver observer;
private final AtomicBoolean finished = new AtomicBoolean();
ReroutePhase(Task task, Request request, ActionListener<Response> listener) {
ReroutePhase(ReplicationTask task, Request request, ActionListener<Response> listener) {
this.request = request;
if (task != null) {
this.request.setParentTask(clusterService.localNode().getId(), task.getId());
}
this.listener = listener;
this.task = task;
this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext());
}
@@ -436,6 +455,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doRun() {
setPhase(task, "routing");
final ClusterState state = observer.observedState();
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
if (blockException != null) {
@@ -467,6 +487,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
@@ -484,6 +505,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
performAction(node, actionName, false);
}
}
@@ -540,6 +562,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
finishAsFailed(failure);
return;
}
setPhase(task, "waiting_for_retry");
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@@ -564,6 +587,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
void finishAsFailed(Throwable failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
listener.onFailure(failure);
} else {
@@ -574,6 +598,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
void finishWithUnexpectedFailure(Throwable failure) {
logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);
} else {
assert false : "finishWithUnexpectedFailure called but operation is already finished";
@@ -582,6 +607,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
void finishOnSuccess(Response response) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "finished");
if (logger.isTraceEnabled()) {
logger.trace("operation succeeded. action [{}],request [{}]", actionName, request);
}
@@ -603,6 +629,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
* Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
*/
class PrimaryPhase extends AbstractRunnable {
private final ReplicationTask task;
private final Request request;
private final ShardId shardId;
private final TransportChannel channel;
@@ -610,8 +637,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final AtomicBoolean finished = new AtomicBoolean();
private IndexShardReference indexShardReference;
PrimaryPhase(Request request, TransportChannel channel) {
PrimaryPhase(ReplicationTask task, Request request, TransportChannel channel) {
this.state = clusterService.state();
this.task = task;
this.request = request;
assert request.shardId() != null : "request shardId must be set prior to primary phase";
this.shardId = request.shardId();
@@ -634,6 +662,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doRun() throws Exception {
setPhase(task, "primary");
// request shardID was set in ReroutePhase
final String writeConsistencyFailure = checkWriteConsistency(shardId);
if (writeConsistencyFailure != null) {
@@ -648,7 +677,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
ReplicationPhase replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
} else {
// delegate primary phase to relocation target
@@ -728,6 +757,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
*/
void finishAsFailed(Throwable failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
Releasables.close(indexShardReference);
logger.trace("operation failed", failure);
try {
@@ -770,7 +800,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
* relocating copies
*/
final class ReplicationPhase extends AbstractRunnable {
private final ReplicationTask task;
private final ReplicaRequest replicaRequest;
private final Response finalResponse;
private final TransportChannel channel;
@@ -785,8 +815,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final int totalShards;
private final IndexShardReference indexShardReference;
public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
public ReplicationPhase(ReplicationTask task, ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
TransportChannel channel, IndexShardReference indexShardReference) {
this.task = task;
this.replicaRequest = replicaRequest;
this.channel = channel;
this.finalResponse = finalResponse;
@@ -870,6 +901,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
*/
@Override
protected void doRun() {
setPhase(task, "replicating");
if (pending.get() == 0) {
doFinish();
return;
@@ -981,6 +1013,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
private void forceFinishAsFailed(Throwable t) {
setPhase(task, "failed");
if (finished.compareAndSet(false, true)) {
Releasables.close(indexShardReference);
try {
@@ -994,6 +1027,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private void doFinish() {
if (finished.compareAndSet(false, true)) {
setPhase(task, "finished");
Releasables.close(indexShardReference);
final ReplicationResponse.ShardInfo.Failure[] failuresArray;
if (!shardReplicaFailures.isEmpty()) {
@@ -1082,4 +1116,14 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
indexShard.maybeFlush();
}
/**
* Sets the current phase on the task if it isn't null. Pulled into its own
* method because it's more convenient that way.
*/
static void setPhase(ReplicationTask task, String phase) {
if (task != null) {
task.setPhase(phase);
}
}
}
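
Read together, the setPhase calls added throughout this class trace a small lifecycle for the task. A sketch of the sequences as data, with phase names copied from the calls above; the holder class is hypothetical:

import java.util.Arrays;
import java.util.List;

// Phase sequences traced by the setPhase calls above. "rerouted" replaces
// "waiting_on_primary" when the primary is on another node, while
// "waiting_for_retry" and "failed" branch off at the retry and failure points.
class ReplicationPhaseOrder {
    static final List<String> COORDINATING_HAPPY_PATH = Arrays.asList(
            "routing", "waiting_on_primary", "primary", "replicating", "finished");
    static final List<String> REPLICA_PATH = Arrays.asList("replica", "finished");
}
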

@@ -373,22 +373,6 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
return this;
}
/**
* @return <code>true</code> if distributed frequencies should be returned. Otherwise
* <code>false</code>
*/
public boolean dfs() {
return flagsEnum.contains(Flag.Dfs);
}
/**
* Use distributed frequencies instead of shard statistics.
*/
public TermVectorsRequest dfs(boolean dfs) {
setFlag(Flag.Dfs, dfs);
return this;
}
/**
* Return only term vectors for special selected fields. Returns for term
* vectors for all fields if selectedFields == null
@@ -583,7 +567,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
public static enum Flag {
// Do not change the order of these flags we use
// the ordinal for encoding! Only append to the end!
Positions, Offsets, Payloads, FieldStatistics, TermStatistics, Dfs
Positions, Offsets, Payloads, FieldStatistics, TermStatistics
}
/**
@@ -616,7 +600,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
} else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
termVectorsRequest.fieldStatistics(parser.booleanValue());
} else if (currentFieldName.equals("dfs")) {
termVectorsRequest.dfs(parser.booleanValue());
throw new IllegalArgumentException("distributed frequencies is not supported anymore for term vectors");
} else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
} else if (currentFieldName.equals("filter")) {

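The ordinal-encoding warning on the Flag enum above is why existing constants must keep their positions and why Dfs could only be dropped from the end of the list: each set flag travels as the bit at its ordinal. A self-contained sketch of that encoding; the demo class is hypothetical:

import java.util.EnumSet;

// Ordinal-based flag encoding: each set flag contributes the bit at its
// ordinal, so reordering existing constants changes the wire format while
// appending new ones does not.
class FlagEncodingDemo {
    enum Flag { Positions, Offsets, Payloads, FieldStatistics, TermStatistics }

    static long encode(EnumSet<Flag> flags) {
        long bits = 0;
        for (Flag f : flags) {
            bits |= 1L << f.ordinal();
        }
        return bits;
    }

    static EnumSet<Flag> decode(long bits) {
        EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
        for (Flag f : Flag.values()) {
            if ((bits & (1L << f.ordinal())) != 0) {
                flags.add(f);
            }
        }
        return flags;
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(EnumSet.of(Flag.Positions, Flag.TermStatistics))));
        // prints [Positions, TermStatistics]
    }
}
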
@@ -149,14 +149,6 @@ public class TermVectorsRequestBuilder extends ActionRequestBuilder<TermVectorsR
return this;
}
/**
* Sets whether to use distributed frequencies instead of shard statistics.
*/
public TermVectorsRequestBuilder setDfs(boolean dfs) {
request.dfs(dfs);
return this;
}
/**
* Sets whether to return only term vectors for special selected fields. Returns the term
* vectors for all fields if selectedFields == null

@@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -80,7 +81,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
try {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.getShard(shardId.id());
TermVectorsResponse termVectorsResponse = indexShard.getTermVectors(termVectorsRequest);
TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
response.add(request.locations.get(i), termVectorsResponse);
} catch (Throwable t) {

@@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -56,6 +57,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
super(settings, TermVectorsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
TermVectorsRequest::new, ThreadPool.Names.GET);
this.indicesService = indicesService;
}
@Override
@@ -83,7 +85,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
TermVectorsResponse response = indexShard.getTermVectors(request);
TermVectorsResponse response = TermVectorsService.getTermVectors(indexShard, request);
response.updateTookInMillis(request.startTime());
return response;
}

@@ -1,112 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Set;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
public class DfsOnlyRequest extends BroadcastRequest<DfsOnlyRequest> {
private SearchRequest searchRequest = new SearchRequest();
long nowInMillis;
public DfsOnlyRequest() {
}
public DfsOnlyRequest(Fields termVectorsFields, String[] indices, String[] types, Set<String> selectedFields) throws IOException {
super(indices);
// build a search request with a query of all the terms
final BoolQueryBuilder boolBuilder = boolQuery();
for (String fieldName : termVectorsFields) {
if ((selectedFields != null) && (!selectedFields.contains(fieldName))) {
continue;
}
Terms terms = termVectorsFields.terms(fieldName);
TermsEnum iterator = terms.iterator();
while (iterator.next() != null) {
String text = iterator.term().utf8ToString();
boolBuilder.should(QueryBuilders.termQuery(fieldName, text));
}
}
// wrap a search request object
this.searchRequest = new SearchRequest(indices).types(types).source(new SearchSourceBuilder().query(boolBuilder));
}
public SearchRequest getSearchRequest() {
return searchRequest;
}
@Override
public ActionRequestValidationException validate() {
return searchRequest.validate();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.searchRequest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
this.searchRequest.writeTo(out);
}
public String[] types() {
return this.searchRequest.types();
}
public String routing() {
return this.searchRequest.routing();
}
public String preference() {
return this.searchRequest.preference();
}
@Override
public String toString() {
String sSource = "_na_";
if (searchRequest.source() != null) {
sSource = searchRequest.source().toString();
}
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types()) + ", source[" + sSource + "]";
}
}

@@ -1,73 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.dfs.AggregatedDfs;
import java.io.IOException;
import java.util.List;
/**
* A response of a dfs only request.
*/
public class DfsOnlyResponse extends BroadcastResponse {
private AggregatedDfs dfs;
private long tookInMillis;
DfsOnlyResponse(AggregatedDfs dfs, int totalShards, int successfulShards, int failedShards,
List<ShardOperationFailedException> shardFailures, long tookInMillis) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.dfs = dfs;
this.tookInMillis = tookInMillis;
}
public AggregatedDfs getDfs() {
return dfs;
}
public TimeValue getTook() {
return new TimeValue(tookInMillis);
}
public long getTookInMillis() {
return tookInMillis;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
AggregatedDfs.readAggregatedDfs(in);
tookInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
dfs.writeTo(out);
out.writeVLong(tookInMillis);
}
}

@@ -1,62 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import java.io.IOException;
public class ShardDfsOnlyRequest extends BroadcastShardRequest {
private ShardSearchTransportRequest shardSearchRequest = new ShardSearchTransportRequest();
public ShardDfsOnlyRequest() {
}
ShardDfsOnlyRequest(ShardRouting shardRouting, int numberOfShards, @Nullable String[] filteringAliases, long nowInMillis, DfsOnlyRequest request) {
super(shardRouting.shardId(), request);
this.shardSearchRequest = new ShardSearchTransportRequest(request.getSearchRequest(), shardRouting, numberOfShards,
filteringAliases, nowInMillis);
}
public ShardSearchRequest getShardSearchRequest() {
return shardSearchRequest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardSearchRequest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardSearchRequest.writeTo(out);
}
}

@@ -1,62 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.dfs.DfsSearchResult;
import java.io.IOException;
/**
*
*/
class ShardDfsOnlyResponse extends BroadcastShardResponse {
private DfsSearchResult dfsSearchResult = new DfsSearchResult();
ShardDfsOnlyResponse() {
}
ShardDfsOnlyResponse(ShardId shardId, DfsSearchResult dfsSearchResult) {
super(shardId);
this.dfsSearchResult = dfsSearchResult;
}
public DfsSearchResult getDfsSearchResult() {
return dfsSearchResult;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
dfsSearchResult.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
dfsSearchResult.writeTo(out);
}
}

@@ -1,146 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.termvectors.dfs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
/**
* Get the dfs only with no fetch phase. This is for internal use only.
*/
public class TransportDfsOnlyAction extends TransportBroadcastAction<DfsOnlyRequest, DfsOnlyResponse, ShardDfsOnlyRequest, ShardDfsOnlyResponse> {
public static final String NAME = "internal:index/termvectors/dfs";
private final SearchService searchService;
private final SearchPhaseController searchPhaseController;
@Inject
public TransportDfsOnlyAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SearchService searchService, SearchPhaseController searchPhaseController) {
super(settings, NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
DfsOnlyRequest::new, ShardDfsOnlyRequest::new, ThreadPool.Names.SEARCH);
this.searchService = searchService;
this.searchPhaseController = searchPhaseController;
}
@Override
protected void doExecute(Task task, DfsOnlyRequest request, ActionListener<DfsOnlyResponse> listener) {
request.nowInMillis = System.currentTimeMillis();
super.doExecute(task, request, listener);
}
@Override
protected ShardDfsOnlyRequest newShardRequest(int numShards, ShardRouting shard, DfsOnlyRequest request) {
String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterService.state(), shard.index().getName(), request.indices());
return new ShardDfsOnlyRequest(shard, numShards, filteringAliases, request.nowInMillis, request);
}
@Override
protected ShardDfsOnlyResponse newShardResponse() {
return new ShardDfsOnlyResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, DfsOnlyRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, DfsOnlyRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, DfsOnlyRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected DfsOnlyResponse newResponse(DfsOnlyRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
AtomicArray<DfsSearchResult> dfsResults = new AtomicArray<>(shardsResponses.length());
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
dfsResults.set(i, ((ShardDfsOnlyResponse) shardResponse).getDfsSearchResult());
successfulShards++;
}
}
AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsResults);
return new DfsOnlyResponse(dfs, shardsResponses.length(), successfulShards, failedShards, shardFailures, buildTookInMillis(request));
}
@Override
protected ShardDfsOnlyResponse shardOperation(ShardDfsOnlyRequest request) {
DfsSearchResult dfsSearchResult = searchService.executeDfsPhase(request.getShardSearchRequest());
searchService.freeContext(dfsSearchResult.id());
return new ShardDfsOnlyResponse(request.shardId(), dfsSearchResult);
}
/**
* Builds how long it took to execute the dfs request.
*/
protected final long buildTookInMillis(DfsOnlyRequest request) {
// protect ourselves against time going backwards
// negative values don't make sense and we want to be able to serialize that thing as a vLong
return Math.max(1, System.currentTimeMillis() - request.nowInMillis);
}
}

@@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Distributed frequencies.
*/
package org.elasticsearch.action.termvectors.dfs;

@@ -129,7 +129,6 @@ public class TransportClient extends AbstractClient {
final ThreadPool threadPool = new ThreadPool(settings);
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
@@ -140,7 +139,7 @@ public class TransportClient extends AbstractClient {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(settings, settingsFilter));
modules.add(new SettingsModule(settings));
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(new ClusterNameModule(settings));
modules.add(new ThreadPoolModule(threadPool));

@@ -94,19 +94,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
return index;
}
/**
* creates a new {@link IndexRoutingTable} with all shard versions normalized
*
* @return new {@link IndexRoutingTable}
*/
public IndexRoutingTable normalizeVersions() {
IndexRoutingTable.Builder builder = new Builder(this.index);
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
builder.addIndexShard(cursor.value.normalizeVersions());
}
return builder.build();
}
public void validate(RoutingTableValidation validation, MetaData metaData) {
if (!metaData.hasIndex(index.getName())) {
validation.addIndexFailure(index.getName(), "Exists in routing does not exists in metadata");

@@ -119,40 +119,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
this.allInitializingShards = Collections.unmodifiableList(allInitializingShards);
}
/**
* Normalizes all shard routings to the same version.
*/
public IndexShardRoutingTable normalizeVersions() {
if (shards.isEmpty()) {
return this;
}
if (shards.size() == 1) {
return this;
}
long highestVersion = shards.get(0).version();
boolean requiresNormalization = false;
for (int i = 1; i < shards.size(); i++) {
if (shards.get(i).version() != highestVersion) {
requiresNormalization = true;
}
if (shards.get(i).version() > highestVersion) {
highestVersion = shards.get(i).version();
}
}
if (!requiresNormalization) {
return this;
}
List<ShardRouting> shardRoutings = new ArrayList<>(shards.size());
for (int i = 0; i < shards.size(); i++) {
if (shards.get(i).version() == highestVersion) {
shardRoutings.add(shards.get(i));
} else {
shardRoutings.add(new ShardRouting(shards.get(i), highestVersion));
}
}
return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings));
}
/**
* Returns the shards id
*

View File

@ -693,9 +693,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*/
public void initialize(String nodeId, long version, long expectedShardSize) {
public void initialize(String nodeId, long expectedShardSize) {
innerRemove();
nodes.initialize(new ShardRouting(current, version), nodeId, expectedShardSize);
nodes.initialize(new ShardRouting(current), nodeId, expectedShardSize);
}
/**
@ -711,7 +711,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
* {@link #initialize(String, long, long)}.
* {@link #initialize(String, long)}.
*/
@Override
public void remove() {

View File

@ -588,7 +588,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
// normalize the versions right before we build it...
for (ObjectCursor<IndexRoutingTable> indexRoutingTable : indicesRouting.values()) {
indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value.normalizeVersions());
indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value);
}
RoutingTable table = new RoutingTable(version, indicesRouting.build());
indicesRouting = null;

View File

@ -51,7 +51,6 @@ public final class ShardRouting implements Streamable, ToXContent {
private String relocatingNodeId;
private boolean primary;
private ShardRoutingState state;
private long version;
private RestoreSource restoreSource;
private UnassignedInfo unassignedInfo;
private AllocationId allocationId;
@ -65,11 +64,7 @@ public final class ShardRouting implements Streamable, ToXContent {
}
public ShardRouting(ShardRouting copy) {
this(copy, copy.version());
}
public ShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
}
/**
@ -77,7 +72,7 @@ public final class ShardRouting implements Streamable, ToXContent {
* by either this class or tests. Visible for testing.
*/
ShardRouting(Index index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
this.index = index;
this.shardId = shardId;
@ -86,7 +81,6 @@ public final class ShardRouting implements Streamable, ToXContent {
this.primary = primary;
this.state = state;
this.asList = Collections.singletonList(this);
this.version = version;
this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId;
@ -107,7 +101,7 @@ public final class ShardRouting implements Streamable, ToXContent {
* Creates a new unassigned shard.
*/
public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
public Index index() {
@ -136,13 +130,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
/**
* The routing version associated with the shard.
*/
public long version() {
return this.version;
}
/**
* The shard is unassigned (not allocated to any node).
*/
@ -214,7 +201,7 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
public ShardRouting buildTargetRelocatingShard() {
assert relocating();
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo,
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
}
@ -313,7 +300,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
public void readFromThin(StreamInput in) throws IOException {
version = in.readLong();
if (in.readBoolean()) {
currentNodeId = in.readString();
}
@ -352,7 +338,6 @@ public final class ShardRouting implements Streamable, ToXContent {
* @throws IOException if something happens during write
*/
public void writeToThin(StreamOutput out) throws IOException {
out.writeLong(version);
if (currentNodeId != null) {
out.writeBoolean(true);
out.writeString(currentNodeId);
@ -414,7 +399,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToUnassigned(UnassignedInfo unassignedInfo) {
ensureNotFrozen();
version++;
assert state != ShardRoutingState.UNASSIGNED : this;
state = ShardRoutingState.UNASSIGNED;
currentNodeId = null;
@ -429,7 +413,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void initialize(String nodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.UNASSIGNED : this;
assert relocatingNodeId == null : this;
state = ShardRoutingState.INITIALIZING;
@ -445,7 +428,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void relocate(String relocatingNodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId;
@ -459,7 +441,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void cancelRelocation() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
@ -475,7 +456,6 @@ public final class ShardRouting implements Streamable, ToXContent {
void reinitializeShard() {
ensureNotFrozen();
assert state == ShardRoutingState.STARTED;
version++;
state = ShardRoutingState.INITIALIZING;
allocationId = AllocationId.newInitializing();
this.unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null);
@ -488,7 +468,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToStarted() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this;
relocatingNodeId = null;
restoreSource = null;
@ -507,7 +486,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveToPrimary() {
ensureNotFrozen();
version++;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
@ -519,7 +497,6 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
void moveFromPrimary() {
ensureNotFrozen();
version++;
if (!primary) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
@ -638,26 +615,22 @@ public final class ShardRouting implements Streamable, ToXContent {
if (this == o) {
return true;
}
// we check with instanceof so we also handle the ImmutableShardRouting case
if (o == null || !(o instanceof ShardRouting)) {
return false;
}
ShardRouting that = (ShardRouting) o;
if (version != that.version) {
return false;
}
if (unassignedInfo != null ? !unassignedInfo.equals(that.unassignedInfo) : that.unassignedInfo != null) {
return false;
}
return equalsIgnoringMetaData(that);
}
private long hashVersion = version - 1;
private boolean usePreComputedHashCode = false;
private int hashCode = 0;
@Override
public int hashCode() {
if (hashVersion == version) {
if (frozen && usePreComputedHashCode) {
return hashCode;
}
int result = index != null ? index.hashCode() : 0;
@ -666,10 +639,12 @@ public final class ShardRouting implements Streamable, ToXContent {
result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + (state != null ? state.hashCode() : 0);
result = 31 * result + Long.hashCode(version);
result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
result = 31 * result + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
if (frozen) {
usePreComputedHashCode = true;
}
return hashCode = result;
}
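The hash cache above is only safe because a frozen ShardRouting can no longer mutate. A minimal standalone sketch of the same freeze-then-cache idiom (class and field names are hypothetical, not the ShardRouting implementation):

// Sketch: cache hashCode only once the object is guaranteed immutable.
final class FrozenValue {
    private String data = "";
    private boolean frozen = false;
    private boolean hashComputed = false;
    private int hash = 0;

    void set(String data) {
        if (frozen) {
            throw new IllegalStateException("frozen");
        }
        this.data = data;
    }

    void freeze() {
        frozen = true;
    }

    @Override
    public int hashCode() {
        if (frozen && hashComputed) {
            return hash;          // no mutation is possible anymore, the cache is valid
        }
        int result = data.hashCode();
        if (frozen) {
            hashComputed = true;  // remember the result only after freezing
        }
        return hash = result;
    }
}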
@ -693,7 +668,6 @@ public final class ShardRouting implements Streamable, ToXContent {
} else {
sb.append("[R]");
}
sb.append(", v[").append(version).append("]");
if (this.restoreSource != null) {
sb.append(", restoring[" + restoreSource + "]");
}
@ -718,8 +692,7 @@ public final class ShardRouting implements Streamable, ToXContent {
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", shardId().id())
.field("index", shardId().getIndex().getName())
.field("version", version);
.field("index", shardId().getIndex().getName());
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}

View File

@ -242,8 +242,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
if (shardRoutingChanges != null) {
shardRoutingChanges.accept(unassigned);
}
it.initialize(routingNode.nodeId(), unassigned.version(),
allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
it.initialize(routingNode.nodeId(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return;
}
assert false : "shard to initialize not found in list of unassigned shards";

View File

@ -1140,4 +1140,5 @@ public class Strings {
return sb.toString();
}
}
}

View File

@ -100,10 +100,10 @@ public abstract class CheckFileCommand extends CliTool.Command {
Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
terminal.printWarn("The file permissions of [" + entry.getKey() + "] have changed "
terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed "
+ "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+ "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!");
}
}
@ -116,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String ownerBeforeWrite = entry.getValue();
String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
terminal.printWarn("WARN: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
}
}
@ -129,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String groupBeforeWrite = entry.getValue();
String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
if (!groupAfterWrite.equals(groupBeforeWrite)) {
terminal.printWarn("WARN: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
}
}
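The permission comparison relies on the standard java.nio.file POSIX attribute APIs; a self-contained sketch of the same before/after check (the path and the write step are placeholders):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

public class PermissionCheck {
    public static void main(String[] args) throws Exception {
        Path file = Paths.get("/tmp/example");  // placeholder path
        Set<PosixFilePermission> before = Files.getPosixFilePermissions(file);
        // ... write to the file here ...
        Set<PosixFilePermission> after = Files.getPosixFilePermissions(file);
        if (!before.equals(after)) {
            System.err.println("WARNING: permissions changed from ["
                    + PosixFilePermissions.toString(before) + "] to ["
                    + PosixFilePermissions.toString(after) + "]");
        }
    }
}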

View File

@ -117,7 +117,7 @@ public abstract class CliTool {
} else {
if (args.length == 0) {
terminal.printError("command not specified");
terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified");
config.printUsage(terminal);
return ExitStatus.USAGE;
}
@ -125,7 +125,7 @@ public abstract class CliTool {
String cmdName = args[0];
cmd = config.cmd(cmdName);
if (cmd == null) {
terminal.printError("unknown command [" + cmdName + "]. Use [-h] option to list available commands");
terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. Use [-h] option to list available commands");
return ExitStatus.USAGE;
}
@ -142,7 +142,7 @@ public abstract class CliTool {
try {
return parse(cmd, args).execute(settings, env);
} catch (UserError error) {
terminal.printError(error.getMessage());
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage());
return error.exitStatus;
}
}
@ -165,8 +165,14 @@ public abstract class CliTool {
// the stack trace into cli parsing lib is not important
throw new UserError(ExitStatus.USAGE, e.toString());
}
Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
terminal.verbosity(verbosity);
if (cli.hasOption("v")) {
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
} else if (cli.hasOption("s")) {
terminal.setVerbosity(Terminal.Verbosity.SILENT);
} else {
terminal.setVerbosity(Terminal.Verbosity.NORMAL);
}
return parse(cmd.name(), cli);
}
@ -224,7 +230,7 @@ public abstract class CliTool {
public ExitStatus execute(Settings settings, Environment env) throws Exception {
if (msg != null) {
if (status != ExitStatus.OK) {
terminal.printError(msg);
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg);
} else {
terminal.println(msg);
}

View File

@ -41,7 +41,7 @@ public class HelpPrinter {
}
private static void print(Class clazz, String name, final Terminal terminal) {
terminal.println(Terminal.Verbosity.SILENT);
terminal.println(Terminal.Verbosity.SILENT, "");
try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) {
Streams.readAllLines(input, new Callback<String>() {
@Override
@ -52,6 +52,6 @@ public class HelpPrinter {
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
terminal.println();
terminal.println(Terminal.Verbosity.SILENT, "");
}
}

View File

@ -19,114 +19,71 @@
package org.elasticsearch.common.cli;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.SuppressForbidden;
import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.Locale;
import java.nio.charset.Charset;
import org.elasticsearch.common.SuppressForbidden;
/**
*
* A Terminal wraps access to reading input and writing output for a {@link CliTool}.
*
* The available methods are similar to those of {@link Console}, with the ability
* to read either normal text or a password, and the ability to print a line
* of text. Printing is also gated by the {@link Verbosity} of the terminal,
* which allows {@link #println(Verbosity,String)} calls to act like a logger,
* only actually printing if the verbosity level of the terminal is at or above
* the verbosity of the message.
*/
@SuppressForbidden(reason = "System#out")
public abstract class Terminal {
public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();
/** The default terminal implementation, which will be a console if available, or stdout/stderr if not. */
public static final Terminal DEFAULT = ConsoleTerminal.isSupported() ? new ConsoleTerminal() : new SystemTerminal();
public static enum Verbosity {
SILENT(0), NORMAL(1), VERBOSE(2);
private final int level;
private Verbosity(int level) {
this.level = level;
}
public boolean enabled(Verbosity verbosity) {
return level >= verbosity.level;
}
public static Verbosity resolve(CommandLine cli) {
if (cli.hasOption("s")) {
return SILENT;
}
if (cli.hasOption("v")) {
return VERBOSE;
}
return NORMAL;
}
/** Defines the available verbosity levels of messages to be printed. */
public enum Verbosity {
SILENT, /* always printed */
NORMAL, /* printed when no options are given to cli */
VERBOSE /* printed only when cli is passed verbose option */
}
/** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
private Verbosity verbosity = Verbosity.NORMAL;
public Terminal() {
this(Verbosity.NORMAL);
}
public Terminal(Verbosity verbosity) {
/** Sets the verbosity of the terminal. */
void setVerbosity(Verbosity verbosity) {
this.verbosity = verbosity;
}
public void verbosity(Verbosity verbosity) {
this.verbosity = verbosity;
}
/** Reads clear text from the terminal input. See {@link Console#readLine()}. */
public abstract String readText(String prompt);
public Verbosity verbosity() {
return verbosity;
}
/** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
public abstract char[] readSecret(String prompt);
public abstract String readText(String text, Object... args);
/** Print a message directly to the terminal. */
protected abstract void doPrint(String msg);
public abstract char[] readSecret(String text, Object... args);
protected abstract void printStackTrace(Throwable t);
public void println() {
println(Verbosity.NORMAL);
}
public void println(String msg) {
/** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
public final void println(String msg) {
println(Verbosity.NORMAL, msg);
}
public void print(String msg) {
print(Verbosity.NORMAL, msg);
}
public void println(Verbosity verbosity) {
println(verbosity, "");
}
public void println(Verbosity verbosity, String msg) {
print(verbosity, msg + System.lineSeparator());
}
public void print(Verbosity verbosity, String msg) {
if (this.verbosity.enabled(verbosity)) {
doPrint(msg);
/** Prints a line to the terminal at {@code verbosity} level. */
public final void println(Verbosity verbosity, String msg) {
if (this.verbosity.ordinal() >= verbosity.ordinal()) {
doPrint(msg + System.lineSeparator());
}
}
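Since the constants are declared from SILENT to VERBOSE, the ordinal comparison above is all the gating needed; a standalone sketch of the pattern:

// Sketch: logger-style gating with enum ordinals (SILENT < NORMAL < VERBOSE).
enum Verbosity { SILENT, NORMAL, VERBOSE }

class MiniTerminal {
    private Verbosity verbosity = Verbosity.NORMAL;

    void println(Verbosity level, String msg) {
        // print only if the terminal is at least as verbose as the message requires
        if (verbosity.ordinal() >= level.ordinal()) {
            System.out.println(msg);
        }
    }
}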
public void printError(String msg) {
println(Verbosity.SILENT, "ERROR: " + msg);
}
public void printWarn(String msg) {
println(Verbosity.SILENT, "WARN: " + msg);
}
protected abstract void doPrint(String msg);
private static class ConsoleTerminal extends Terminal {
final Console console = System.console();
private static final Console console = System.console();
static boolean supported() {
return System.console() != null;
static boolean isSupported() {
return console != null;
}
@Override
@ -136,35 +93,29 @@ public abstract class Terminal {
}
@Override
public String readText(String text, Object... args) {
return console.readLine(text, args);
public String readText(String prompt) {
return console.readLine("%s", prompt);
}
@Override
public char[] readSecret(String text, Object... args) {
return console.readPassword(text, args);
}
@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(console.writer());
public char[] readSecret(String prompt) {
return console.readPassword("%s", prompt);
}
}
@SuppressForbidden(reason = "System#out")
private static class SystemTerminal extends Terminal {
private final PrintWriter printWriter = new PrintWriter(System.out);
@Override
@SuppressForbidden(reason = "System#out")
public void doPrint(String msg) {
System.out.print(msg);
System.out.flush();
}
@Override
public String readText(String text, Object... args) {
print(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
public String readText(String text) {
doPrint(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
try {
return reader.readLine();
} catch (IOException ioe) {
@ -173,13 +124,8 @@ public abstract class Terminal {
}
@Override
public char[] readSecret(String text, Object... args) {
return readText(text, args).toCharArray();
}
@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(printWriter);
public char[] readSecret(String text) {
return readText(text).toCharArray();
}
}
}

View File

@ -38,6 +38,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory;
import org.joda.time.DateTime;
@ -706,6 +707,13 @@ public abstract class StreamInput extends InputStream {
return readNamedWriteable(ScoreFunctionBuilder.class);
}
/**
* Reads a {@link Task.Status} from the current stream.
*/
public Task.Status readTaskStatus() throws IOException {
return readNamedWriteable(Task.Status.class);
}
/**
* Reads a list of objects
*/

View File

@ -37,6 +37,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorFactory;
import org.joda.time.ReadableInstant;
@ -676,6 +677,13 @@ public abstract class StreamOutput extends OutputStream {
writeNamedWriteable(scoreFunctionBuilder);
}
/**
* Writes a {@link Task.Status} to the current stream.
*/
public void writeTaskStatus(Task.Status status) throws IOException {
writeNamedWriteable(status);
}
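Both helpers delegate to the named-writeable machinery, so the read and write sides must stay symmetric. A sketch of that contract with plain data streams (the type-name scheme here is a stand-in, not the Task.Status wire format):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch: a name on the wire tells the reader which concrete type follows.
class StatusIo {
    static void writeStatus(DataOutputStream out, String name, long value) throws IOException {
        out.writeUTF(name);
        out.writeLong(value);
    }

    static long readStatus(DataInputStream in, String expectedName) throws IOException {
        String name = in.readUTF();
        if (!name.equals(expectedName)) {
            throw new IOException("unknown status type [" + name + "]");
        }
        return in.readLong();  // must mirror the write order exactly
    }
}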
/**
* Writes the given {@link GeoPoint} to the stream
*/

View File

@ -46,6 +46,24 @@ import java.util.stream.Collectors;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some settings (dynamic=true) can be modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward
* to add settings for the majority of cases. For instance, a simple boolean setting can be defined like this:
* <pre>{@code
* public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
* </pre>
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method.
* <pre>
* final boolean myBooleanValue = MY_BOOLEAN.get(settings);
* </pre>
* It's recommended to use typed settings rather than string-based settings. For example, adding a setting for an enum type:
* <pre>{@code
* public enum Color {
*     RED, GREEN, BLUE;
* }
* public static final Setting<Color> MY_COLOR = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
* }
* </pre>
*/
public class Setting<T> extends ToXContentToBytes {
private final String key;
@ -84,7 +102,9 @@ public class Setting<T> extends ToXContentToBytes {
}
/**
* Returns the settings key or a prefix if this setting is a group setting
* Returns the settings key or a prefix if this setting is a group setting.
* <b>Note: this method should not be used to retrieve a value from a {@link Settings} object.
* Use {@link #get(Settings)} instead</b>
*
* @see #isGroupSetting()
*/
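A hedged usage sketch tying the two javadoc examples together, assuming the Setting and Settings APIs in this commit (the GREEN value is illustrative):

public enum Color { RED, GREEN, BLUE }

public static final Setting<Color> MY_COLOR =
        new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Setting.Scope.CLUSTER);

// Retrieval goes through get(Settings), never through getKey():
Settings settings = Settings.settingsBuilder().put("my.color.setting", "GREEN").build();
Color color = MY_COLOR.get(settings);  // Color.GREEN, parsed via Color::valueOf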

View File

@ -25,14 +25,18 @@ import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.rest.RestRequest;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.Set;
/**
*
* A class that allows filtering of settings objects by simple regular expression patterns or full settings keys.
* It's used for response filtering on the REST layer, for instance to filter out sensitive information like access keys.
*/
public final class SettingsFilter extends AbstractComponent {
/**
@ -40,50 +44,62 @@ public final class SettingsFilter extends AbstractComponent {
*/
public static String SETTINGS_FILTER_PARAM = "settings_filter";
private final CopyOnWriteArrayList<String> patterns = new CopyOnWriteArrayList<>();
private final Set<String> patterns;
private final String patternString;
public SettingsFilter(Settings settings) {
public SettingsFilter(Settings settings, Collection<String> patterns) {
super(settings);
HashSet<String> set = new HashSet<>();
for (String pattern : patterns) {
if (isValidPattern(pattern) == false) {
throw new IllegalArgumentException("invalid pattern: " + pattern);
}
}
this.patterns = Collections.unmodifiableSet(new HashSet<>(patterns));
patternString = Strings.collectionToDelimitedString(patterns, ",");
}
/**
* Adds a new simple pattern to the list of filters
* Returns a set of patterns
*/
public void addFilter(String pattern) {
patterns.add(pattern);
public Set<String> getPatterns() {
return patterns;
}
/**
* Removes a simple pattern from the list of filters
* Returns <code>true</code> iff the given string is either a valid settings key pattern or a simple regular expression
* @see Regex
* @see AbstractScopedSettings#isValidKey(String)
*/
public void removeFilter(String pattern) {
patterns.remove(pattern);
}
public String getPatterns() {
return Strings.collectionToDelimitedString(patterns, ",");
public static boolean isValidPattern(String pattern) {
return AbstractScopedSettings.isValidKey(pattern) || Regex.isSimpleMatchPattern(pattern);
}
public void addFilterSettingParams(RestRequest request) {
if (patterns.isEmpty() == false) {
request.params().put(SETTINGS_FILTER_PARAM, getPatterns());
request.params().put(SETTINGS_FILTER_PARAM, patternString);
}
}
public static Settings filterSettings(Params params, Settings settings) {
String patterns = params.param(SETTINGS_FILTER_PARAM);
Settings filteredSettings = settings;
final Settings filteredSettings;
if (patterns != null && patterns.isEmpty() == false) {
filteredSettings = SettingsFilter.filterSettings(patterns, filteredSettings);
filteredSettings = filterSettings(Strings.commaDelimitedListToSet(patterns), settings);
} else {
filteredSettings = settings;
}
return filteredSettings;
}
public static Settings filterSettings(String patterns, Settings settings) {
String[] patternArray = Strings.delimitedListToStringArray(patterns, ",");
public Settings filter(Settings settings) {
return filterSettings(patterns, settings);
}
private static Settings filterSettings(Iterable<String> patterns, Settings settings) {
Settings.Builder builder = Settings.settingsBuilder().put(settings);
List<String> simpleMatchPatternList = new ArrayList<>();
for (String pattern : patternArray) {
for (String pattern : patterns) {
if (Regex.isSimpleMatchPattern(pattern)) {
simpleMatchPatternList.add(pattern);
} else {
@ -102,4 +118,4 @@ public final class SettingsFilter extends AbstractComponent {
}
return builder.build();
}
}
}
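A hedged sketch of using the now-immutable filter, assuming the constructor and filter(Settings) method above (the pattern values and nodeSettings are illustrative):

SettingsFilter filter = new SettingsFilter(Settings.EMPTY,
        Arrays.asList("cloud.key", "*.password"));  // one exact key, one simple regex
Settings visible = filter.filter(nodeSettings);     // returns a filtered copy, the input is untouched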

View File

@ -20,30 +20,28 @@
package org.elasticsearch.common.settings;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.tribe.TribeService;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
/**
* A module that binds the provided settings to the {@link Settings} interface.
*
*
*/
public class SettingsModule extends AbstractModule {
private final Settings settings;
private final SettingsFilter settingsFilter;
private final Set<String> settingsFilterPattern = new HashSet<>();
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
public SettingsModule(Settings settings) {
this.settings = settings;
this.settingsFilter = settingsFilter;
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);
}
@ -62,12 +60,17 @@ public class SettingsModule extends AbstractModule {
clusterSettings.validate(settings.filter(acceptOnlyClusterSettings));
validateTribeSettings(settings, clusterSettings);
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
bind(SettingsFilter.class).toInstance(new SettingsFilter(settings, settingsFilterPattern));
bind(ClusterSettings.class).toInstance(clusterSettings);
bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
}
/**
* Registers a new setting. This method should be used by plugins in order to expose any custom settings the plugin defines.
* Unless a setting is registered it is unusable. If an unregistered setting is nevertheless specified, the node will reject
* it during startup.
*/
public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
@ -85,7 +88,28 @@ public class SettingsModule extends AbstractModule {
}
}
public void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
/**
* Registers a settings filter pattern that allows filtering out certain settings, for instance settings that contain sensitive information
* or that exist for internal purposes only. The given pattern must either be a valid settings key or a simple regex pattern.
*/
public void registerSettingsFilter(String filter) {
if (SettingsFilter.isValidPattern(filter) == false) {
throw new IllegalArgumentException("filter [" + filter + "] is invalid: must be either a key or a regex pattern");
}
if (settingsFilterPattern.contains(filter)) {
throw new IllegalArgumentException("filter [" + filter + "] has already been registered");
}
settingsFilterPattern.add(filter);
}
public void registerSettingsFilterIfMissing(String filter) {
// only add the filter if it is not already registered
if (settingsFilterPattern.contains(filter) == false) {
registerSettingsFilter(filter);
}
}
private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);
for (Map.Entry<String, Settings> tribeSettings : groups.entrySet()) {
Settings thisTribesSettings = tribeSettings.getValue();
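Putting registerSetting and registerSettingsFilter together, a plugin would typically wire both during module setup; a hedged sketch (the setting name and onModule hook are illustrative):

public void onModule(SettingsModule module) {
    Setting<String> apiKey =
            new Setting<>("myplugin.api_key", "", s -> s, false, Setting.Scope.CLUSTER);
    module.registerSetting(apiKey);                     // unregistered settings are rejected at startup
    module.registerSettingsFilter("myplugin.api_key");  // keep the key out of filtered REST responses
}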

View File

@ -93,4 +93,10 @@ public interface Discovery extends LifecycleComponent<Discovery> {
*/
DiscoveryStats stats();
/**
* @return the current value of minimum master nodes, or -1 for not set
*/
int getMinimumMasterNodes();
}

View File

@ -299,6 +299,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
return new DiscoveryStats(null);
}
@Override
public int getMinimumMasterNodes() {
return -1;
}
private LocalDiscovery[] members() {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {

View File

@ -354,6 +354,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return new DiscoveryStats(queueStats);
}
@Override
public int getMinimumMasterNodes() {
return electMaster.minimumMasterNodes();
}
/**
* returns true if zen discovery is started and there is currently a background thread active for (re)joining
* the cluster. Used for testing.

View File

@ -25,18 +25,18 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
import java.nio.file.Path;
import java.util.function.Supplier;
/**
*
@ -51,23 +51,17 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
private final TransportNodesListGatewayMetaState listGatewayMetaState;
private final String initialMeta;
private final ClusterName clusterName;
private final Supplier<Integer> minimumMasterNodesProvider;
@Inject
public Gateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv, GatewayMetaState metaState,
TransportNodesListGatewayMetaState listGatewayMetaState, ClusterName clusterName) {
TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery) {
super(settings);
this.clusterService = clusterService;
this.nodeEnv = nodeEnv;
this.metaState = metaState;
this.listGatewayMetaState = listGatewayMetaState;
this.clusterName = clusterName;
this.minimumMasterNodesProvider = discovery::getMinimumMasterNodes;
clusterService.addLast(this);
// we define what is our minimum "master" nodes, use that to allow for recovery
this.initialMeta = settings.get("gateway.initial_meta", settings.get("gateway.local.initial_meta", settings.get("discovery.zen.minimum_master_nodes", "1")));
}
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
@ -76,7 +70,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds.toArray(String.class), null).actionGet();
int requiredAllocation = calcRequiredAllocations(this.initialMeta, nodesIds.size());
int requiredAllocation = Math.max(1, minimumMasterNodesProvider.get());
if (nodesState.failures().length > 0) {
@ -139,39 +133,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
}
}
}
ClusterState.Builder builder = ClusterState.builder(clusterName);
ClusterState.Builder builder = ClusterState.builder(clusterService.state().getClusterName());
builder.metaData(metaDataBuilder);
listener.onSuccess(builder.build());
}
protected int calcRequiredAllocations(final String setting, final int nodeCount) {
int requiredAllocation = 1;
try {
if ("quorum".equals(setting)) {
if (nodeCount > 2) {
requiredAllocation = (nodeCount / 2) + 1;
}
} else if ("quorum-1".equals(setting) || "half".equals(setting)) {
if (nodeCount > 2) {
requiredAllocation = ((1 + nodeCount) / 2);
}
} else if ("one".equals(setting)) {
requiredAllocation = 1;
} else if ("full".equals(setting) || "all".equals(setting)) {
requiredAllocation = nodeCount;
} else if ("full-1".equals(setting) || "all-1".equals(setting)) {
if (nodeCount > 1) {
requiredAllocation = nodeCount - 1;
}
} else {
requiredAllocation = Integer.parseInt(setting);
}
} catch (Exception e) {
logger.warn("failed to derive initial_meta from value {}", setting);
}
return requiredAllocation;
}
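For reference, the removed keywords mapped to concrete counts as follows; a worked sketch of the old semantics (not new behaviour):

// With nodeCount = 5:
//   "quorum" -> (5 / 2) + 1 = 3      "half"   -> (1 + 5) / 2 = 3
//   "full"   -> 5                    "full-1" -> 4      "one" -> 1
static int quorum(int nodeCount) {
    return nodeCount > 2 ? (nodeCount / 2) + 1 : 1;
}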
public void reset() throws Exception {
try {
Path[] dataPaths = nodeEnv.nodeDataPaths();
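The Gateway now reads the live minimum-master-nodes value through a method reference instead of a frozen setting; a standalone sketch of that Supplier wiring:

import java.util.function.Supplier;

// Sketch: defer to the discovery module for the current minimum-master-nodes value.
class MiniGateway {
    private final Supplier<Integer> minimumMasterNodesProvider;

    MiniGateway(Supplier<Integer> provider) {
        this.minimumMasterNodesProvider = provider;  // e.g. discovery::getMinimumMasterNodes
    }

    int requiredAllocations() {
        // -1 means "not set", so clamp to at least one required copy
        return Math.max(1, minimumMasterNodesProvider.get());
    }
}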

View File

@ -21,28 +21,16 @@ package org.elasticsearch.gateway;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
/**
*
*/
public class GatewayModule extends AbstractModule {
public static final String GATEWAY_TYPE_KEY = "gateway.type";
private final ExtensionPoint.SelectedType<Gateway> gatewayTypes = new ExtensionPoint.SelectedType<>("gateway", Gateway.class);
private final Settings settings;
public GatewayModule(Settings settings) {
this.settings = settings;
registerGatewayType("default", Gateway.class);
}
/**
* Adds a custom gateway type.
*/
public void registerGatewayType(String type, Class<? extends Gateway> clazz) {
gatewayTypes.registerExtension(type, clazz);
}
@Override
@ -50,7 +38,6 @@ public class GatewayModule extends AbstractModule {
bind(MetaStateService.class).asEagerSingleton();
bind(DanglingIndicesState.class).asEagerSingleton();
bind(GatewayService.class).asEagerSingleton();
gatewayTypes.bindType(binder(), settings, GATEWAY_TYPE_KEY, "default");
bind(TransportNodesListGatewayMetaState.class).asEagerSingleton();
bind(GatewayMetaState.class).asEagerSingleton();
bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton();

View File

@ -39,7 +39,9 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
@ -92,9 +94,12 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
private final AtomicBoolean scheduledRecovery = new AtomicBoolean();
@Inject
public GatewayService(Settings settings, Gateway gateway, AllocationService allocationService, ClusterService clusterService, DiscoveryService discoveryService, ThreadPool threadPool) {
public GatewayService(Settings settings, AllocationService allocationService, ClusterService clusterService,
DiscoveryService discoveryService, ThreadPool threadPool,
NodeEnvironment nodeEnvironment, GatewayMetaState metaState,
TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery) {
super(settings);
this.gateway = gateway;
this.gateway = new Gateway(settings, clusterService, nodeEnvironment, metaState, listGatewayMetaState, discovery);
this.allocationService = allocationService;
this.clusterService = clusterService;
this.discoveryService = discoveryService;
@ -233,6 +238,10 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
}
}
public Gateway getGateway() {
return gateway;
}
class GatewayRecoveryListener implements Gateway.GatewayStateRecoveredListener {
@Override

View File

@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.util.ArrayList;
import java.util.Collections;
@ -109,7 +110,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final boolean snapshotRestore = shard.restoreSource() != null;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
final NodesAndVersions nodesAndVersions;
final NodesResult nodesResult;
final boolean enoughAllocationsFound;
if (lastActiveAllocationIds.isEmpty()) {
@ -117,20 +118,20 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
nodesResult = buildVersionBasedNodes(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
enoughAllocationsFound = nodesResult.allocationsFound > 0;
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions);
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodesResult.allocationsFound, shard);
} else {
assert lastActiveAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
nodesResult = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds);
enoughAllocationsFound = nodesResult.allocationsFound > 0;
logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesResult.allocationsFound, shard, lastActiveAllocationIds);
}
if (enoughAllocationsFound == false){
@ -143,22 +144,22 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
// we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore();
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound);
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesResult.allocationsFound);
}
continue;
}
final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes);
final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesResult.nodes);
if (nodesToAllocate.yesNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
changed = true;
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (nodesToAllocate.throttleNodes.isEmpty() == true && nodesToAllocate.noNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.noNodes.get(0);
logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
changed = true;
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodes);
@ -173,11 +174,10 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
* entries with matching allocation id are always at the front of the list.
*/
protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
protected NodesResult buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
LinkedList<DiscoveryNode> matchingNodes = new LinkedList<>();
LinkedList<DiscoveryNode> nonMatchingNodes = new LinkedList<>();
long highestVersion = -1;
for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
DiscoveryNode node = nodeShardState.getNode();
String allocationId = nodeShardState.allocationId();
@ -187,7 +187,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}
if (nodeShardState.storeException() == null) {
if (allocationId == null && nodeShardState.version() != -1) {
if (allocationId == null && nodeShardState.legacyVersion() != ShardStateMetaData.NO_VERSION) {
// old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard
allocationId = "_n/a_";
}
@ -205,14 +205,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
matchingNodes.addLast(node);
}
highestVersion = Math.max(highestVersion, nodeShardState.version());
} else if (matchAnyShard) {
if (nodeShardState.primary()) {
nonMatchingNodes.addFirst(node);
} else {
nonMatchingNodes.addLast(node);
}
highestVersion = Math.max(highestVersion, nodeShardState.version());
}
}
}
@ -224,13 +222,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
if (logger.isTraceEnabled()) {
logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", ")));
}
return new NodesAndVersions(nodes, nodes.size(), highestVersion);
return new NodesResult(nodes, nodes.size());
}
/**
* used by old version-based allocation
*/
private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesResult nodesAndVersions) {
// check if the counts meets the minimum set
int requiredAllocation = 1;
// if we restore from a repository, one copy is more than enough
@ -288,29 +286,29 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest
* version are always at the front of the list.
*/
NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
NodesResult buildVersionBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>();
int numberOfAllocationsFound = 0;
long highestVersion = -1;
long highestVersion = ShardStateMetaData.NO_VERSION;
for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
long version = nodeShardState.version();
long version = nodeShardState.legacyVersion();
DiscoveryNode node = nodeShardState.getNode();
if (ignoreNodes.contains(node.id())) {
continue;
}
// -1 version means it does not exists, which is what the API returns, and what we expect to
// no version means the shard does not exist, which is what the API returns and what we expect
if (nodeShardState.storeException() == null) {
logger.trace("[{}] on node [{}] has version [{}] of shard", shard, nodeShardState.getNode(), version);
} else {
// when there is an store exception, we disregard the reported version and assign it as -1 (same as shard does not exist)
logger.trace("[{}] on node [{}] has version [{}] but the store can not be opened, treating as version -1", nodeShardState.storeException(), shard, nodeShardState.getNode(), version);
version = -1;
// when there is a store exception, we disregard the reported version and assign no version (same as if the shard does not exist)
logger.trace("[{}] on node [{}] has version [{}] but the store can not be opened, treating as no version", nodeShardState.storeException(), shard, nodeShardState.getNode(), version);
version = ShardStateMetaData.NO_VERSION;
}
if (version != -1) {
if (version != ShardStateMetaData.NO_VERSION) {
numberOfAllocationsFound++;
// If we've found a new "best" candidate, clear the
// current candidates and add it
@ -348,7 +346,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.trace("{} candidates for allocation: {}", shard, sb.toString());
}
return new NodesAndVersions(Collections.unmodifiableList(nodesWithHighestVersion), numberOfAllocationsFound, highestVersion);
return new NodesResult(Collections.unmodifiableList(nodesWithHighestVersion), numberOfAllocationsFound);
}
/**
@ -362,15 +360,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
static class NodesAndVersions {
static class NodesResult {
public final List<DiscoveryNode> nodes;
public final int allocationsFound;
public final long highestVersion;
public NodesAndVersions(List<DiscoveryNode> nodes, int allocationsFound, long highestVersion) {
public NodesResult(List<DiscoveryNode> nodes, int allocationsFound) {
this.nodes = nodes;
this.allocationsFound = allocationsFound;
this.highestVersion = highestVersion;
}
}
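The allocation-id path above keeps primaries at the front of the candidate list via addFirst/addLast; a minimal sketch of that ordering trick:

import java.util.LinkedList;

// Sketch: preferred candidates go to the front without a separate sort pass.
class CandidateOrdering {
    static LinkedList<String> order(String[] names, boolean[] primary) {
        LinkedList<String> nodes = new LinkedList<>();
        for (int i = 0; i < names.length; i++) {
            if (primary[i]) {
                nodes.addFirst(names[i]);  // primaries are tried first
            } else {
                nodes.addLast(names[i]);
            }
        }
        return nodes;
    }
}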

View File

@ -173,7 +173,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
changed = true;
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), shard.version(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed

View File

@ -138,7 +138,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
} catch (Exception exception) {
logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, shardStateMetaData.primary, exception);
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion, allocationId, shardStateMetaData.primary, exception);
}
}
// old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata
@ -149,11 +149,11 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
} else {
logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, shardStateMetaData.primary);
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion, allocationId, shardStateMetaData.primary);
}
}
logger.trace("{} no local shard info found", shardId);
return new NodeGatewayStartedShards(clusterService.localNode(), -1, null, false);
return new NodeGatewayStartedShards(clusterService.localNode(), ShardStateMetaData.NO_VERSION, null, false);
} catch (Exception e) {
throw new ElasticsearchException("failed to load started shards", e);
}
@ -276,27 +276,27 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
public static class NodeGatewayStartedShards extends BaseNodeResponse {
private long version = -1;
private long legacyVersion = ShardStateMetaData.NO_VERSION; // for pre-3.0 shards that have not yet been active
private String allocationId = null;
private boolean primary = false;
private Throwable storeException = null;
public NodeGatewayStartedShards() {
}
public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, boolean primary) {
this(node, version, allocationId, primary, null);
public NodeGatewayStartedShards(DiscoveryNode node, long legacyVersion, String allocationId, boolean primary) {
this(node, legacyVersion, allocationId, primary, null);
}
public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, boolean primary, Throwable storeException) {
public NodeGatewayStartedShards(DiscoveryNode node, long legacyVersion, String allocationId, boolean primary, Throwable storeException) {
super(node);
this.version = version;
this.legacyVersion = legacyVersion;
this.allocationId = allocationId;
this.primary = primary;
this.storeException = storeException;
}
public long version() {
return this.version;
public long legacyVersion() {
return this.legacyVersion;
}
public String allocationId() {
@ -314,7 +314,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
version = in.readLong();
legacyVersion = in.readLong();
allocationId = in.readOptionalString();
primary = in.readBoolean();
if (in.readBoolean()) {
@ -325,7 +325,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(version);
out.writeLong(legacyVersion);
out.writeOptionalString(allocationId);
out.writeBoolean(primary);
if (storeException != null) {
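readFrom and writeTo must consume and produce fields in the same order; a standalone sketch of that contract with plain data streams (the field set mimics the response above, the stream types are stand-ins):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch: optional-string encoding writes a presence flag before the value.
class ShardInfoIo {
    static void write(DataOutputStream out, long legacyVersion, String allocationId, boolean primary)
            throws IOException {
        out.writeLong(legacyVersion);
        out.writeBoolean(allocationId != null);
        if (allocationId != null) {
            out.writeUTF(allocationId);
        }
        out.writeBoolean(primary);
    }

    static void read(DataInputStream in) throws IOException {
        long legacyVersion = in.readLong();                            // same order as write()
        String allocationId = in.readBoolean() ? in.readUTF() : null;
        boolean primary = in.readBoolean();
        System.out.println(legacyVersion + " " + allocationId + " " + primary);
    }
}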

View File

@ -20,9 +20,7 @@
package org.elasticsearch.http.netty;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelHandler;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ExceptionEvent;
@ -30,9 +28,6 @@ import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.HttpRequest;
import java.util.regex.Pattern;
/**
*
*/
@ -40,15 +35,12 @@ import java.util.regex.Pattern;
public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
private final NettyHttpServerTransport serverTransport;
private final Pattern corsPattern;
private final boolean httpPipeliningEnabled;
private final boolean detailedErrorsEnabled;
private final ThreadContext threadContext;
public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) {
this.serverTransport = serverTransport;
this.corsPattern = RestUtils
.checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings()));
this.httpPipeliningEnabled = serverTransport.pipelining;
this.detailedErrorsEnabled = detailedErrorsEnabled;
this.threadContext = threadContext;
@ -70,9 +62,9 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
// when reading, or using a cumulation buffer
NettyHttpRequest httpRequest = new NettyHttpRequest(request, e.getChannel());
if (oue != null) {
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, corsPattern, oue, detailedErrorsEnabled));
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, oue, detailedErrorsEnabled));
} else {
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, corsPattern, detailedErrorsEnabled));
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, detailedErrorsEnabled));
}
super.messageReceived(ctx, e);
}

View File

@ -19,18 +19,17 @@
package org.elasticsearch.http.netty;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
import org.elasticsearch.http.HttpChannel;
import org.elasticsearch.http.netty.cors.CorsHandler;
import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
@ -40,28 +39,17 @@ import org.jboss.netty.handler.codec.http.CookieDecoder;
import org.jboss.netty.handler.codec.http.CookieEncoder;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.USER_AGENT;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;
/**
*
@ -72,18 +60,18 @@ public class NettyHttpChannel extends HttpChannel {
private final Channel channel;
private final org.jboss.netty.handler.codec.http.HttpRequest nettyRequest;
private OrderedUpstreamMessageEvent orderedUpstreamMessageEvent = null;
private Pattern corsPattern;
public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request, Pattern corsPattern, boolean detailedErrorsEnabled) {
public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request,
boolean detailedErrorsEnabled) {
super(request, detailedErrorsEnabled);
this.transport = transport;
this.channel = request.getChannel();
this.nettyRequest = request.request();
this.corsPattern = corsPattern;
}
public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request, Pattern corsPattern, OrderedUpstreamMessageEvent orderedUpstreamMessageEvent, boolean detailedErrorsEnabled) {
this(transport, request, corsPattern, detailedErrorsEnabled);
public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request,
OrderedUpstreamMessageEvent orderedUpstreamMessageEvent, boolean detailedErrorsEnabled) {
this(transport, request, detailedErrorsEnabled);
this.orderedUpstreamMessageEvent = orderedUpstreamMessageEvent;
}
@ -95,48 +83,12 @@ public class NettyHttpChannel extends HttpChannel {
@Override
public void sendResponse(RestResponse response) {
// Decide whether to close the connection or not.
boolean http10 = nettyRequest.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
boolean close =
HttpHeaders.Values.CLOSE.equalsIgnoreCase(nettyRequest.headers().get(HttpHeaders.Names.CONNECTION)) ||
(http10 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(nettyRequest.headers().get(HttpHeaders.Names.CONNECTION)));
// if the response object was created upstream, then use it;
// otherwise, create a new one
HttpResponse resp = newResponse();
resp.setStatus(getStatus(response.status()));
// Build the response object.
HttpResponseStatus status = getStatus(response.status());
org.jboss.netty.handler.codec.http.HttpResponse resp;
if (http10) {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status);
if (!close) {
resp.headers().add(HttpHeaders.Names.CONNECTION, "Keep-Alive");
}
} else {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
}
if (RestUtils.isBrowser(nettyRequest.headers().get(USER_AGENT))) {
if (SETTING_CORS_ENABLED.get(transport.settings())) {
String originHeader = request.header(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) {
if (corsPattern == null) {
String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings());
if (!Strings.isNullOrEmpty(allowedOrigins)) {
resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins);
}
} else {
resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, corsPattern.matcher(originHeader).matches() ? originHeader : "null");
}
}
if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
// Allow Ajax requests based on the CORS "preflight" request
resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings()));
}
if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {
resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}
}
CorsHandler.setCorsResponseHeaders(nettyRequest, resp, transport.getCorsConfig());
String opaque = nettyRequest.headers().get("X-Opaque-Id");
if (opaque != null) {
@ -201,7 +153,7 @@ public class NettyHttpChannel extends HttpChannel {
addedReleaseListener = true;
}
if (close) {
if (isCloseConnection()) {
future.addListener(ChannelFutureListener.CLOSE);
}
@ -212,6 +164,36 @@ public class NettyHttpChannel extends HttpChannel {
}
}
// Determine if the request protocol version is HTTP 1.0
private boolean isHttp10() {
return nettyRequest.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
}
// Determine if the request connection should be closed on completion.
private boolean isCloseConnection() {
final boolean http10 = isHttp10();
return CLOSE.equalsIgnoreCase(nettyRequest.headers().get(CONNECTION)) ||
(http10 && !KEEP_ALIVE.equalsIgnoreCase(nettyRequest.headers().get(CONNECTION)));
}
// Create a new {@link HttpResponse} to transmit the response for the netty request.
private HttpResponse newResponse() {
final boolean http10 = isHttp10();
final boolean close = isCloseConnection();
// Build the response object.
HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize
org.jboss.netty.handler.codec.http.HttpResponse resp;
if (http10) {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status);
if (!close) {
resp.headers().add(CONNECTION, "Keep-Alive");
}
} else {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
}
return resp;
}
private static final HttpResponseStatus TOO_MANY_REQUESTS = new HttpResponseStatus(429, "Too Many Requests");
private HttpResponseStatus getStatus(RestStatus status) {

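The helpers extracted above encode the standard HTTP keep-alive defaults: HTTP/1.1 keeps the connection open unless the client sends Connection: close, while HTTP/1.0 closes unless the client sends Connection: keep-alive. A standalone sketch of the same decision, using plain strings instead of Netty types:

// Sketch of the isCloseConnection() decision above, free of Netty types.
final class ConnectionPolicy {
    private ConnectionPolicy() {}

    static boolean closeConnection(String protocolVersion, String connectionHeader) {
        boolean http10 = "HTTP/1.0".equals(protocolVersion);
        // equalsIgnoreCase(null) is false, so a missing header falls through to the defaults.
        return "close".equalsIgnoreCase(connectionHeader)
            || (http10 && !"keep-alive".equalsIgnoreCase(connectionHeader));
    }
}

For example, closeConnection("HTTP/1.0", null) is true while closeConnection("HTTP/1.1", null) is false, matching the keep-alive default of each protocol version.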
View File

@ -19,6 +19,7 @@
package org.elasticsearch.http.netty;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -44,10 +45,13 @@ import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.cors.CorsConfig;
import org.elasticsearch.http.netty.cors.CorsConfigBuilder;
import org.elasticsearch.http.netty.cors.CorsHandler;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.rest.support.RestUtils;
import org.elasticsearch.transport.BindTransportException;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
@ -63,6 +67,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpContentCompressor;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.timeout.ReadTimeoutException;
@ -74,13 +79,34 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
import static org.elasticsearch.http.netty.cors.CorsHandler.ANY_ORIGIN;
/**
*
@ -146,6 +172,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
protected volatile HttpServerAdapter httpServerAdapter;
private final CorsConfig corsConfig;
@Inject
@SuppressForbidden(reason = "sets org.jboss.netty.epollBugWorkaround based on netty.epollBugWorkaround")
// TODO: why be confusing like this? just let the user do it with the netty parameter instead!
@ -158,25 +186,25 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
}
ByteSizeValue maxContentLength = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
this.maxChunkSize = HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
this.maxHeaderSize = HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
this.maxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
this.resetCookies = HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.get(settings);
ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings);
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
this.port = SETTING_HTTP_PORT.get(settings);
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.publishPort = HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.get(settings);
this.publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.detailedErrorsEnabled = HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -194,10 +222,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
}
this.compression = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings);
this.compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
this.pipelining = HttpTransportSettings.SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);
this.compression = SETTING_HTTP_COMPRESSION.get(settings);
this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
this.pipelining = SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
this.corsConfig = buildCorsConfig(settings);
// validate max content length
if (maxContentLength.bytes() > Integer.MAX_VALUE) {
@ -290,6 +319,39 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
}
private CorsConfig buildCorsConfig(Settings settings) {
if (SETTING_CORS_ENABLED.get(settings) == false) {
return CorsConfigBuilder.forOrigins().disable().build();
}
String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);
final CorsConfigBuilder builder;
if (Strings.isNullOrEmpty(origin)) {
builder = CorsConfigBuilder.forOrigins();
} else if (origin.equals(ANY_ORIGIN)) {
builder = CorsConfigBuilder.forAnyOrigin();
} else {
Pattern p = RestUtils.checkCorsSettingForRegex(origin);
if (p == null) {
builder = CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));
} else {
builder = CorsConfigBuilder.forPattern(p);
}
}
if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {
builder.allowCredentials();
}
String[] strMethods = settings.getAsArray(SETTING_CORS_ALLOW_METHODS.get(settings), new String[0]);
HttpMethod[] methods = Arrays.asList(strMethods)
.stream()
.map(HttpMethod::valueOf)
.toArray(size -> new HttpMethod[size]);
return builder.allowedRequestMethods(methods)
.maxAge(SETTING_CORS_MAX_AGE.get(settings))
.allowedRequestHeaders(settings.getAsArray(SETTING_CORS_ALLOW_HEADERS.get(settings), new String[0]))
.shortCircuit()
.build();
}
private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
@ -365,6 +427,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
return new HttpStats(channels == null ? 0 : channels.numberOfOpenChannels(), channels == null ? 0 : channels.totalChannels());
}
public CorsConfig getCorsConfig() {
return corsConfig;
}
protected void dispatchRequest(HttpRequest request, HttpChannel channel) {
httpServerAdapter.dispatchRequest(request, channel, threadPool.getThreadContext());
}
@ -430,6 +496,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
}
pipeline.addLast("aggregator", httpChunkAggregator);
if (SETTING_CORS_ENABLED.get(transport.settings())) {
pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
}
pipeline.addLast("encoder", new ESHttpResponseEncoder());
if (transport.compression) {
pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));

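buildCorsConfig dispatches on the configured http.cors.allow-origin value: an empty value means no origins, the wildcard means any origin, a /regex/ value becomes a Pattern, and anything else is treated as a list of literal origins. A hedged sketch of just that dispatch, using the CorsConfigBuilder factory methods shown below; the helper name is hypothetical, and the comma split stands in for RestUtils.corsSettingAsArray.

import java.util.regex.Pattern;

public class CorsOriginDispatch {
    // Hypothetical helper mirroring the origin dispatch in buildCorsConfig(Settings).
    static CorsConfigBuilder builderFor(String origin, Pattern patternOrNull) {
        if (origin == null || origin.isEmpty()) {
            return CorsConfigBuilder.forOrigins();        // no origins allowed
        } else if ("*".equals(origin)) {                  // CorsHandler.ANY_ORIGIN
            return CorsConfigBuilder.forAnyOrigin();
        } else if (patternOrNull != null) {               // the setting was a /regex/
            return CorsConfigBuilder.forPattern(patternOrNull);
        } else {
            return CorsConfigBuilder.forOrigins(origin.split(","));
        }
    }
}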
View File

@ -0,0 +1,233 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http.netty.cors;
import org.jboss.netty.handler.codec.http.DefaultHttpHeaders;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.regex.Pattern;
/**
* Configuration for Cross-Origin Resource Sharing (CORS).
*
* This class was lifted from the Netty project:
* https://github.com/netty/netty
*/
public final class CorsConfig {
private final Optional<Set<String>> origins;
private final Optional<Pattern> pattern;
private final boolean anyOrigin;
private final boolean enabled;
private final boolean allowCredentials;
private final long maxAge;
private final Set<HttpMethod> allowedRequestMethods;
private final Set<String> allowedRequestHeaders;
private final boolean allowNullOrigin;
private final Map<CharSequence, Callable<?>> preflightHeaders;
private final boolean shortCircuit;
CorsConfig(final CorsConfigBuilder builder) {
origins = builder.origins.map(s -> new LinkedHashSet<>(s));
pattern = builder.pattern;
anyOrigin = builder.anyOrigin;
enabled = builder.enabled;
allowCredentials = builder.allowCredentials;
maxAge = builder.maxAge;
allowedRequestMethods = builder.requestMethods;
allowedRequestHeaders = builder.requestHeaders;
allowNullOrigin = builder.allowNullOrigin;
preflightHeaders = builder.preflightHeaders;
shortCircuit = builder.shortCircuit;
}
/**
* Determines if support for CORS is enabled.
*
* @return {@code true} if support for CORS is enabled, false otherwise.
*/
public boolean isCorsSupportEnabled() {
return enabled;
}
/**
* Determines whether a wildcard origin, '*', is supported.
*
* @return {@code boolean} true if any origin is allowed.
*/
public boolean isAnyOriginSupported() {
return anyOrigin;
}
/**
* Returns the set of allowed origins.
*
* @return {@code Set} the allowed origins.
*/
public Optional<Set<String>> origins() {
return origins;
}
/**
* Returns whether the input origin is allowed by this configuration.
*
* @return {@code true} if the origin is allowed, otherwise {@code false}
*/
public boolean isOriginAllowed(final String origin) {
if (origins.isPresent()) {
return origins.get().contains(origin);
} else if (pattern.isPresent()) {
return pattern.get().matcher(origin).matches();
}
return false;
}
/**
* Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
* from the local file system.
*
* If isNullOriginAllowed is true then the server will respond with the wildcard for the
* CORS response header 'Access-Control-Allow-Origin'.
*
* @return {@code true} if a 'null' origin should be supported.
*/
public boolean isNullOriginAllowed() {
return allowNullOrigin;
}
/**
* Determines if cookies are supported for CORS requests.
*
* By default cookies are not included in CORS requests, but if isCredentialsAllowed returns
* true, cookies will be added to CORS requests. Setting this value to true will set the
* CORS 'Access-Control-Allow-Credentials' response header to true.
*
* Please note that cookie support needs to be enabled on the client side as well.
* The client needs to opt-in to send cookies by calling:
* <pre>
* xhr.withCredentials = true;
* </pre>
* The default value for 'withCredentials' is false in which case no cookies are sent.
* Setting this to true will include cookies in cross-origin requests.
*
* @return {@code true} if cookies are supported.
*/
public boolean isCredentialsAllowed() {
return allowCredentials;
}
/**
* Gets the maxAge setting.
*
* When making a preflight request the client has to perform two requests, which can be inefficient.
* This setting will set the CORS 'Access-Control-Max-Age' response header and enable the
* caching of the preflight response for the specified time. During this time no preflight
* request will be made.
*
* @return {@code long} the time in seconds that a preflight request may be cached.
*/
public long maxAge() {
return maxAge;
}
/**
* Returns the allowed set of Request Methods. The HTTP methods that should be returned in the
* CORS 'Access-Control-Allow-Methods' response header.
*
* @return {@code Set} of {@link HttpMethod}s that represent the allowed Request Methods.
*/
public Set<HttpMethod> allowedRequestMethods() {
return Collections.unmodifiableSet(allowedRequestMethods);
}
/**
* Returns the allowed set of Request Headers.
*
* The header names returned from this method will be used to set the CORS
* 'Access-Control-Allow-Headers' response header.
*
* @return {@code Set<String>} of strings that represent the allowed Request Headers.
*/
public Set<String> allowedRequestHeaders() {
return Collections.unmodifiableSet(allowedRequestHeaders);
}
/**
* Returns HTTP response headers that should be added to a CORS preflight response.
*
* @return {@link HttpHeaders} the HTTP response headers to be added.
*/
public HttpHeaders preflightResponseHeaders() {
if (preflightHeaders.isEmpty()) {
return HttpHeaders.EMPTY_HEADERS;
}
final HttpHeaders preflightHeaders = new DefaultHttpHeaders();
for (Map.Entry<CharSequence, Callable<?>> entry : this.preflightHeaders.entrySet()) {
final Object value = getValue(entry.getValue());
if (value instanceof Iterable) {
preflightHeaders.add(entry.getKey().toString(), (Iterable<?>) value);
} else {
preflightHeaders.add(entry.getKey().toString(), value);
}
}
return preflightHeaders;
}
/**
* Determines whether a CORS request should be rejected as invalid before being
* processed further.
*
* CORS headers are set after a request is processed. This may not always be desired,
* and this setting will check that the Origin is valid; if it is not valid, no
* further processing will take place and an error will be returned to the calling client.
*
* @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
*/
public boolean isShortCircuit() {
return shortCircuit;
}
private static <T> T getValue(final Callable<T> callable) {
try {
return callable.call();
} catch (final Exception e) {
throw new IllegalStateException("Could not generate value for callable [" + callable + ']', e);
}
}
@Override
public String toString() {
return "CorsConfig[enabled=" + enabled +
", origins=" + origins +
", anyOrigin=" + anyOrigin +
", isCredentialsAllowed=" + allowCredentials +
", maxAge=" + maxAge +
", allowedRequestMethods=" + allowedRequestMethods +
", allowedRequestHeaders=" + allowedRequestHeaders +
", preflightHeaders=" + preflightHeaders + ']';
}
}

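As isOriginAllowed shows, a config carries either an explicit origin set or a pattern, never both. A hedged usage sketch against the CorsConfigBuilder defined in the next file; the origins are illustrative.

import java.util.regex.Pattern;

public class CorsConfigExample {
    public static void main(String[] args) {
        CorsConfig byPattern = CorsConfigBuilder
            .forPattern(Pattern.compile("https://.*\\.example\\.org"))
            .build();
        assert byPattern.isOriginAllowed("https://app.example.org");
        assert byPattern.isOriginAllowed("https://app.example.com") == false;

        CorsConfig byList = CorsConfigBuilder.forOrigins("https://kibana.example.org").build();
        assert byList.isOriginAllowed("https://kibana.example.org");
    }
}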
View File

@ -0,0 +1,356 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http.netty.cors;
import org.jboss.netty.handler.codec.http.HttpMethod;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.regex.Pattern;
/**
* Builder used to configure and build a {@link CorsConfig} instance.
*
* This class was lifted from the Netty project:
* https://github.com/netty/netty
*/
public final class CorsConfigBuilder {
/**
* Creates a Builder instance with its origin set to '*'.
*
* @return Builder to support method chaining.
*/
public static CorsConfigBuilder forAnyOrigin() {
return new CorsConfigBuilder();
}
/**
* Creates a {@link CorsConfigBuilder} instance with the specified origin.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public static CorsConfigBuilder forOrigin(final String origin) {
if ("*".equals(origin)) {
return new CorsConfigBuilder();
}
return new CorsConfigBuilder(origin);
}
/**
* Create a {@link CorsConfigBuilder} instance with the specified pattern origin.
*
* @param pattern the regular expression pattern to match incoming origins on.
* @return {@link CorsConfigBuilder} with the configured origin pattern.
*/
public static CorsConfigBuilder forPattern(final Pattern pattern) {
if (pattern == null) {
throw new IllegalArgumentException("CORS pattern cannot be null");
}
return new CorsConfigBuilder(pattern);
}
/**
* Creates a {@link CorsConfigBuilder} instance with the specified origins.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public static CorsConfigBuilder forOrigins(final String... origins) {
return new CorsConfigBuilder(origins);
}
Optional<Set<String>> origins;
Optional<Pattern> pattern;
final boolean anyOrigin;
boolean allowNullOrigin;
boolean enabled = true;
boolean allowCredentials;
long maxAge;
final Set<HttpMethod> requestMethods = new HashSet<>();
final Set<String> requestHeaders = new HashSet<>();
final Map<CharSequence, Callable<?>> preflightHeaders = new HashMap<>();
private boolean noPreflightHeaders;
boolean shortCircuit;
/**
* Creates a new Builder instance with the origin passed in.
*
* @param origins the origin to be used for this builder.
*/
CorsConfigBuilder(final String... origins) {
this.origins = Optional.of(new LinkedHashSet<>(Arrays.asList(origins)));
pattern = Optional.empty();
anyOrigin = false;
}
/**
* Creates a new Builder instance allowing any origin, "*", which is the
* wildcard origin.
*
*/
CorsConfigBuilder() {
anyOrigin = true;
origins = Optional.empty();
pattern = Optional.empty();
}
/**
* Creates a new Builder instance allowing any origin that matches the pattern.
*
* @param pattern the pattern to match against for incoming origins.
*/
CorsConfigBuilder(final Pattern pattern) {
this.pattern = Optional.of(pattern);
origins = Optional.empty();
anyOrigin = false;
}
/**
* Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
* from the local file system. Calling this method will enable a successful CORS response
* with a wildcard for the CORS response header 'Access-Control-Allow-Origin'.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
CorsConfigBuilder allowNullOrigin() {
allowNullOrigin = true;
return this;
}
/**
* Disables CORS support.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder disable() {
enabled = false;
return this;
}
/**
* By default cookies are not included in CORS requests, but this method will enable cookies to
* be added to CORS requests. Calling this method will set the CORS 'Access-Control-Allow-Credentials'
* response header to true.
*
* Please note that cookie support needs to be enabled on the client side as well.
* The client needs to opt-in to send cookies by calling:
* <pre>
* xhr.withCredentials = true;
* </pre>
* The default value for 'withCredentials' is false in which case no cookies are sent.
* Setting this to true will include cookies in cross-origin requests.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder allowCredentials() {
allowCredentials = true;
return this;
}
/**
* When making a preflight request the client has to perform two requests, which can be inefficient.
* This setting will set the CORS 'Access-Control-Max-Age' response header and enable the
* caching of the preflight response for the specified time. During this time no preflight
* request will be made.
*
* @param max the maximum time, in seconds, that the preflight response may be cached.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder maxAge(final long max) {
maxAge = max;
return this;
}
/**
* Specifies the allowed set of HTTP Request Methods that should be returned in the
* CORS 'Access-Control-Allow-Methods' response header.
*
* @param methods the {@link HttpMethod}s that should be allowed.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder allowedRequestMethods(final HttpMethod... methods) {
requestMethods.addAll(Arrays.asList(methods));
return this;
}
/**
* Specifies the headers that should be returned in the CORS 'Access-Control-Allow-Headers'
* response header.
*
* If a client specifies headers on the request, for example by calling:
* <pre>
* xhr.setRequestHeader('My-Custom-Header', "SomeValue");
* </pre>
* the server will receive the above header name in the 'Access-Control-Request-Headers' of the
* preflight request. The server will then decide if it allows this header to be sent for the
* real request (remember that a preflight is not the real request, but a request asking the server
* whether it allows the real request).
*
* @param headers the headers to be added to the preflight 'Access-Control-Allow-Headers' response header.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder allowedRequestHeaders(final String... headers) {
requestHeaders.addAll(Arrays.asList(headers));
return this;
}
/**
* Returns HTTP response headers that should be added to a CORS preflight response.
*
* An intermediary like a load balancer might require that a CORS preflight request
* have certain headers set. This enables such headers to be added.
*
* @param name the name of the HTTP header.
* @param values the values for the HTTP header.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Object... values) {
if (values.length == 1) {
preflightHeaders.put(name, new ConstantValueGenerator(values[0]));
} else {
preflightResponseHeader(name, Arrays.asList(values));
}
return this;
}
/**
* Returns HTTP response headers that should be added to a CORS preflight response.
*
* An intermediary like a load balancer might require that a CORS preflight request
* have certain headers set. This enables such headers to be added.
*
* @param name the name of the HTTP header.
* @param value the values for the HTTP header.
* @param <T> the type of values that the Iterable contains.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public <T> CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Iterable<T> value) {
preflightHeaders.put(name, new ConstantValueGenerator(value));
return this;
}
/**
* Returns HTTP response headers that should be added to a CORS preflight response.
*
* An intermediary like a load balancer might require that a CORS preflight request
* have certain headers set. This enables such headers to be added.
*
* Some values must be dynamically created when the HTTP response is created, for
* example the 'Date' response header. This can be accomplished by using a Callable
* which will have its 'call' method invoked when the HTTP response is created.
*
* @param name the name of the HTTP header.
* @param valueGenerator a Callable which will be invoked at HTTP response creation.
* @param <T> the type of the value that the Callable can return.
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public <T> CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Callable<T> valueGenerator) {
preflightHeaders.put(name, valueGenerator);
return this;
}
/**
* Specifies that no preflight response headers should be added to a preflight response.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder noPreflightResponseHeaders() {
noPreflightHeaders = true;
return this;
}
/**
* Specifies that a CORS request should be rejected as invalid before being
* processed further.
*
* CORS headers are set after a request is processed. This may not always be desired,
* and this setting will check that the Origin is valid; if it is not valid, no
* further processing will take place and an error will be returned to the calling client.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/
public CorsConfigBuilder shortCircuit() {
shortCircuit = true;
return this;
}
/**
* Builds a {@link CorsConfig} with settings specified by previous method calls.
*
* @return {@link CorsConfig} the configured CorsConfig instance.
*/
public CorsConfig build() {
if (preflightHeaders.isEmpty() && !noPreflightHeaders) {
preflightHeaders.put("date", DateValueGenerator.INSTANCE);
preflightHeaders.put("content-length", new ConstantValueGenerator("0"));
}
return new CorsConfig(this);
}
/**
* This class is used for preflight HTTP response values that do not need to be
* generated, but instead the value is "static" in that the same value will be returned
* for each call.
*/
private static final class ConstantValueGenerator implements Callable<Object> {
private final Object value;
/**
* Sole constructor.
*
* @param value the value that will be returned when the call method is invoked.
*/
private ConstantValueGenerator(final Object value) {
if (value == null) {
throw new IllegalArgumentException("value must not be null");
}
this.value = value;
}
@Override
public Object call() {
return value;
}
}
/**
* This callable is used for the 'Date' preflight HTTP response header.
* Its value must be generated when the response is generated, and hence will be
* different for every call.
*/
private static final class DateValueGenerator implements Callable<Date> {
static final DateValueGenerator INSTANCE = new DateValueGenerator();
@Override
public Date call() throws Exception {
return new Date();
}
}
}

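Putting the builder together, a complete configuration might look like the following; the origins, header names, and max-age value are illustrative, while the methods are exactly those defined above. The explicit cast to Callable disambiguates the three preflightResponseHeader overloads.

import java.util.Date;
import java.util.concurrent.Callable;
import org.jboss.netty.handler.codec.http.HttpMethod;

public class CorsBuilderExample {
    public static CorsConfig exampleConfig() {
        return CorsConfigBuilder
            .forOrigins("https://kibana.example.org", "https://app.example.org")
            .allowCredentials()
            .maxAge(1728000)                     // cache preflight responses for 20 days
            .allowedRequestMethods(HttpMethod.GET, HttpMethod.POST, HttpMethod.OPTIONS)
            .allowedRequestHeaders("X-Requested-With", "Content-Type", "Content-Length")
            .preflightResponseHeader("x-served-at", (Callable<Date>) Date::new) // dynamic value
            .shortCircuit()
            .build();
    }
}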
View File

@ -0,0 +1,231 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http.netty.cors;
import org.elasticsearch.common.Strings;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;
import java.util.List;
import java.util.stream.Collectors;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.USER_AGENT;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.VARY;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
/**
* Handles <a href="http://www.w3.org/TR/cors/">Cross Origin Resource Sharing</a> (CORS) requests.
* <p>
* This handler can be configured using a {@link CorsConfig}, please
* refer to this class for details about the configuration options available.
*
* This code was borrowed from Netty 4 and refactored to work for Elasticsearch's Netty 3 setup.
*/
public class CorsHandler extends SimpleChannelUpstreamHandler {
public static final String ANY_ORIGIN = "*";
private final CorsConfig config;
private HttpRequest request;
/**
* Creates a new instance with the specified {@link CorsConfig}.
*/
public CorsHandler(final CorsConfig config) {
if (config == null) {
throw new IllegalArgumentException("Config cannot be null");
}
this.config = config;
}
@Override
public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws Exception {
if (config.isCorsSupportEnabled() && e.getMessage() instanceof HttpRequest) {
request = (HttpRequest) e.getMessage();
if (RestUtils.isBrowser(request.headers().get(USER_AGENT))) {
if (isPreflightRequest(request)) {
handlePreflight(ctx, request);
return;
}
if (config.isShortCircuit() && !validateOrigin()) {
forbidden(ctx, request);
return;
}
}
}
super.messageReceived(ctx, e);
}
public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, CorsConfig config) {
if (!config.isCorsSupportEnabled()) {
return;
}
String originHeader = request.headers().get(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) {
final String originHeaderVal;
if (config.isAnyOriginSupported()) {
originHeaderVal = ANY_ORIGIN;
} else if (config.isOriginAllowed(originHeader)) {
originHeaderVal = originHeader;
} else {
originHeaderVal = null;
}
if (originHeaderVal != null) {
resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, originHeaderVal);
}
}
if (config.isCredentialsAllowed()) {
resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}
private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) {
final HttpResponse response = new DefaultHttpResponse(request.getProtocolVersion(), OK);
if (setOrigin(response)) {
setAllowMethods(response);
setAllowHeaders(response);
setAllowCredentials(response);
setMaxAge(response);
setPreflightHeaders(response);
ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
} else {
forbidden(ctx, request);
}
}
private static void forbidden(final ChannelHandlerContext ctx, final HttpRequest request) {
ctx.getChannel().write(new DefaultHttpResponse(request.getProtocolVersion(), FORBIDDEN))
.addListener(ChannelFutureListener.CLOSE);
}
/**
* This is a non-CORS-specification feature which enables the setting of preflight
* response headers that might be required by intermediaries.
*
* @param response the HttpResponse to which the preflight response headers should be added.
*/
private void setPreflightHeaders(final HttpResponse response) {
response.headers().add(config.preflightResponseHeaders());
}
private boolean setOrigin(final HttpResponse response) {
final String origin = request.headers().get(ORIGIN);
if (!Strings.isNullOrEmpty(origin)) {
if ("null".equals(origin) && config.isNullOriginAllowed()) {
setAnyOrigin(response);
return true;
}
if (config.isAnyOriginSupported()) {
if (config.isCredentialsAllowed()) {
echoRequestOrigin(response);
setVaryHeader(response);
} else {
setAnyOrigin(response);
}
return true;
}
if (config.isOriginAllowed(origin)) {
setOrigin(response, origin);
setVaryHeader(response);
return true;
}
}
return false;
}
private boolean validateOrigin() {
if (config.isAnyOriginSupported()) {
return true;
}
final String origin = request.headers().get(ORIGIN);
if (Strings.isNullOrEmpty(origin)) {
// No Origin header, so this is not a CORS request and there is nothing to validate.
return true;
}
if ("null".equals(origin) && config.isNullOriginAllowed()) {
return true;
}
return config.isOriginAllowed(origin);
}
private void echoRequestOrigin(final HttpResponse response) {
setOrigin(response, request.headers().get(ORIGIN));
}
private static void setVaryHeader(final HttpResponse response) {
response.headers().set(VARY, ORIGIN);
}
private static void setAnyOrigin(final HttpResponse response) {
setOrigin(response, ANY_ORIGIN);
}
private static void setOrigin(final HttpResponse response, final String origin) {
response.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, origin);
}
private void setAllowCredentials(final HttpResponse response) {
if (config.isCredentialsAllowed()
&& !response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN).equals(ANY_ORIGIN)) {
response.headers().set(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}
private static boolean isPreflightRequest(final HttpRequest request) {
final HttpHeaders headers = request.headers();
return request.getMethod().equals(HttpMethod.OPTIONS) &&
headers.contains(HttpHeaders.Names.ORIGIN) &&
headers.contains(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD);
}
private void setAllowMethods(final HttpResponse response) {
response.headers().set(ACCESS_CONTROL_ALLOW_METHODS,
String.join(", ", config.allowedRequestMethods().stream()
.map(HttpMethod::getName)
.collect(Collectors.toList())).trim());
}
private void setAllowHeaders(final HttpResponse response) {
response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders());
}
private void setMaxAge(final HttpResponse response) {
response.headers().set(ACCESS_CONTROL_MAX_AGE, config.maxAge());
}
}

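A preflight, as isPreflightRequest checks, is an OPTIONS request carrying both an Origin and an Access-Control-Request-Method header. A hedged sketch of such a request built with the Netty 3 types used above; the URI and origin are illustrative.

import org.jboss.netty.handler.codec.http.DefaultHttpRequest;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpVersion;

public class PreflightExample {
    public static HttpRequest samplePreflight() {
        HttpRequest preflight =
            new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/index/_search");
        preflight.headers().set(HttpHeaders.Names.ORIGIN, "https://app.example.org");
        preflight.headers().set(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "POST");
        // All three conditions of isPreflightRequest(request) now hold, so the handler
        // answers with the Allow-* headers itself and never dispatches the request.
        return preflight;
    }
}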
View File

@ -23,7 +23,6 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
@ -41,7 +40,6 @@ public final class NodeServicesProvider {
private final ThreadPool threadPool;
private final IndicesQueryCache indicesQueryCache;
private final TermVectorsService termVectorsService;
private final IndicesWarmer warmer;
private final BigArrays bigArrays;
private final Client client;
@ -51,10 +49,9 @@ public final class NodeServicesProvider {
private final CircuitBreakerService circuitBreakerService;
@Inject
public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, TermVectorsService termVectorsService, @Nullable IndicesWarmer warmer, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, IndicesFieldDataCache indicesFieldDataCache, CircuitBreakerService circuitBreakerService) {
public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, @Nullable IndicesWarmer warmer, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, IndicesFieldDataCache indicesFieldDataCache, CircuitBreakerService circuitBreakerService) {
this.threadPool = threadPool;
this.indicesQueryCache = indicesQueryCache;
this.termVectorsService = termVectorsService;
this.warmer = warmer;
this.bigArrays = bigArrays;
this.client = client;
@ -72,10 +69,6 @@ public final class NodeServicesProvider {
return indicesQueryCache;
}
public TermVectorsService getTermVectorsService() {
return termVectorsService;
}
public IndicesWarmer getWarmer() {
return warmer;
}

View File

@ -287,8 +287,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
.offsets(false)
.payloads(false)
.fieldStatistics(false)
.termStatistics(false)
.dfs(false);
.termStatistics(false);
// for artificial docs to make sure that the id has changed in the item too
if (doc != null) {
termVectorsRequest.doc(doc, true);

View File

@ -34,8 +34,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -102,7 +100,6 @@ import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.ShardSuggestMetric;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogStats;
@ -148,7 +145,6 @@ public class IndexShard extends AbstractIndexShardComponent {
private final ShardRequestCache shardQueryCache;
private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final TermVectorsService termVectorsService;
private final IndexFieldDataService indexFieldDataService;
private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric();
private final ShardBitsetFilterCache shardBitsetFilterCache;
@ -232,7 +228,6 @@ public class IndexShard extends AbstractIndexShardComponent {
listenersList.add(internalIndexingStats);
this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listenersList, logger);
this.getService = new ShardGetService(indexSettings, this, mapperService);
this.termVectorsService = provider.getTermVectorsService();
this.searchService = new ShardSearchStats(slowLog);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
this.indicesQueryCache = provider.getIndicesQueryCache();
@ -345,9 +340,8 @@ public class IndexShard extends AbstractIndexShardComponent {
if (!newRouting.primary() && currentRouting.primary()) {
logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
}
// if its the same routing except for some metadata info, return
if (currentRouting.equalsIgnoringMetaData(newRouting)) {
this.shardRouting = newRouting; // might have a new version
// if its the same routing, return
if (currentRouting.equals(newRouting)) {
return;
}
}
@ -657,10 +651,6 @@ public class IndexShard extends AbstractIndexShardComponent {
return segmentsStats;
}
public TermVectorsResponse getTermVectors(TermVectorsRequest request) {
return this.termVectorsService.getTermVectors(this, request);
}
public WarmerStats warmerStats() {
return shardWarmerService.stats();
}
@ -1365,21 +1355,16 @@ public class IndexShard extends AbstractIndexShardComponent {
try {
final String writeReason;
if (currentRouting == null) {
writeReason = "freshly started, version [" + newRouting.version() + "]";
} else if (currentRouting.version() < newRouting.version()) {
writeReason = "version changed from [" + currentRouting.version() + "] to [" + newRouting.version() + "]";
writeReason = "freshly started, allocation id [" + newRouting.allocationId() + "]";
} else if (currentRouting.equals(newRouting) == false) {
writeReason = "routing changed from " + currentRouting + " to " + newRouting;
} else {
logger.trace("skip writing shard state, has been written before; previous version: [" +
currentRouting.version() + "] current version [" + newRouting.version() + "]");
assert currentRouting.version() <= newRouting.version() : "version should not go backwards for shardID: " + shardId +
" previous version: [" + currentRouting.version() + "] current version [" + newRouting.version() + "]";
logger.trace("{} skip writing shard state, has been written before", shardId);
return;
}
final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.version(), newRouting.primary(), getIndexUUID(), newRouting.allocationId());
final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.primary(), getIndexUUID(), newRouting.allocationId());
logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
ShardStateMetaData.FORMAT.write(newShardStateMetadata, newShardStateMetadata.version, shardPath().getShardStatePath());
ShardStateMetaData.FORMAT.write(newShardStateMetadata, newShardStateMetadata.legacyVersion, shardPath().getShardStatePath());
} catch (IOException e) { // this is how we used to handle it.... :(
logger.warn("failed to write shard state", e);
// we failed to write the shard state, we will try and write

View File

@ -41,15 +41,21 @@ public final class ShardStateMetaData {
private static final String INDEX_UUID_KEY = "index_uuid";
private static final String ALLOCATION_ID_KEY = "allocation_id";
public final long version;
public static final long NO_VERSION = -1L;
public final long legacyVersion; // for pre-3.0 shards that have not yet been active
public final String indexUUID;
public final boolean primary;
@Nullable
public final AllocationId allocationId; // can be null if we read from legacy format (see fromXContent and MultiDataPathUpgrader)
public ShardStateMetaData(long version, boolean primary, String indexUUID, AllocationId allocationId) {
public ShardStateMetaData(boolean primary, String indexUUID, AllocationId allocationId) {
this(NO_VERSION, primary, indexUUID, allocationId);
}
ShardStateMetaData(long legacyVersion, boolean primary, String indexUUID, AllocationId allocationId) {
assert indexUUID != null;
this.version = version;
this.legacyVersion = legacyVersion;
this.primary = primary;
this.indexUUID = indexUUID;
this.allocationId = allocationId;
@ -69,7 +75,7 @@ public final class ShardStateMetaData {
if (primary != that.primary) {
return false;
}
if (version != that.version) {
if (legacyVersion != that.legacyVersion) {
return false;
}
if (indexUUID != null ? !indexUUID.equals(that.indexUUID) : that.indexUUID != null) {
@ -84,7 +90,7 @@ public final class ShardStateMetaData {
@Override
public int hashCode() {
int result = Long.hashCode(version);
int result = Long.hashCode(legacyVersion);
result = 31 * result + (indexUUID != null ? indexUUID.hashCode() : 0);
result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
@ -93,7 +99,7 @@ public final class ShardStateMetaData {
@Override
public String toString() {
return "version [" + version + "], primary [" + primary + "], allocation [" + allocationId + "]";
return "version [" + legacyVersion + "], primary [" + primary + "], allocation [" + allocationId + "]";
}
public static final MetaDataStateFormat<ShardStateMetaData> FORMAT = new MetaDataStateFormat<ShardStateMetaData>(XContentType.JSON, SHARD_STATE_FILE_PREFIX) {
@ -107,7 +113,7 @@ public final class ShardStateMetaData {
@Override
public void toXContent(XContentBuilder builder, ShardStateMetaData shardStateMetaData) throws IOException {
builder.field(VERSION_KEY, shardStateMetaData.version);
builder.field(VERSION_KEY, shardStateMetaData.legacyVersion);
builder.field(PRIMARY_KEY, shardStateMetaData.primary);
builder.field(INDEX_UUID_KEY, shardStateMetaData.indexUUID);
if (shardStateMetaData.allocationId != null) {
@ -121,7 +127,7 @@ public final class ShardStateMetaData {
if (token == null) {
return null;
}
long version = -1;
long version = NO_VERSION;
Boolean primary = null;
String currentFieldName = null;
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
@ -152,9 +158,6 @@ public final class ShardStateMetaData {
if (primary == null) {
throw new CorruptStateException("missing value for [primary] in shard state");
}
if (version == -1) {
throw new CorruptStateException("missing value for [version] in shard state");
}
return new ShardStateMetaData(version, primary, indexUUID, allocationId);
}
};

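With the new public constructor, freshly written shard state no longer carries a version; only state read back from a pre-3.0 on-disk format can have one. A brief sketch; the index UUID is illustrative and newInitializing() is assumed as a convenient AllocationId factory.

import org.elasticsearch.cluster.routing.AllocationId;

public class ShardStateExample {
    public static void main(String[] args) {
        AllocationId allocationId = AllocationId.newInitializing(); // assumed factory
        ShardStateMetaData fresh = new ShardStateMetaData(true, "some-index-uuid", allocationId);
        // Freshly written state carries the sentinel, not a real legacy version.
        assert fresh.legacyVersion == ShardStateMetaData.NO_VERSION;
    }
}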
View File

@ -31,14 +31,9 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.termvectors.TermVectorsFilter;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.action.termvectors.dfs.DfsOnlyRequest;
import org.elasticsearch.action.termvectors.dfs.DfsOnlyResponse;
import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetField;
@ -71,15 +66,10 @@ import static org.elasticsearch.index.mapper.SourceToParse.source;
public class TermVectorsService {
private final TransportDfsOnlyAction dfsAction;
@Inject
public TermVectorsService(TransportDfsOnlyAction dfsAction) {
this.dfsAction = dfsAction;
}
private TermVectorsService() {}
public TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request) {
public static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request) {
final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.type(), request.id());
final Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
@ -137,10 +127,6 @@ public class TermVectorsService {
}
/* if there are term vectors, optionally compute dfs and/or terms filtering */
if (termVectorsByField != null) {
if (useDfs(request)) {
dfs = getAggregatedDfs(termVectorsByField, request);
}
if (request.filterSettings() != null) {
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs);
termVectorsFilter.setSettings(request.filterSettings());
@ -162,7 +148,7 @@ public class TermVectorsService {
return termVectorsResponse;
}
private void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) {
private static void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) {
Set<String> fieldNames = new HashSet<>();
for (String pattern : request.selectedFields()) {
fieldNames.addAll(indexShard.mapperService().simpleMatchToIndexNames(pattern));
@ -170,7 +156,7 @@ public class TermVectorsService {
request.selectedFields(fieldNames.toArray(Strings.EMPTY_ARRAY));
}
private boolean isValidField(MappedFieldType fieldType) {
private static boolean isValidField(MappedFieldType fieldType) {
// must be a string
if (!(fieldType instanceof StringFieldMapper.StringFieldType)) {
return false;
@ -182,7 +168,7 @@ public class TermVectorsService {
return true;
}
private Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, TermVectorsRequest request, Set<String> selectedFields) throws IOException {
private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, TermVectorsRequest request, Set<String> selectedFields) throws IOException {
/* only keep valid fields */
Set<String> validFields = new HashSet<>();
for (String field : selectedFields) {
@ -215,7 +201,7 @@ public class TermVectorsService {
}
}
private Analyzer getAnalyzerAtField(IndexShard indexShard, String field, @Nullable Map<String, String> perFieldAnalyzer) {
private static Analyzer getAnalyzerAtField(IndexShard indexShard, String field, @Nullable Map<String, String> perFieldAnalyzer) {
MapperService mapperService = indexShard.mapperService();
Analyzer analyzer;
if (perFieldAnalyzer != null && perFieldAnalyzer.containsKey(field)) {
@ -229,7 +215,7 @@ public class TermVectorsService {
return analyzer;
}
private Set<String> getFieldsToGenerate(Map<String, String> perAnalyzerField, Fields fieldsObject) {
private static Set<String> getFieldsToGenerate(Map<String, String> perAnalyzerField, Fields fieldsObject) {
Set<String> selectedFields = new HashSet<>();
for (String fieldName : fieldsObject) {
if (perAnalyzerField.containsKey(fieldName)) {
@ -239,7 +225,7 @@ public class TermVectorsService {
return selectedFields;
}
private Fields generateTermVectors(IndexShard indexShard, Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields)
private static Fields generateTermVectors(IndexShard indexShard, Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields)
throws IOException {
/* store document in memory index */
MemoryIndex index = new MemoryIndex(withOffsets);
@ -258,7 +244,7 @@ public class TermVectorsService {
return MultiFields.getFields(index.createSearcher().getIndexReader());
}
private Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request, boolean doAllFields) throws Throwable {
private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request, boolean doAllFields) throws Throwable {
// parse the document; at the moment we do update the mapping, just like percolate does
ParsedDocument parsedDocument = parseDocument(indexShard, indexShard.shardId().getIndexName(), request.type(), request.doc());
@ -289,7 +275,7 @@ public class TermVectorsService {
return generateTermVectors(indexShard, getFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
}
private ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc) throws Throwable {
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc) throws Throwable {
MapperService mapperService = indexShard.mapperService();
DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
ParsedDocument parsedDocument = docMapper.getDocumentMapper().parse(source(doc).index(index).type(type).id("_id_for_tv_api"));
@ -299,7 +285,7 @@ public class TermVectorsService {
return parsedDocument;
}
private Fields mergeFields(Fields fields1, Fields fields2) throws IOException {
private static Fields mergeFields(Fields fields1, Fields fields2) throws IOException {
ParallelFields parallelFields = new ParallelFields();
for (String fieldName : fields2) {
Terms terms = fields2.terms(fieldName);
@ -346,14 +332,4 @@ public class TermVectorsService {
}
}
private boolean useDfs(TermVectorsRequest request) {
return request.dfs() && (request.fieldStatistics() || request.termStatistics());
}
private AggregatedDfs getAggregatedDfs(Fields termVectorsFields, TermVectorsRequest request) throws IOException {
DfsOnlyRequest dfsOnlyRequest = new DfsOnlyRequest(termVectorsFields, new String[]{request.index()},
new String[]{request.type()}, request.selectedFields());
DfsOnlyResponse response = dfsAction.execute(dfsOnlyRequest).actionGet();
return response.getDfs();
}
}
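With the @Inject constructor and the dfs plumbing gone, TermVectorsService is reduced to a pure static utility, so call sites presumably read TermVectorsService.getTermVectors(indexShard, request) instead of resolving an injected instance. A tiny plain-Java sketch of the private-constructor idiom the diff adopts (names here are illustrative, not the real classes):

public final class StaticUtilitySketch {
    private StaticUtilitySketch() {} // no instances, mirroring TermVectorsService above

    public static String describe(String shard, String request) {
        return "term vectors for [" + request + "] on [" + shard + "]";
    }
}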


@ -172,7 +172,6 @@ public class IndicesModule extends AbstractModule {
bind(UpdateHelper.class).asEagerSingleton();
bind(MetaDataIndexUpgradeService.class).asEagerSingleton();
bind(IndicesFieldDataCacheListener.class).asEagerSingleton();
bind(TermVectorsService.class).asEagerSingleton();
bind(NodeServicesProvider.class).asEagerSingleton();
}


@ -33,10 +33,10 @@ public class IngestService implements Closeable {
private final PipelineStore pipelineStore;
private final PipelineExecutionService pipelineExecutionService;
private final ProcessorsRegistry processorsRegistry;
private final ProcessorsRegistry.Builder processorsRegistryBuilder;
public IngestService(Settings settings, ThreadPool threadPool, ProcessorsRegistry processorsRegistry) {
this.processorsRegistry = processorsRegistry;
public IngestService(Settings settings, ThreadPool threadPool, ProcessorsRegistry.Builder processorsRegistryBuilder) {
this.processorsRegistryBuilder = processorsRegistryBuilder;
this.pipelineStore = new PipelineStore(settings);
this.pipelineExecutionService = new PipelineExecutionService(pipelineStore, threadPool);
}
@ -50,7 +50,7 @@ public class IngestService implements Closeable {
}
public void setScriptService(ScriptService scriptService) {
pipelineStore.buildProcessorFactoryRegistry(processorsRegistry, scriptService);
pipelineStore.buildProcessorFactoryRegistry(processorsRegistryBuilder, scriptService);
}
@Override


@ -19,7 +19,6 @@
package org.elasticsearch.ingest;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
@ -48,12 +47,11 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
public class PipelineStore extends AbstractComponent implements Closeable, ClusterStateListener {
private final Pipeline.Factory factory = new Pipeline.Factory();
private Map<String, Processor.Factory> processorFactoryRegistry;
private ProcessorsRegistry processorRegistry;
// Ideally this should be in IngestMetadata class, but we don't have the processor factories around there.
// We know of all the processor factories when a node with all its plugin have been initialized. Also some
@ -65,27 +63,16 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
super(settings);
}
public void buildProcessorFactoryRegistry(ProcessorsRegistry processorsRegistry, ScriptService scriptService) {
Map<String, Processor.Factory> processorFactories = new HashMap<>();
public void buildProcessorFactoryRegistry(ProcessorsRegistry.Builder processorsRegistryBuilder, ScriptService scriptService) {
TemplateService templateService = new InternalTemplateService(scriptService);
for (Map.Entry<String, Function<TemplateService, Processor.Factory<?>>> entry : processorsRegistry.entrySet()) {
Processor.Factory processorFactory = entry.getValue().apply(templateService);
processorFactories.put(entry.getKey(), processorFactory);
}
this.processorFactoryRegistry = Collections.unmodifiableMap(processorFactories);
this.processorRegistry = processorsRegistryBuilder.build(templateService);
}
@Override
public void close() throws IOException {
// TODO: When org.elasticsearch.node.Node can close Closable instances we should try to remove this code,
// since any wired closable should be able to close itself
List<Closeable> closeables = new ArrayList<>();
for (Processor.Factory factory : processorFactoryRegistry.values()) {
if (factory instanceof Closeable) {
closeables.add((Closeable) factory);
}
}
IOUtils.close(closeables);
processorRegistry.close();
}
@Override
@ -102,7 +89,7 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
Map<String, Pipeline> pipelines = new HashMap<>();
for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
try {
pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry));
pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorRegistry));
} catch (ElasticsearchParseException e) {
throw e;
} catch (Exception e) {
@ -156,7 +143,7 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
// validates the pipeline and processor configuration before submitting a cluster update task:
Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
try {
factory.create(request.getId(), pipelineConfig, processorFactoryRegistry);
factory.create(request.getId(), pipelineConfig, processorRegistry);
} catch(Exception e) {
listener.onFailure(e);
return;
@ -199,8 +186,8 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
return pipelines.get(id);
}
public Map<String, Processor.Factory> getProcessorFactoryRegistry() {
return processorFactoryRegistry;
public ProcessorsRegistry getProcessorRegistry() {
return processorRegistry;
}
/**


@ -19,29 +19,69 @@
package org.elasticsearch.ingest;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.TemplateService;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.BiFunction;
public class ProcessorsRegistry {
public final class ProcessorsRegistry implements Closeable {
private final Map<String, Function<TemplateService, Processor.Factory<?>>> processorFactoryProviders = new HashMap<>();
private final Map<String, Processor.Factory> processorFactories;
/**
* Adds a processor factory under a specific name.
*/
public void registerProcessor(String name, Function<TemplateService, Processor.Factory<?>> processorFactoryProvider) {
Function<TemplateService, Processor.Factory<?>> provider = processorFactoryProviders.putIfAbsent(name, processorFactoryProvider);
if (provider != null) {
throw new IllegalArgumentException("Processor factory already registered for name [" + name + "]");
private ProcessorsRegistry(TemplateService templateService,
Map<String, BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>>> providers) {
Map<String, Processor.Factory> processorFactories = new HashMap<>();
for (Map.Entry<String, BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>>> entry : providers.entrySet()) {
processorFactories.put(entry.getKey(), entry.getValue().apply(templateService, this));
}
this.processorFactories = Collections.unmodifiableMap(processorFactories);
}
public Set<Map.Entry<String, Function<TemplateService, Processor.Factory<?>>>> entrySet() {
return processorFactoryProviders.entrySet();
public Processor.Factory getProcessorFactory(String name) {
return processorFactories.get(name);
}
@Override
public void close() throws IOException {
List<Closeable> closeables = new ArrayList<>();
for (Processor.Factory factory : processorFactories.values()) {
if (factory instanceof Closeable) {
closeables.add((Closeable) factory);
}
}
IOUtils.close(closeables);
}
// For testing:
Map<String, Processor.Factory> getProcessorFactories() {
return processorFactories;
}
public static final class Builder {
private final Map<String, BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>>> providers = new HashMap<>();
/**
* Adds a processor factory under a specific name.
*/
public void registerProcessor(String name, BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>> provider) {
BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>> previous = this.providers.putIfAbsent(name, provider);
if (previous != null) {
throw new IllegalArgumentException("Processor factory already registered for name [" + name + "]");
}
}
public ProcessorsRegistry build(TemplateService templateService) {
return new ProcessorsRegistry(templateService, providers);
}
}
}
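The Builder above exists because processor factories need a TemplateService, which only becomes available late in node startup: registrations are collected first and the registry is materialized once the dependency arrives. A simplified, self-contained sketch of that deferred-build idiom (types reduced to strings for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class DeferredRegistrySketch {
    private final Map<String, String> entries;

    private DeferredRegistrySketch(Map<String, String> entries) {
        this.entries = entries;
    }

    String get(String name) {
        return entries.get(name);
    }

    static final class Builder {
        private final Map<String, Function<String, String>> providers = new HashMap<>();

        // Registration happens early and rejects duplicates, like registerProcessor above.
        Builder register(String name, Function<String, String> provider) {
            if (providers.putIfAbsent(name, provider) != null) {
                throw new IllegalArgumentException("already registered for name [" + name + "]");
            }
            return this;
        }

        // The late dependency (a stand-in for TemplateService) is supplied only here.
        DeferredRegistrySketch build(String dependency) {
            Map<String, String> built = new HashMap<>();
            providers.forEach((name, provider) -> built.put(name, provider.apply(dependency)));
            return new DeferredRegistrySketch(built);
        }
    }
}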


@ -19,9 +19,12 @@
package org.elasticsearch.ingest.core;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.ProcessorsRegistry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -178,4 +181,34 @@ public final class ConfigurationUtils {
}
return exception;
}
public static List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, ProcessorsRegistry processorRegistry) throws Exception {
List<Processor> processors = new ArrayList<>();
if (processorConfigs != null) {
for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
for (Map.Entry<String, Map<String, Object>> entry : processorConfigWithKey.entrySet()) {
processors.add(readProcessor(processorRegistry, entry.getKey(), entry.getValue()));
}
}
}
return processors;
}
private static Processor readProcessor(ProcessorsRegistry processorRegistry, String type, Map<String, Object> config) throws Exception {
Processor.Factory factory = processorRegistry.getProcessorFactory(type);
if (factory != null) {
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
Processor processor;
processor = factory.create(config);
if (!config.isEmpty()) {
throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
}
if (onFailureProcessors.isEmpty()) {
return processor;
}
return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
}
throw new ElasticsearchParseException("No processor type exists with name [" + type + "]");
}
}
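readProcessor relies on a convention that is easy to miss: every factory removes the keys it understands from the mutable config map, so a non-empty map afterwards means unknown parameters. A minimal sketch of that consume-then-reject pattern (the helper is hypothetical, not the real ConfigurationUtils API):

import java.util.HashMap;
import java.util.Map;

final class LeftoverConfigSketch {
    static String readRequired(Map<String, Object> config, String key) {
        Object value = config.remove(key); // consume what we understand
        if (value == null) {
            throw new IllegalArgumentException("missing required property [" + key + "]");
        }
        return value.toString();
    }

    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("field", "message");
        config.put("fiel", "typo"); // an unknown key
        readRequired(config, "field");
        if (!config.isEmpty()) { // mirrors the check in readProcessor above
            System.out.println("unsupported parameters: " + config.keySet()); // readProcessor throws here
        }
    }
}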


@ -20,8 +20,8 @@
package org.elasticsearch.ingest.core;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.ProcessorsRegistry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@ -85,12 +85,12 @@ public final class Pipeline {
public final static class Factory {
public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws Exception {
public Pipeline create(String id, Map<String, Object> config, ProcessorsRegistry processorRegistry) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry);
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorRegistry);
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
List<Processor> onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
if (config.isEmpty() == false) {
throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
}
@ -98,35 +98,5 @@ public final class Pipeline {
return new Pipeline(id, description, compoundProcessor);
}
private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws Exception {
List<Processor> processors = new ArrayList<>();
if (processorConfigs != null) {
for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
for (Map.Entry<String, Map<String, Object>> entry : processorConfigWithKey.entrySet()) {
processors.add(readProcessor(processorRegistry, entry.getKey(), entry.getValue()));
}
}
}
return processors;
}
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws Exception {
Processor.Factory factory = processorRegistry.get(type);
if (factory != null) {
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
Processor processor;
processor = factory.create(config);
if (!config.isEmpty()) {
throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
}
if (onFailureProcessors.isEmpty()) {
return processor;
}
return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
}
throw new ElasticsearchParseException("No processor type exists with name [" + type + "]");
}
}
}


@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
import org.elasticsearch.ingest.ProcessorsRegistry;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Processor;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.ingest.core.ConfigurationUtils.readList;
import static org.elasticsearch.ingest.core.ConfigurationUtils.readStringProperty;
/**
 * A processor that, for each value in a list, executes one or more processors.
 *
 * This can be useful for doing string operations on the elements of a json array
 * of strings, or for removing a field from the objects inside a json array.
*/
public final class ForEachProcessor extends AbstractProcessor {
public static final String TYPE = "foreach";
private final String field;
private final List<Processor> processors;
ForEachProcessor(String tag, String field, List<Processor> processors) {
super(tag);
this.field = field;
this.processors = processors;
}
@Override
public void execute(IngestDocument ingestDocument) throws Exception {
List<Object> values = ingestDocument.getFieldValue(field, List.class);
List<Object> newValues = new ArrayList<>(values.size());
for (Object value : values) {
Map<String, Object> innerSource = new HashMap<>();
innerSource.put("_value", value);
for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) {
innerSource.put(metaData.getFieldName(), ingestDocument.getSourceAndMetadata().get(metaData.getFieldName()));
}
IngestDocument innerIngestDocument = new IngestDocument(innerSource, ingestDocument.getIngestMetadata());
for (Processor processor : processors) {
processor.execute(innerIngestDocument);
}
newValues.add(innerSource.get("_value"));
}
ingestDocument.setFieldValue(field, newValues);
}
@Override
public String getType() {
return TYPE;
}
String getField() {
return field;
}
List<Processor> getProcessors() {
return processors;
}
public static final class Factory extends AbstractProcessorFactory<ForEachProcessor> {
private final ProcessorsRegistry processorRegistry;
public Factory(ProcessorsRegistry processorRegistry) {
this.processorRegistry = processorRegistry;
}
@Override
protected ForEachProcessor doCreate(String tag, Map<String, Object> config) throws Exception {
String field = readStringProperty(TYPE, tag, config, "field");
List<Map<String, Map<String, Object>>> processorConfigs = readList(TYPE, tag, config, "processors");
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorRegistry);
return new ForEachProcessor(tag, field, Collections.unmodifiableList(processors));
}
}
}
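The "_value" mechanics above are the heart of the processor: each element is copied into a scratch document, mutated by the inner processors, and read back (the metadata copying is elided here). A standalone sketch of those semantics, using plain maps instead of the ingest types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

final class ForEachSemanticsSketch {
    static List<Object> forEach(List<Object> values, Consumer<Map<String, Object>> innerProcessor) {
        List<Object> newValues = new ArrayList<>(values.size());
        for (Object value : values) {
            Map<String, Object> scratch = new HashMap<>();
            scratch.put("_value", value);          // expose the element under "_value"
            innerProcessor.accept(scratch);        // inner processor may replace it
            newValues.add(scratch.get("_value"));  // collect the possibly mutated value
        }
        return newValues;
    }

    public static void main(String[] args) {
        List<Object> upper = forEach(Arrays.asList("a", "b"),
                doc -> doc.put("_value", doc.get("_value").toString().toUpperCase()));
        System.out.println(upper); // [A, B]
    }
}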


@ -183,7 +183,6 @@ public class Node implements Closeable {
throw new IllegalStateException("Failed to create node environment", ex);
}
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
final ThreadPool threadPool = new ThreadPool(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
@ -197,7 +196,7 @@ public class Node implements Closeable {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
SettingsModule settingsModule = new SettingsModule(this.settings, settingsFilter);
SettingsModule settingsModule = new SettingsModule(this.settings);
modules.add(settingsModule);
modules.add(new EnvironmentModule(environment));
modules.add(new NodeModule(this, monitorService));


@ -29,6 +29,7 @@ import org.elasticsearch.ingest.processor.AppendProcessor;
import org.elasticsearch.ingest.processor.ConvertProcessor;
import org.elasticsearch.ingest.processor.DateProcessor;
import org.elasticsearch.ingest.processor.FailProcessor;
import org.elasticsearch.ingest.processor.ForEachProcessor;
import org.elasticsearch.ingest.processor.GsubProcessor;
import org.elasticsearch.ingest.processor.JoinProcessor;
import org.elasticsearch.ingest.processor.LowercaseProcessor;
@ -41,7 +42,7 @@ import org.elasticsearch.ingest.processor.UppercaseProcessor;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.service.NodeService;
import java.util.function.Function;
import java.util.function.BiFunction;
/**
*
@ -50,7 +51,7 @@ public class NodeModule extends AbstractModule {
private final Node node;
private final MonitorService monitorService;
private final ProcessorsRegistry processorsRegistry;
private final ProcessorsRegistry.Builder processorsRegistryBuilder;
// pkg private so tests can mock
Class<? extends PageCacheRecycler> pageCacheRecyclerImpl = PageCacheRecycler.class;
@ -59,21 +60,22 @@ public class NodeModule extends AbstractModule {
public NodeModule(Node node, MonitorService monitorService) {
this.node = node;
this.monitorService = monitorService;
this.processorsRegistry = new ProcessorsRegistry();
this.processorsRegistryBuilder = new ProcessorsRegistry.Builder();
registerProcessor(DateProcessor.TYPE, (templateService) -> new DateProcessor.Factory());
registerProcessor(SetProcessor.TYPE, SetProcessor.Factory::new);
registerProcessor(AppendProcessor.TYPE, AppendProcessor.Factory::new);
registerProcessor(RenameProcessor.TYPE, (templateService) -> new RenameProcessor.Factory());
registerProcessor(RemoveProcessor.TYPE, RemoveProcessor.Factory::new);
registerProcessor(SplitProcessor.TYPE, (templateService) -> new SplitProcessor.Factory());
registerProcessor(JoinProcessor.TYPE, (templateService) -> new JoinProcessor.Factory());
registerProcessor(UppercaseProcessor.TYPE, (templateService) -> new UppercaseProcessor.Factory());
registerProcessor(LowercaseProcessor.TYPE, (templateService) -> new LowercaseProcessor.Factory());
registerProcessor(TrimProcessor.TYPE, (templateService) -> new TrimProcessor.Factory());
registerProcessor(ConvertProcessor.TYPE, (templateService) -> new ConvertProcessor.Factory());
registerProcessor(GsubProcessor.TYPE, (templateService) -> new GsubProcessor.Factory());
registerProcessor(FailProcessor.TYPE, FailProcessor.Factory::new);
registerProcessor(DateProcessor.TYPE, (templateService, registry) -> new DateProcessor.Factory());
registerProcessor(SetProcessor.TYPE, (templateService, registry) -> new SetProcessor.Factory(templateService));
registerProcessor(AppendProcessor.TYPE, (templateService, registry) -> new AppendProcessor.Factory(templateService));
registerProcessor(RenameProcessor.TYPE, (templateService, registry) -> new RenameProcessor.Factory());
registerProcessor(RemoveProcessor.TYPE, (templateService, registry) -> new RemoveProcessor.Factory(templateService));
registerProcessor(SplitProcessor.TYPE, (templateService, registry) -> new SplitProcessor.Factory());
registerProcessor(JoinProcessor.TYPE, (templateService, registry) -> new JoinProcessor.Factory());
registerProcessor(UppercaseProcessor.TYPE, (templateService, registry) -> new UppercaseProcessor.Factory());
registerProcessor(LowercaseProcessor.TYPE, (templateService, registry) -> new LowercaseProcessor.Factory());
registerProcessor(TrimProcessor.TYPE, (templateService, registry) -> new TrimProcessor.Factory());
registerProcessor(ConvertProcessor.TYPE, (templateService, registry) -> new ConvertProcessor.Factory());
registerProcessor(GsubProcessor.TYPE, (templateService, registry) -> new GsubProcessor.Factory());
registerProcessor(FailProcessor.TYPE, (templateService, registry) -> new FailProcessor.Factory(templateService));
registerProcessor(ForEachProcessor.TYPE, (templateService, registry) -> new ForEachProcessor.Factory(registry));
}
@Override
@ -92,7 +94,7 @@ public class NodeModule extends AbstractModule {
bind(Node.class).toInstance(node);
bind(MonitorService.class).toInstance(monitorService);
bind(NodeService.class).asEagerSingleton();
bind(ProcessorsRegistry.class).toInstance(processorsRegistry);
bind(ProcessorsRegistry.Builder.class).toInstance(processorsRegistryBuilder);
}
/**
@ -105,7 +107,7 @@ public class NodeModule extends AbstractModule {
/**
* Adds a processor factory under a specific type name.
*/
public void registerProcessor(String type, Function<TemplateService, Processor.Factory<?>> processorFactoryProvider) {
processorsRegistry.registerProcessor(type, processorFactoryProvider);
public void registerProcessor(String type, BiFunction<TemplateService, ProcessorsRegistry, Processor.Factory<?>> provider) {
processorsRegistryBuilder.registerProcessor(type, provider);
}
}
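For plugin authors, the visible effect of the signature change is one extra lambda parameter, which is what gives ForEachProcessor access to the registry being built. A hypothetical registration against the new API ("my_processor" and MyProcessor.Factory are made-up names; only registerProcessor's new signature comes from the diff):

// Hypothetical plugin hook illustrating the new BiFunction signature.
public void onModule(NodeModule nodeModule) {
    nodeModule.registerProcessor("my_processor",
            (templateService, registry) -> new MyProcessor.Factory(templateService));
}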


@ -237,8 +237,8 @@ public class InternalSettingsPreparer {
}
if (secret) {
return new String(terminal.readSecret("Enter value for [" + key + "]: ", key));
return new String(terminal.readSecret("Enter value for [" + key + "]: "));
}
return terminal.readText("Enter value for [" + key + "]: ", key);
return terminal.readText("Enter value for [" + key + "]: ");
}
}


@ -29,8 +29,8 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.Environment;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
@ -61,6 +61,7 @@ public class NodeService extends AbstractComponent implements Closeable {
private final PluginsService pluginService;
private final CircuitBreakerService circuitBreakerService;
private final IngestService ingestService;
private final SettingsFilter settingsFilter;
private ScriptService scriptService;
@Nullable
@ -73,10 +74,10 @@ public class NodeService extends AbstractComponent implements Closeable {
private final Discovery discovery;
@Inject
public NodeService(Settings settings, Environment environment, ThreadPool threadPool, MonitorService monitorService,
public NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService,
Discovery discovery, TransportService transportService, IndicesService indicesService,
PluginsService pluginService, CircuitBreakerService circuitBreakerService, Version version,
ProcessorsRegistry processorsRegistry, ClusterService clusterService) {
ProcessorsRegistry.Builder processorsRegistryBuilder, ClusterService clusterService, SettingsFilter settingsFilter) {
super(settings);
this.threadPool = threadPool;
this.monitorService = monitorService;
@ -87,7 +88,8 @@ public class NodeService extends AbstractComponent implements Closeable {
this.version = version;
this.pluginService = pluginService;
this.circuitBreakerService = circuitBreakerService;
this.ingestService = new IngestService(settings, threadPool, processorsRegistry);
this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder);
this.settingsFilter = settingsFilter;
clusterService.add(ingestService.getPipelineStore());
}
@ -137,7 +139,7 @@ public class NodeService extends AbstractComponent implements Closeable {
public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool,
boolean transport, boolean http, boolean plugin) {
return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes,
settings ? this.settings : null,
settings ? settingsFilter.filter(this.settings) : null,
os ? monitorService.osService().info() : null,
process ? monitorService.processService().info() : null,
jvm ? monitorService.jvmService().info() : null,


@ -235,6 +235,7 @@ class InstallPluginCommand extends CliTool.Command {
zipInput.closeEntry();
}
}
Files.delete(zip);
return target;
}


@ -47,7 +47,7 @@ class PluginSecurity {
PermissionCollection permissions = parsePermissions(terminal, file, environment.tmpFile());
List<Permission> requested = Collections.list(permissions.elements());
if (requested.isEmpty()) {
terminal.print(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions");
terminal.println(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions");
return;
}
@ -92,7 +92,7 @@ class PluginSecurity {
terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html");
terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks.");
if (!batch) {
terminal.println(Verbosity.NORMAL);
terminal.println(Verbosity.NORMAL, "");
String text = terminal.readText("Continue with installation? [y/N]");
if (!text.equalsIgnoreCase("y")) {
throw new RuntimeException("installation aborted by user");


@ -25,8 +25,10 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
@ -45,12 +47,14 @@ import java.io.IOException;
public class RestClusterGetSettingsAction extends BaseRestHandler {
private final ClusterSettings clusterSettings;
private final SettingsFilter settingsFilter;
@Inject
public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings) {
public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings, SettingsFilter settingsFilter) {
super(settings, client);
this.clusterSettings = clusterSettings;
controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this);
this.settingsFilter = settingsFilter;
}
@Override
@ -81,7 +85,7 @@ public class RestClusterGetSettingsAction extends BaseRestHandler {
if (renderDefaults) {
builder.startObject("defaults");
clusterSettings.diff(state.metaData().settings(), this.settings).toXContent(builder, params);
settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)).toXContent(builder, params);
builder.endObject();
}
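The same pattern repeats across the REST handlers in this commit: instead of assuming the injected Settings were pre-filtered, each handler pipes whatever it is about to render through SettingsFilter.filter at read time. A short usage sketch (how the filter patterns get registered is elided here; the constructor and filter call appear elsewhere in this diff):

// Filtering at read time: only the filtered view ever reaches the response.
SettingsFilter settingsFilter = new SettingsFilter(settings);
Settings visibleToClients = settingsFilter.filter(settings);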


@ -31,6 +31,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
@ -54,13 +55,15 @@ import static org.elasticsearch.rest.RestStatus.OK;
public class RestGetIndicesAction extends BaseRestHandler {
private final IndexScopedSettings indexScopedSettings;
private final SettingsFilter settingsFilter;
@Inject
public RestGetIndicesAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) {
public RestGetIndicesAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter) {
super(settings, client);
this.indexScopedSettings = indexScopedSettings;
controller.registerHandler(GET, "/{index}", this);
controller.registerHandler(GET, "/{index}/{type}", this);
this.settingsFilter = settingsFilter;
}
@Override
@ -143,7 +146,7 @@ public class RestGetIndicesAction extends BaseRestHandler {
builder.endObject();
if (renderDefaults) {
builder.startObject("defaults");
indexScopedSettings.diff(settings, settings).toXContent(builder, request);
settingsFilter.filter(indexScopedSettings.diff(settings, RestGetIndicesAction.this.settings)).toXContent(builder, request);
builder.endObject();
}
}


@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
@ -43,14 +44,16 @@ import static org.elasticsearch.rest.RestStatus.OK;
public class RestGetSettingsAction extends BaseRestHandler {
private final IndexScopedSettings indexScopedSettings;
private final SettingsFilter settingsFilter;
@Inject
public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) {
public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) {
super(settings, client);
this.indexScopedSettings = indexScopedSettings;
controller.registerHandler(GET, "/{index}/_settings/{name}", this);
controller.registerHandler(GET, "/_settings/{name}", this);
controller.registerHandler(GET, "/{index}/_setting/{name}", this);
this.settingsFilter = settingsFilter;
}
@Override
@ -80,7 +83,7 @@ public class RestGetSettingsAction extends BaseRestHandler {
builder.endObject();
if (renderDefaults) {
builder.startObject("defaults");
indexScopedSettings.diff(cursor.value, settings).toXContent(builder, request);
settingsFilter.filter(indexScopedSettings.diff(cursor.value, settings)).toXContent(builder, request);
builder.endObject();
}
builder.endObject();


@ -91,7 +91,6 @@ public class RestTermVectorsAction extends BaseRestHandler {
termVectorsRequest.termStatistics(request.paramAsBoolean("term_statistics", termVectorsRequest.termStatistics()));
termVectorsRequest.fieldStatistics(request.paramAsBoolean("fieldStatistics", termVectorsRequest.fieldStatistics()));
termVectorsRequest.fieldStatistics(request.paramAsBoolean("field_statistics", termVectorsRequest.fieldStatistics()));
termVectorsRequest.dfs(request.paramAsBoolean("dfs", termVectorsRequest.dfs()));
}
public static void addFieldStringsFromParameter(TermVectorsRequest termVectorsRequest, String fields) {


@ -20,10 +20,12 @@
package org.elasticsearch.rest.support;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.path.PathTrie;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Map;
import java.util.regex.Pattern;
@ -238,4 +240,21 @@ public class RestUtils {
return null;
}
/**
* Return the CORS setting as an array of origins.
*
* @param corsSetting the CORS allow origin setting as configured by the user;
* callers should never pass null, but we check for it anyway.
* @return an array of origins if set, otherwise an empty array.
*/
public static String[] corsSettingAsArray(String corsSetting) {
if (Strings.isNullOrEmpty(corsSetting)) {
return new String[0];
}
return Arrays.asList(corsSetting.split(","))
.stream()
.map(String::trim)
.toArray(size -> new String[size]);
}
}
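A quick usage sketch of the new helper, with the expected values shown as comments:

String[] origins = RestUtils.corsSettingAsArray("http://a.example, http://b.example");
// -> ["http://a.example", "http://b.example"], whitespace trimmed
String[] none = RestUtils.corsSettingAsArray(null);
// -> an empty array, never null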


@ -23,6 +23,8 @@ package org.elasticsearch.tasks;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.xcontent.ToXContent;
/**
* Current task information
@ -57,9 +59,24 @@ public class Task {
this.parentId = parentId;
}
/**
* Build a version of the task status you can throw over the wire and back
* to the user.
*
* @param node
* the node this task is running on
* @param detailed
* should the information include detailed, potentially slow to
* generate data?
*/
public TaskInfo taskInfo(DiscoveryNode node, boolean detailed) {
return new TaskInfo(node, getId(), getType(), getAction(), detailed ? getDescription() : null, parentNode, parentId);
String description = null;
Task.Status status = null;
if (detailed) {
description = getDescription();
status = getStatus();
}
return new TaskInfo(node, getId(), getType(), getAction(), description, status, parentNode, parentId);
}
/**
@ -104,4 +121,15 @@ public class Task {
return parentId;
}
/**
* Build a status for this task or null if this task doesn't have status.
* Since most tasks don't have status this defaults to returning null. While
* this can never perform IO, it might still be a costly operation, requiring
* collating lists of results, etc. So only use it if you need the value.
*/
public Status getStatus() {
return null;
}
public interface Status extends ToXContent, NamedWriteable<Status> {}
}
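The contract worth noting is that status is only built when detailed info was requested, because getStatus() may be expensive. A simplified, self-contained model of that contract (not the real Task/TaskInfo types, and the NamedWriteable wire plumbing is omitted):

final class DetailedStatusSketch {
    interface Status {
        String describe();
    }

    static class TaskModel {
        Status getStatus() {
            return null; // default: most tasks have no status
        }

        // Status is computed only when detailed info was asked for.
        String taskInfo(boolean detailed) {
            Status status = detailed ? getStatus() : null;
            return "task" + (status == null ? "" : " [" + status.describe() + "]");
        }
    }

    public static void main(String[] args) {
        TaskModel task = new TaskModel() {
            @Override
            Status getStatus() {
                return () -> "phase=replicating";
            }
        };
        System.out.println(task.taskInfo(false)); // task
        System.out.println(task.taskInfo(true));  // task [phase=replicating]
    }
}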


@ -20,11 +20,13 @@
package org.elasticsearch.transport;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.metrics.MeanMetric;
@ -41,6 +43,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
@ -109,11 +112,11 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
volatile DiscoveryNode localNode = null;
public TransportService(Transport transport, ThreadPool threadPool) {
this(EMPTY_SETTINGS, transport, threadPool);
this(EMPTY_SETTINGS, transport, threadPool, new NamedWriteableRegistry());
}
@Inject
public TransportService(Settings settings, Transport transport, ThreadPool threadPool) {
public TransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) {
super(settings);
this.transport = transport;
this.threadPool = threadPool;
@ -122,6 +125,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
tracerLog = Loggers.getLogger(logger, ".tracer");
adapter = createAdapter();
taskManager = createTaskManager();
namedWriteableRegistry.registerPrototype(Task.Status.class, ReplicationTask.Status.PROTOTYPE);
}
/**


@ -333,6 +333,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
}
protected void handleResponse(StreamInput buffer, LocalTransport sourceTransport, final TransportResponseHandler handler) {
buffer = new NamedWriteableAwareStreamInput(buffer, namedWriteableRegistry);
final TransportResponse response = handler.newInstance();
response.remoteAddress(sourceTransport.boundAddress.publishAddress());
try {


@ -192,6 +192,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
}
protected void handleResponse(Channel channel, StreamInput buffer, final TransportResponseHandler handler) {
buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry);
final TransportResponse response = handler.newInstance();
response.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
response.remoteAddress();
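Both transports apply the same one-line fix: the raw response stream is wrapped so that a handler deserializing a response can resolve named writeables (such as the new Task.Status) against the registry. A generic sketch of that decorator idea, with the real StreamInput/NamedWriteableRegistry pair reduced to a map:

import java.util.Map;

final class RegistryAwareInputSketch {
    private final Map<String, Object> prototypesByName;

    RegistryAwareInputSketch(Map<String, Object> prototypesByName) {
        this.prototypesByName = prototypesByName;
    }

    // The real code reads a name off the wire, then asks the matching
    // prototype to deserialize itself from the rest of the stream.
    Object readNamed(String name) {
        Object prototype = prototypesByName.get(name);
        if (prototype == null) {
            throw new IllegalStateException("no prototype registered for [" + name + "]");
        }
        return prototype;
    }
}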


@ -226,7 +226,7 @@ public class ExceptionSerializationTests extends ESTestCase {
}
public void testIllegalShardRoutingStateException() throws IOException {
final ShardRouting routing = TestShardRouting.newShardRouting("test", 0, "xyz", "def", false, ShardRoutingState.STARTED, 0);
final ShardRouting routing = TestShardRouting.newShardRouting("test", 0, "xyz", "def", false, ShardRoutingState.STARTED);
final String routingAsString = routing.toString();
IllegalShardRoutingStateException serialize = serialize(
new IllegalShardRoutingStateException(routing, "foo", new NullPointerException()));


@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
@ -25,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -32,20 +34,27 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.test.tasks.MockTaskManagerListener;
import org.elasticsearch.test.transport.MockTransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
/**
* Integration tests for task management API
@ -218,6 +227,59 @@ public class TasksIT extends ESIntegTestCase {
}
}
/**
* Very basic "is it plugged in" style test that indexes a document and
* makes sure that you can fetch the status of the process. The goal here is
* to verify that the large moving parts that make fetching task status work
* fit together rather than to verify any particular status results from
* indexing. For that, look at
* {@link org.elasticsearch.action.support.replication.TransportReplicationActionTests}.
* We intentionally don't use the task recording mechanism used in other
* places in this test so we can make sure that the status fetching works
* properly over the wire.
*/
public void testCanFetchIndexStatus() throws InterruptedException, ExecutionException, IOException {
/*
* We prevent any tasks from unregistering until the test is done so we
* can fetch them. This will gum up the server if we leave it enabled
* but we'll be quick so it'll be OK (TM).
*/
ReentrantLock taskFinishLock = new ReentrantLock();
taskFinishLock.lock();
for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) {
((MockTaskManager)clusterService.getTaskManager()).addListener(new MockTaskManagerListener() {
@Override
public void onTaskRegistered(Task task) {
// Intentional noop
}
@Override
public void onTaskUnregistered(Task task) {
/*
* We can't block all tasks here or the task listing task
* would never return.
*/
if (false == task.getAction().startsWith(IndexAction.NAME)) {
return;
}
logger.debug("Blocking {} from being unregistered", task);
taskFinishLock.lock();
taskFinishLock.unlock();
}
});
}
ListenableActionFuture<?> indexFuture = client().prepareIndex("test", "test").setSource("test", "test").execute();
ListTasksResponse tasks = client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").setDetailed(true)
.get();
taskFinishLock.unlock();
indexFuture.get();
assertThat(tasks.getTasks(), not(emptyCollectionOf(TaskInfo.class)));
for (TaskInfo task : tasks.getTasks()) {
assertNotNull(task.getStatus());
}
}
@Override
public void tearDown() throws Exception {
for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) {


@ -58,7 +58,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
@ -115,7 +114,7 @@ public class TransportTasksActionTests extends ESTestCase {
public TestNode(String name, ThreadPool threadPool, Settings settings) {
transportService = new TransportService(settings,
new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()),
threadPool){
threadPool, new NamedWriteableRegistry()) {
@Override
protected TaskManager createTaskManager() {
if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) {

View File

@ -158,7 +158,7 @@ public class SyncedFlushUnitTests extends ESTestCase {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null,
copy == 0, ShardRoutingState.STARTED, 0);
copy == 0, ShardRoutingState.STARTED);
if (randomInt(5) < 2) {
// shard copy failure
failed++;


@ -93,7 +93,6 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
assertThat(shardStores.values().size(), equalTo(2));
for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) {
for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) {
assertThat(storeStatus.getVersion(), greaterThan(-1L));
assertThat(storeStatus.getAllocationId(), notNullValue());
assertThat(storeStatus.getNode(), notNullValue());
assertThat(storeStatus.getStoreException(), nullValue());
@ -191,10 +190,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
if (corruptedShardIDMap.containsKey(shardStatus.key)
&& corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertThat(status.getLegacyVersion(), greaterThanOrEqualTo(0L));
assertThat(status.getStoreException(), notNullValue());
} else {
assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertThat(status.getLegacyVersion(), greaterThanOrEqualTo(0L));
assertNull(status.getStoreException());
}
}


@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.shard.ShardStateMetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.NodeDisconnectedException;
@ -54,8 +55,8 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT);
List<IndicesShardStoresResponse.StoreStatus> storeStatusList = new ArrayList<>();
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted")));
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted")));
storeStatuses.put(0, storeStatusList);
storeStatuses.put(1, storeStatusList);
ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storesMap = storeStatuses.build();
@ -96,10 +97,16 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
for (int i = 0; i < stores.size(); i++) {
HashMap storeInfo = ((HashMap) stores.get(i));
IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i);
assertThat(storeInfo.containsKey("version"), equalTo(true));
assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion())));
assertThat(storeInfo.containsKey("allocation_id"), equalTo(true));
assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId())));
boolean eitherLegacyVersionOrAllocationIdSet = false;
if (storeInfo.containsKey("legacy_version")) {
assertThat(((int) storeInfo.get("legacy_version")), equalTo(((int) storeStatus.getLegacyVersion())));
eitherLegacyVersionOrAllocationIdSet = true;
}
if (storeInfo.containsKey("allocation_id")) {
assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId())));
eitherLegacyVersionOrAllocationIdSet = true;
}
assertThat(eitherLegacyVersionOrAllocationIdSet, equalTo(true));
assertThat(storeInfo.containsKey("allocation"), equalTo(true));
assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value()));
assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true));
@ -115,11 +122,15 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
public void testStoreStatusOrdering() throws Exception {
DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT);
List<IndicesShardStoresResponse.StoreStatus> orderedStoreStatuses = new ArrayList<>();
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, ShardStateMetaData.NO_VERSION, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
List<IndicesShardStoresResponse.StoreStatus> storeStatuses = new ArrayList<>(orderedStoreStatuses);
Collections.shuffle(storeStatuses, random());
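The hunk cuts off before the sort assertion, but the two lists above pin down an ordering. A sketch of a comparator that reproduces it, as it might appear inside this test class (inferred for illustration, not the real StoreStatus comparator; getStoreException() is assumed to exist alongside the getters used above):

    Comparator<IndicesShardStoresResponse.StoreStatus> inferredOrder = new Comparator<IndicesShardStoresResponse.StoreStatus>() {
        @Override
        public int compare(IndicesShardStoresResponse.StoreStatus a, IndicesShardStoresResponse.StoreStatus b) {
            // healthy stores sort before corrupted ones
            int cmp = Boolean.compare(a.getStoreException() != null, b.getStoreException() != null);
            if (cmp != 0) return cmp;
            // allocation-id-bearing stores sort before legacy-version-only stores
            cmp = Boolean.compare(a.getAllocationId() == null, b.getAllocationId() == null);
            if (cmp != 0) return cmp;
            // higher legacy versions first
            cmp = Long.compare(b.getLegacyVersion(), a.getLegacyVersion());
            if (cmp != 0) return cmp;
            // primary before replica before unused (enum declaration order)
            return a.getAllocationStatus().compareTo(b.getAllocationStatus());
        }
    };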

View File

@ -20,7 +20,9 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.ProcessorsRegistry;
import org.elasticsearch.ingest.TestProcessor;
import org.elasticsearch.ingest.TestTemplateService;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
@ -54,11 +56,12 @@ public class SimulatePipelineRequestParsingTests extends ESTestCase {
TestProcessor processor = new TestProcessor(ingestDocument -> {});
CompoundProcessor pipelineCompoundProcessor = new CompoundProcessor(processor);
Pipeline pipeline = new Pipeline(SimulatePipelineRequest.SIMULATED_PIPELINE_ID, null, pipelineCompoundProcessor);
Map<String, Processor.Factory> processorRegistry = new HashMap<>();
processorRegistry.put("mock_processor", mock(Processor.Factory.class));
ProcessorsRegistry.Builder processorRegistryBuilder = new ProcessorsRegistry.Builder();
processorRegistryBuilder.registerProcessor("mock_processor", ((templateService, registry) -> mock(Processor.Factory.class)));
ProcessorsRegistry processorRegistry = processorRegistryBuilder.build(TestTemplateService.instance());
store = mock(PipelineStore.class);
when(store.get(SimulatePipelineRequest.SIMULATED_PIPELINE_ID)).thenReturn(pipeline);
when(store.getProcessorFactoryRegistry()).thenReturn(processorRegistry);
when(store.getProcessorRegistry()).thenReturn(processorRegistry);
}
public void testParseUsingPipelineStore() throws Exception {

View File

@ -208,7 +208,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
int numberOfShards = randomIntBetween(1, 10);
for (int j = 0; j < numberOfShards; j++) {
final ShardId shardId = new ShardId(index, "_na_", ++shardIndex);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED, 1);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED);
IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(shardId);
indexShard.addShard(shard);
indexRoutingTable.addIndexShard(indexShard.build());

View File

@ -109,7 +109,7 @@ public class ClusterStateCreationUtils {
} else {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, 0, unassignedInfo));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, unassignedInfo));
for (ShardRoutingState replicaState : replicaStates) {
String replicaNode = null;
@ -125,7 +125,7 @@ public class ClusterStateCreationUtils {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
indexShardRoutingBuilder.addShard(
TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, 0, unassignedInfo));
TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, unassignedInfo));
}
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
@ -161,8 +161,8 @@ public class ClusterStateCreationUtils {
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, 0, null));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, 0, null));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, null));
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, null));
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.action.support.replication;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ReplicationResponse;
@ -44,10 +46,10 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.IndexShardNotStartedException;
@ -64,6 +66,7 @@ import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
import org.hamcrest.Matcher;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@ -86,6 +89,7 @@ import static org.elasticsearch.action.support.replication.ClusterStateCreationU
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
@ -142,27 +146,30 @@ public class TransportReplicationActionTests extends ESTestCase {
public void testBlocks() throws ExecutionException, InterruptedException {
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
ClusterBlocks.Builder block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class);
assertPhase(task, "failed");
block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, new Request().timeout("5ms"), listener);
reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener);
reroutePhase.run();
assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class);
assertPhase(task, "failed");
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, new Request(), listener);
reroutePhase = action.new ReroutePhase(task, new Request(), listener);
reroutePhase.run();
assertFalse("primary phase should wait on retryable block", listener.isDone());
assertPhase(task, "waiting_for_retry");
block = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
@ -181,20 +188,23 @@ public class TransportReplicationActionTests extends ESTestCase {
// no replicas in order to skip the replication part
clusterService.setState(state(index, true,
randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
ReplicationTask task = maybeTask();
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class);
assertPhase(task, "failed");
request = new Request(shardId);
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
assertFalse("unassigned primary didn't cause a retry", listener.isDone());
assertPhase(task, "waiting_for_retry");
clusterService.setState(state(index, true, ShardRoutingState.STARTED));
logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint());
@ -267,9 +277,12 @@ public class TransportReplicationActionTests extends ESTestCase {
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
ReplicationTask task = maybeTask();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class);
assertPhase(task, "failed");
request = new Request(new ShardId(index, "_na_", 10)).timeout("1ms");
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
@ -280,9 +293,9 @@ public class TransportReplicationActionTests extends ESTestCase {
public void testRoutePhaseExecutesRequest() {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ReplicationTask task = maybeTask();
clusterService.setState(stateWithActivePrimary(index, randomBoolean(), 3));
logger.debug("using state: \n{}", clusterService.state().prettyPrint());
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
@ -290,7 +303,7 @@ public class TransportReplicationActionTests extends ESTestCase {
Request request = new Request(shardId);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
assertThat(request.shardId(), equalTo(shardId));
logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId);
@ -299,8 +312,10 @@ public class TransportReplicationActionTests extends ESTestCase {
assertThat(capturedRequests.size(), equalTo(1));
if (clusterService.state().nodes().localNodeId().equals(primaryNodeId)) {
assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
assertPhase(task, "waiting_on_primary");
} else {
assertThat(capturedRequests.get(0).action, equalTo("testAction"));
assertPhase(task, "rerouted");
}
assertIndexShardUninitialized();
}
@ -312,8 +327,9 @@ public class TransportReplicationActionTests extends ESTestCase {
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
@ -335,6 +351,9 @@ public class TransportReplicationActionTests extends ESTestCase {
assertThat(requests, notNullValue());
assertThat(requests.size(), equalTo(1));
assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("testAction[p]"));
assertPhase(task, "primary");
} else {
assertPhase(task, either(equalTo("finished")).or(equalTo("replicating")));
}
}
@ -348,8 +367,9 @@ public class TransportReplicationActionTests extends ESTestCase {
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
@ -359,6 +379,7 @@ public class TransportReplicationActionTests extends ESTestCase {
primaryPhase.run();
assertThat("request was not processed on primary relocation target", request.processedOnPrimary.get(), equalTo(true));
assertThat(movedToReplication.get(), equalTo(true));
assertPhase(task, "replicating");
}
public void testAddedReplicaAfterPrimaryOperation() {
@ -368,6 +389,7 @@ public class TransportReplicationActionTests extends ESTestCase {
clusterService.setState(stateWithActivePrimary(index, true, 0));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED);
ReplicationTask task = maybeTask();
final Action actionWithAddedReplicaAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
@ -382,9 +404,10 @@ public class TransportReplicationActionTests extends ESTestCase {
Request request = new Request(shardId);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithAddedReplicaAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithAddedReplicaAfterPrimaryOp.new PrimaryPhase(task, request, createTransportChannel(listener));
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertPhase(task, "replicating");
Map<String, List<CapturingTransport.CapturedRequest>> capturedRequestsByTargetNode = transport.getCapturedRequestsByTargetNodeAndClear();
for (ShardRouting replica : stateWithAddedReplicas.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards()) {
List<CapturingTransport.CapturedRequest> requests = capturedRequestsByTargetNode.get(replica.currentNodeId());
@ -415,11 +438,14 @@ public class TransportReplicationActionTests extends ESTestCase {
Request request = new Request(shardId);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithRelocatingReplicasAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
ReplicationTask task = maybeTask();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithRelocatingReplicasAfterPrimaryOp.new PrimaryPhase(
task, request, createTransportChannel(listener));
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0);
Map<String, List<CapturingTransport.CapturedRequest>> capturedRequestsByTargetNode = transport.getCapturedRequestsByTargetNodeAndClear();
assertPhase(task, "replicating");
for (String node : new String[] {relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) {
List<CapturingTransport.CapturedRequest> requests = capturedRequestsByTargetNode.get(node);
assertThat(requests, notNullValue());
@ -448,10 +474,13 @@ public class TransportReplicationActionTests extends ESTestCase {
Request request = new Request(shardId);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithDeletedIndexAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
ReplicationTask task = maybeTask();
TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithDeletedIndexAfterPrimaryOp.new PrimaryPhase(
task, request, createTransportChannel(listener));
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
assertThat("replication phase should be skipped if index gets deleted after primary operation", transport.capturedRequestsByTargetNode().size(), equalTo(0));
assertPhase(task, "finished");
}
public void testWriteConsistency() throws ExecutionException, InterruptedException {
@ -496,16 +525,18 @@ public class TransportReplicationActionTests extends ESTestCase {
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
ReplicationTask task = maybeTask();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
if (passesWriteConsistency) {
assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard().shardId()), nullValue());
primaryPhase.run();
assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get());
assertTrue("operations should have been performed, consistency level is met", request.processedOnPrimary.get());
if (assignedReplicas > 0) {
assertIndexShardCounter(2);
} else {
assertIndexShardCounter(1);
}
assertPhase(task, either(equalTo("finished")).or(equalTo("replicating")));
} else {
assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard().shardId()), notNullValue());
primaryPhase.run();
@ -517,10 +548,11 @@ public class TransportReplicationActionTests extends ESTestCase {
}
clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates));
listener = new PlainActionFuture<>();
primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
primaryPhase.run();
assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get());
assertIndexShardCounter(2);
assertPhase(task, "replicating");
}
}
@ -590,6 +622,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardId shardId = shardIt.shardId();
final Request request = new Request(shardId);
final PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());
TransportReplicationAction.IndexShardReference reference = getOrCreateIndexShardOperationsCounter();
@ -599,15 +632,14 @@ public class TransportReplicationActionTests extends ESTestCase {
assertIndexShardCounter(2);
// TODO: set a default timeout
TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
action.new ReplicationPhase(request,
new Response(),
request.shardId(), createTransportChannel(listener), reference);
TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase = action.new ReplicationPhase(task,
request, new Response(), request.shardId(), createTransportChannel(listener), reference);
assertThat(replicationPhase.totalShards(), equalTo(totalShards));
assertThat(replicationPhase.pending(), equalTo(assignedReplicas));
replicationPhase.run();
final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear();
assertPhase(task, either(equalTo("finished")).or(equalTo("replicating")));
HashMap<String, Request> nodesSentTo = new HashMap<>();
boolean executeOnReplica =
@ -718,11 +750,11 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// no replica, we only want to test on primary
clusterService.setState(state(index, true,
ShardRoutingState.STARTED));
clusterService.setState(state(index, true, ShardRoutingState.STARTED));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("100ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
/**
* Execute an action that is stuck in shard operation until a latch is counted down.
@ -732,7 +764,7 @@ public class TransportReplicationActionTests extends ESTestCase {
* However, this failure would only become apparent once listener.get is called. Seems a little implicit.
* */
action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
final TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
final TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
Thread t = new Thread() {
@Override
public void run() {
@ -751,6 +783,7 @@ public class TransportReplicationActionTests extends ESTestCase {
// operation finished, counter back to 0
assertIndexShardCounter(1);
assertThat(transport.capturedRequests().length, equalTo(0));
assertPhase(task, "finished");
}
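The delay-action tests above and below all lean on the same latch trick; reduced to plain JDK code (java.util.concurrent.CountDownLatch), the pattern is:

    CountDownLatch latch = new CountDownLatch(1);
    Thread t = new Thread(() -> {
        try {
            latch.await();        // stands in for the shard operation stuck in ActionWithDelay
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    });
    t.start();
    // ... assert the in-flight state here (e.g. assertIndexShardCounter(2)) ...
    latch.countDown();            // release the blocked operation
    t.join();
    // ... assert the finished state here (counter back to 1, phase "finished") ...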
public void testCounterIncrementedWhileReplicationOngoing() throws InterruptedException, ExecutionException, IOException {
@ -764,7 +797,9 @@ public class TransportReplicationActionTests extends ESTestCase {
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("100ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
ReplicationTask task = maybeTask();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
primaryPhase.run();
assertIndexShardCounter(2);
assertThat(transport.capturedRequests().length, equalTo(1));
@ -772,10 +807,14 @@ public class TransportReplicationActionTests extends ESTestCase {
transport.handleResponse(transport.capturedRequests()[0].requestId, TransportResponse.Empty.INSTANCE);
transport.clear();
assertIndexShardCounter(1);
assertPhase(task, "finished");
request = new Request(shardId).timeout("100ms");
primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
task = maybeTask();
primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
primaryPhase.run();
assertIndexShardCounter(2);
assertPhase(task, "replicating");
CapturingTransport.CapturedRequest[] replicationRequests = transport.getCapturedRequestsAndClear();
assertThat(replicationRequests.length, equalTo(1));
// try with failure response
@ -792,12 +831,14 @@ public class TransportReplicationActionTests extends ESTestCase {
ShardRoutingState.STARTED, ShardRoutingState.STARTED));
action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
final ReplicationTask task = maybeTask();
Thread t = new Thread() {
@Override
public void run() {
try {
replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId), createTransportChannel(new PlainActionFuture<>()));
replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId), createTransportChannel(new PlainActionFuture<>()), task);
} catch (Exception e) {
logger.error("Failed", e);
}
}
};
@ -807,13 +848,14 @@ public class TransportReplicationActionTests extends ESTestCase {
assertBusy(() -> assertIndexShardCounter(2));
((ActionWithDelay) action).countDownLatch.countDown();
t.join();
assertPhase(task, "finished");
// operation should have finished and counter decreased because no outstanding replica requests
assertIndexShardCounter(1);
// now check if this also works if operation throws exception
action = new ActionWithExceptions(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
final Action.ReplicaOperationTransportHandler replicaOperationTransportHandlerForException = action.new ReplicaOperationTransportHandler();
try {
replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel(new PlainActionFuture<>()));
replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel(new PlainActionFuture<>()), task);
fail();
} catch (Throwable t2) {
}
@ -829,12 +871,15 @@ public class TransportReplicationActionTests extends ESTestCase {
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("100ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
ReplicationTask task = maybeTask();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener));
primaryPhase.run();
// no replica request should have been sent yet
assertThat(transport.capturedRequests().length, equalTo(0));
// no matter if the operation is retried or not, the counter must be back to 1
assertIndexShardCounter(1);
assertPhase(task, "failed");
}
private void assertIndexShardCounter(int expected) {
@ -847,9 +892,9 @@ public class TransportReplicationActionTests extends ESTestCase {
private final AtomicReference<ShardRouting> indexShardRouting = new AtomicReference<>();
/*
* Returns testIndexShardOperationsCounter or initializes it if it was already created in this test run.
* */
/**
* Returns testIndexShardOperationsCounter or initializes it if it was not already created in this test run.
*/
private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter() {
count.incrementAndGet();
return new TransportReplicationAction.IndexShardReference() {
@ -872,6 +917,29 @@ public class TransportReplicationActionTests extends ESTestCase {
};
}
/**
* Sometimes build a ReplicationTask for tracking the phase of the
* TransportReplicationAction. Since TransportReplicationAction has to work
* if the task is null just as well as if it is supplied, this returns null
* half the time.
*/
private ReplicationTask maybeTask() {
return random().nextBoolean() ? new ReplicationTask(0, null, null, null, null, 0) : null;
}
/**
* If the task is non-null this asserts that the phase matches.
*/
private void assertPhase(@Nullable ReplicationTask task, String phase) {
assertPhase(task, equalTo(phase));
}
private void assertPhase(@Nullable ReplicationTask task, Matcher<String> phaseMatcher) {
if (task != null) {
assertThat(task.getPhase(), phaseMatcher);
}
}
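Condensed from the tests above, how these two helpers pair up; every name here comes from this file:

    ReplicationTask task = maybeTask();   // null half the time, by design
    TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
    reroutePhase.run();
    assertPhase(task, "failed");          // a silent no-op when task == null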
public static class Request extends ReplicationRequest<Request> {
public AtomicBoolean processedOnPrimary = new AtomicBoolean();
public AtomicInteger processedOnReplicas = new AtomicInteger();
@ -959,9 +1027,9 @@ public class TransportReplicationActionTests extends ESTestCase {
}
}
/*
* Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails.
* */
/**
* Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails.
*/
class ActionWithExceptions extends Action {
ActionWithExceptions(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) throws IOException {
@ -1027,9 +1095,9 @@ public class TransportReplicationActionTests extends ESTestCase {
}
/*
* Transport channel that is needed for replica operation testing.
* */
/**
* Transport channel that is needed for replica operation testing.
*/
public TransportChannel createTransportChannel(final PlainActionFuture<Response> listener) {
return new TransportChannel() {

View File

@ -134,8 +134,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
ActionFuture<TermVectorsResponse> termVectors = client().termVectors(new TermVectorsRequest(indexOrAlias(), "type1", "0")
.selectedFields(randomBoolean() ? new String[]{"existingfield"} : null)
.termStatistics(true)
.fieldStatistics(true)
.dfs(true));
.fieldStatistics(true));
// let's see if the null term vectors are caught...
TermVectorsResponse actionGet = termVectors.actionGet();
@ -966,95 +965,6 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
return randomBoolean() ? "test" : "alias";
}
public void testDfs() throws ExecutionException, InterruptedException, IOException {
logger.info("Setting up the index ...");
Settings.Builder settings = settingsBuilder()
.put(indexSettings())
.put("index.analysis.analyzer", "standard")
.put("index.number_of_shards", randomIntBetween(2, 10)); // we need at least 2 shards
assertAcked(prepareCreate("test")
.setSettings(settings)
.addMapping("type1", "text", "type=string"));
ensureGreen();
int numDocs = scaledRandomIntBetween(25, 100);
logger.info("Indexing {} documents...", numDocs);
List<IndexRequestBuilder> builders = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
builders.add(client().prepareIndex("test", "type1", i + "").setSource("text", "cat"));
}
indexRandom(true, builders);
XContentBuilder expectedStats = jsonBuilder()
.startObject()
.startObject("text")
.startObject("field_statistics")
.field("sum_doc_freq", numDocs)
.field("doc_count", numDocs)
.field("sum_ttf", numDocs)
.endObject()
.startObject("terms")
.startObject("cat")
.field("doc_freq", numDocs)
.field("ttf", numDocs)
.endObject()
.endObject()
.endObject()
.endObject();
logger.info("Without dfs 'cat' should appear strictly less than {} times.", numDocs);
TermVectorsResponse response = client().prepareTermVectors("test", "type1", randomIntBetween(0, numDocs - 1) + "")
.setSelectedFields("text")
.setFieldStatistics(true)
.setTermStatistics(true)
.get();
checkStats(response.getFields(), expectedStats, false);
logger.info("With dfs 'cat' should appear exactly {} times.", numDocs);
response = client().prepareTermVectors("test", "type1", randomIntBetween(0, numDocs - 1) + "")
.setSelectedFields("text")
.setFieldStatistics(true)
.setTermStatistics(true)
.setDfs(true)
.get();
checkStats(response.getFields(), expectedStats, true);
}
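The two log messages above carry the point of this (now removed) test; as back-of-the-envelope arithmetic, with illustrative numbers only:

    int numDocs = 30;                     // hypothetical: spread roughly evenly
    int shards = 3;                       // over several shards
    int perShard = numDocs / shards;      // what a single shard sees for doc_freq("cat")
    assert perShard < numDocs;            // without dfs: stats come from one shard, strictly less
    assert perShard * shards == numDocs;  // with dfs: stats are merged across shards, exact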
private void checkStats(Fields fields, XContentBuilder xContentBuilder, boolean isEqual) throws IOException {
Map<String, Object> stats = JsonXContent.jsonXContent.createParser(xContentBuilder.bytes()).map();
assertThat("number of fields expected:", fields.size(), equalTo(stats.size()));
for (String fieldName : fields) {
logger.info("Checking field statistics for field: {}", fieldName);
Terms terms = fields.terms(fieldName);
Map<String, Integer> fieldStatistics = getFieldStatistics(stats, fieldName);
String msg = "field: " + fieldName + " ";
assertThat(msg + "sum_doc_freq:",
(int) terms.getSumDocFreq(),
equalOrLessThanTo(fieldStatistics.get("sum_doc_freq"), isEqual));
assertThat(msg + "doc_count:",
terms.getDocCount(),
equalOrLessThanTo(fieldStatistics.get("doc_count"), isEqual));
assertThat(msg + "sum_ttf:",
(int) terms.getSumTotalTermFreq(),
equalOrLessThanTo(fieldStatistics.get("sum_ttf"), isEqual));
final TermsEnum termsEnum = terms.iterator();
BytesRef text;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();
logger.info("Checking term statistics for term: ({}, {})", fieldName, term);
Map<String, Integer> termStatistics = getTermStatistics(stats, fieldName, term);
msg = "term: (" + fieldName + "," + term + ") ";
assertThat(msg + "doc_freq:",
termsEnum.docFreq(),
equalOrLessThanTo(termStatistics.get("doc_freq"), isEqual));
assertThat(msg + "ttf:",
(int) termsEnum.totalTermFreq(),
equalOrLessThanTo(termStatistics.get("ttf"), isEqual));
}
}
}
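checkStats leans on an equalOrLessThanTo helper that is out of view in this diff; a plausible reconstruction, purely hypothetical (the real helper may differ):

    private static Matcher<Integer> equalOrLessThanTo(Integer value, boolean isEqual) {
        // dfs merges statistics across shards, so values must match exactly;
        // without dfs a single shard can report at most the global value
        return isEqual ? equalTo(value) : lessThanOrEqualTo(value);
    }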
private Map<String, Integer> getFieldStatistics(Map<String, Object> stats, String fieldName) throws IOException {
return (Map<String, Integer>) ((Map<String, Object>) stats.get(fieldName)).get("field_statistics");
}

View File

@ -32,13 +32,13 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;
@ -128,8 +128,8 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
CountDownLatch clusterStateLatch = new CountDownLatch(1);
@Inject
public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
super(settings, transport, threadPool);
public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) {
super(settings, transport, threadPool, namedWriteableRegistry);
}
@Override @SuppressWarnings("unchecked")

Some files were not shown because too many files have changed in this diff.