Merge remote-tracking branch 'upstream/master' into

feature-suggest-refactoring
This commit is contained in:
Ali Beyad 2016-02-26 17:21:12 -05:00
commit a7f6488216
147 changed files with 1194 additions and 1679 deletions

View File

@ -400,7 +400,7 @@ class BuildPlugin implements Plugin<Project> {
// we use './temp' since this is per JVM and tests are forbidden from writing to CWD
systemProperty 'java.io.tmpdir', './temp'
systemProperty 'java.awt.headless', 'true'
systemProperty 'tests.maven', 'true' // TODO: rename this once we've switched to gradle!
systemProperty 'tests.gradle', 'true'
systemProperty 'tests.artifact', project.name
systemProperty 'tests.task', path
systemProperty 'tests.security.manager', 'true'

View File

@ -67,7 +67,6 @@ public class PluginBuildPlugin extends BuildPlugin {
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
provided "org.slf4j:slf4j-api:${project.versions.slf4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}
@ -101,11 +100,6 @@ public class PluginBuildPlugin extends BuildPlugin {
from pluginMetadata // metadata (eg custom security policy)
from project.jar // this plugin's jar
from project.configurations.runtime - project.configurations.provided // the dep jars
// hack just for slf4j, in case it is "upgrade" from provided to compile,
// since it is not actually provided in distributions
from project.configurations.runtime.fileCollection { Dependency dep ->
return dep.name == 'slf4j-api' && project.configurations.compile.dependencies.contains(dep)
}
// extra files for the plugin to go into the zip
from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
from('src/main') {

View File

@ -245,7 +245,8 @@ class ClusterFormationTasks {
return setup
}
Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
File configDir = new File(node.homeDir, 'config')
copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
@ -258,9 +259,12 @@ class ClusterFormationTasks {
}
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
copyConfig.into(destConfigFile.canonicalFile.parentFile)
.from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
.rename { destConfigFile.name }
// wrap source file in closure to delay resolution to execution time
copyConfig.from({ extraConfigFile.getValue() }) {
// this must be in a closure so it is only applied to the single file specified in from above
into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
rename { destConfigFile.name }
}
}
return copyConfig
}

View File

@ -374,9 +374,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]NamedWriteableRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]ESLoggerFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]Loggers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]log4j[/\\]LogConfigurator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]ElasticsearchDirectoryReader.java" checks="LineLength" />

View File

@ -4,7 +4,7 @@ lucene = 5.5.0
# optional dependencies
spatial4j = 0.5
jts = 1.13
jackson = 2.6.2
jackson = 2.7.1
log4j = 1.2.17
slf4j = 1.6.2
jna = 4.1.0
@ -13,6 +13,8 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
commonslogging = 1.1.3

View File

@ -77,7 +77,6 @@ dependencies {
// logging
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
compile "org.slf4j:slf4j-api:${versions.slf4j}", optional
compile "net.java.dev.jna:jna:${versions.jna}", optional
@ -224,8 +223,9 @@ thirdPartyAudit.excludes = [
'org.osgi.util.tracker.ServiceTracker',
'org.osgi.util.tracker.ServiceTrackerCustomizer',
'org.slf4j.impl.StaticMDCBinder',
'org.slf4j.impl.StaticMarkerBinder',
// from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
'org.slf4j.LoggerFactory',
]
// dependency license are currently checked in distribution

View File

@ -23,7 +23,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

View File

@ -36,14 +36,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
private String reason = DEFAULT_REASON;
/**
* Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
* all nodes will be cancelled.
*/
public CancelTasksRequest(String... nodesIds) {
super(nodesIds);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -54,7 +46,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(reason);
}
@Override

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
@ -36,6 +35,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
@ -84,9 +84,9 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
if (request.taskId().isSet() == false) {
// we are only checking one task, we can optimize it
CancellableTask task = taskManager.getCancellableTask(request.taskId());
CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept(task);
@ -94,7 +94,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
}
} else {
if (taskManager.getTask(request.taskId()) != null) {
if (taskManager.getTask(request.taskId().getId()) != null) {
// The task exists, but doesn't support cancellation
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
} else {
@ -135,11 +135,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
sendSetBanRequest(nodes,
BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()), reason),
banLock);
}
private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
sendRemoveBanRequest(nodes,
BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId())));
}
private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
@ -148,8 +151,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if node still in the cluster
logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]", request.parentNodeId, request
.parentTaskId, node, request.ban);
logger.debug("Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]", request.parentTaskId, node,
request.ban);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
@ -164,8 +167,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
});
} else {
banLock.onBanSet();
logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node no longer in the cluster",
request.parentNodeId, request.parentTaskId, node);
logger.debug("Cannot send ban for tasks with the parent [{}] to the node [{}] - the node no longer in the cluster",
request.parentTaskId, node);
}
}
}
@ -176,13 +179,12 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if node still in the cluster
logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
request.parentTaskId, node);
logger.debug("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler
.INSTANCE_SAME);
} else {
logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node no longer in " +
"the cluster", request.parentNodeId, request.parentTaskId, node);
logger.debug("Cannot send remove ban request for tasks with the parent [{}] to the node [{}] - the node no longer in " +
"the cluster", request.parentTaskId, node);
}
}
}
@ -218,23 +220,27 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
private static class BanParentTaskRequest extends TransportRequest {
private String parentNodeId;
private long parentTaskId;
private TaskId parentTaskId;
private boolean ban;
private String reason;
BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
this.parentNodeId = parentNodeId;
static BanParentTaskRequest createSetBanParentTaskRequest(TaskId parentTaskId, String reason) {
return new BanParentTaskRequest(parentTaskId, reason);
}
static BanParentTaskRequest createRemoveBanParentTaskRequest(TaskId parentTaskId) {
return new BanParentTaskRequest(parentTaskId);
}
private BanParentTaskRequest(TaskId parentTaskId, String reason) {
this.parentTaskId = parentTaskId;
this.ban = true;
this.reason = reason;
}
BanParentTaskRequest(String parentNodeId, long parentTaskId) {
this.parentNodeId = parentNodeId;
private BanParentTaskRequest(TaskId parentTaskId) {
this.parentTaskId = parentTaskId;
this.ban = false;
}
@ -245,8 +251,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentNodeId = in.readString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
ban = in.readBoolean();
if (ban) {
reason = in.readString();
@ -256,8 +261,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(parentNodeId);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
out.writeBoolean(ban);
if (ban) {
out.writeString(reason);
@ -269,13 +273,13 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
if (request.ban) {
logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId, request
.parentTaskId, clusterService.localNode().getId(), request.reason);
taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
logger.debug("Received ban for the parent [{}] on the node [{}], reason: [{}]", request.parentTaskId,
clusterService.localNode().getId(), request.reason);
taskManager.setBan(request.parentTaskId, request.reason);
} else {
logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId,
clusterService.localNode().getId());
taskManager.removeBan(request.parentNodeId, request.parentTaskId);
taskManager.removeBan(request.parentTaskId);
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}

View File

@ -32,14 +32,6 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
private boolean detailed = false;
/**
* Get information from nodes based on the nodes ids specified. If none are passed, information
* for all nodes will be returned.
*/
public ListTasksRequest(String... nodesIds) {
super(nodesIds);
}
/**
* Should the detailed task information be returned.
*/
@ -48,7 +40,7 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
}
/**
* Should the node settings be returned.
* Should the detailed task information be returned.
*/
public ListTasksRequest detailed(boolean detailed) {
this.detailed = detailed;

View File

@ -138,11 +138,13 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
}
builder.endObject();
}
builder.startArray("tasks");
builder.startObject("tasks");
for(TaskInfo task : entry.getValue()) {
builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
task.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
builder.endObject();
builder.endObject();
}
builder.endObject();

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -41,7 +42,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
private final DiscoveryNode node;
private final long id;
private final TaskId taskId;
private final String type;
@ -51,28 +52,21 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
private final Task.Status status;
private final String parentNode;
private final TaskId parentTaskId;
private final long parentId;
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
this(node, id, type, action, description, status, null, -1L);
}
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, TaskId parentTaskId) {
this.node = node;
this.id = id;
this.taskId = new TaskId(node.getId(), id);
this.type = type;
this.action = action;
this.description = description;
this.status = status;
this.parentNode = parentNode;
this.parentId = parentId;
this.parentTaskId = parentTaskId;
}
public TaskInfo(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
id = in.readLong();
taskId = new TaskId(node.getId(), in.readLong());
type = in.readString();
action = in.readString();
description = in.readOptionalString();
@ -81,8 +75,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
} else {
status = null;
}
parentNode = in.readOptionalString();
parentId = in.readLong();
parentTaskId = new TaskId(in);
}
public TaskId getTaskId() {
return taskId;
}
public DiscoveryNode getNode() {
@ -90,7 +87,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
}
public long getId() {
return id;
return taskId.getId();
}
public String getType() {
@ -113,12 +110,8 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
return status;
}
public String getParentNode() {
return parentNode;
}
public long getParentId() {
return parentId;
public TaskId getParentTaskId() {
return parentTaskId;
}
@Override
@ -129,7 +122,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(id);
out.writeLong(taskId.getId());
out.writeString(type);
out.writeString(action);
out.writeOptionalString(description);
@ -139,15 +132,13 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
} else {
out.writeBoolean(false);
}
out.writeOptionalString(parentNode);
out.writeLong(parentId);
parentTaskId.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("node", node.getId());
builder.field("id", id);
builder.field("id", taskId.getId());
builder.field("type", type);
builder.field("action", action);
if (status != null) {
@ -156,11 +147,9 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
if (description != null) {
builder.field("description", description);
}
if (parentNode != null) {
builder.field("parent_node", parentNode);
builder.field("parent_id", parentId);
if (parentTaskId.isSet() == false) {
builder.field("parent_task_id", parentTaskId.toString());
}
builder.endObject();
return builder;
}
}

View File

@ -62,7 +62,7 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
public void setIndexConstraints(IndexConstraint[] indexConstraints) {
if (indexConstraints == null) {
throw new NullPointerException("specified index_contraints can't be null");
throw new NullPointerException("specified index_constraints can't be null");
}
this.indexConstraints = indexConstraints;
}

View File

@ -52,7 +52,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final TransportSearchQueryThenFetchAction queryThenFetchAction;
private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
private final TransportSearchQueryAndFetchAction queryAndFetchAction;
private final boolean optimizeSingleShard;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool,
@ -68,27 +67,24 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
this.queryThenFetchAction = queryThenFetchAction;
this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
this.queryAndFetchAction = queryAndFetchAction;
this.optimizeSingleShard = this.settings.getAsBoolean("action.search.optimize_single_shard", true);
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// optimize search type for cases where there is only one shard group to search on
if (optimizeSingleShard) {
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
if (shardCount == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures, we will notify the search response if its really the case from the actual action
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
if (shardCount == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures, we will notify the search response if its really the case from the actual action
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
}
if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
dfsQueryThenFetchAction.execute(searchRequest, listener);

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -31,40 +32,35 @@ import java.io.IOException;
*/
public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> extends ActionRequest<Request> {
private String parentTaskNode;
private long parentTaskId;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
protected ChildTaskActionRequest() {
}
public void setParentTask(String parentTaskNode, long parentTaskId) {
this.parentTaskNode = parentTaskNode;
this.parentTaskId = parentTaskId;
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentTaskNode = in.readOptionalString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(parentTaskNode);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
}
@Override
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
return createTask(id, type, action, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskId);
}
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.transport.TransportRequest;
import java.io.IOException;
@ -31,38 +32,33 @@ import java.io.IOException;
*/
public class ChildTaskRequest extends TransportRequest {
private String parentTaskNode;
private long parentTaskId;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
protected ChildTaskRequest() {
}
public void setParentTask(String parentTaskNode, long parentTaskId) {
this.parentTaskNode = parentTaskNode;
this.parentTaskId = parentTaskId;
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentTaskNode = in.readOptionalString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(parentTaskNode);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
}
@Override
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
return createTask(id, type, action, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskId);
}
}

View File

@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@ -186,8 +187,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
}
@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new ReplicationTask(id, type, action, getDescription(), parentTaskId);
}
/**

View File

@ -19,11 +19,11 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -35,8 +35,8 @@ import static java.util.Objects.requireNonNull;
public class ReplicationTask extends Task {
private volatile String phase = "starting";
public ReplicationTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
public ReplicationTask(long id, String type, String action, String description, TaskId parentTaskId) {
super(id, type, action, description, parentTaskId);
}
/**

View File

@ -35,7 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;

View File

@ -35,7 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;

View File

@ -27,9 +27,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A base class for task requests
*/
@ -47,26 +50,21 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
private String[] actions = ALL_ACTIONS;
private String parentNode;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
private long parentTaskId = ALL_TASKS;
private long taskId = ALL_TASKS;
private TaskId taskId = TaskId.EMPTY_TASK_ID;
public BaseTasksRequest() {
}
@Override
public ActionRequestValidationException validate() {
return null;
}
/**
* Get information about tasks from nodes based on the nodes ids specified.
* If none are passed, information for all nodes will be returned.
*/
public BaseTasksRequest(String... nodesIds) {
this.nodesIds = nodesIds;
ActionRequestValidationException validationException = null;
if (taskId.isSet() == false && nodesIds.length > 0) {
validationException = addValidationError("task id cannot be used together with node ids",
validationException);
}
return validationException;
}
/**
@ -100,39 +98,26 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
*
* By default tasks with any ids are returned.
*/
public long taskId() {
public TaskId taskId() {
return taskId;
}
@SuppressWarnings("unchecked")
public final Request taskId(long taskId) {
public final Request taskId(TaskId taskId) {
this.taskId = taskId;
return (Request) this;
}
/**
* Returns the parent node id that tasks should be filtered by
*/
public String parentNode() {
return parentNode;
}
@SuppressWarnings("unchecked")
public Request parentNode(String parentNode) {
this.parentNode = parentNode;
return (Request) this;
}
/**
* Returns the parent task id that tasks should be filtered by
*/
public long parentTaskId() {
public TaskId parentTaskId() {
return parentTaskId;
}
@SuppressWarnings("unchecked")
public Request parentTaskId(long parentTaskId) {
public Request parentTaskId(TaskId parentTaskId) {
this.parentTaskId = parentTaskId;
return (Request) this;
}
@ -157,11 +142,10 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = new TaskId(in);
parentTaskId = new TaskId(in);
nodesIds = in.readStringArray();
taskId = in.readLong();
actions = in.readStringArray();
parentNode = in.readOptionalString();
parentTaskId = in.readLong();
if (in.readBoolean()) {
timeout = TimeValue.readTimeValue(in);
}
@ -170,11 +154,10 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
taskId.writeTo(out);
parentTaskId.writeTo(out);
out.writeStringArrayNullable(nodesIds);
out.writeLong(taskId);
out.writeStringArrayNullable(actions);
out.writeOptionalString(parentNode);
out.writeLong(parentTaskId);
out.writeOptionalStreamable(timeout);
}
@ -182,18 +165,13 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
return false;
}
if (taskId() != ALL_TASKS) {
if(taskId() != task.getId()) {
if (taskId().isSet() == false) {
if(taskId().getId() != task.getId()) {
return false;
}
}
if (parentNode() != null) {
if (parentNode().equals(task.getParentNode()) == false) {
return false;
}
}
if (parentTaskId() != ALL_TASKS) {
if (parentTaskId() != task.getParentId()) {
if (parentTaskId.isSet() == false) {
if (parentTaskId.equals(task.getParentTaskId()) == false) {
return false;
}
}

View File

@ -124,13 +124,17 @@ public abstract class TransportTasksAction<
}
protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
return clusterState.nodes().resolveNodesIds(request.nodesIds());
if (request.taskId().isSet()) {
return clusterState.nodes().resolveNodesIds(request.nodesIds());
} else {
return new String[]{request.taskId().getNodeId()};
}
}
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
if (request.taskId().isSet() == false) {
// we are only checking one task, we can optimize it
Task task = taskManager.getTask(request.taskId());
Task task = taskManager.getTask(request.taskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept((OperationTask) task);
@ -143,13 +147,14 @@ public abstract class TransportTasksAction<
} else {
for (Task task : taskManager.getTasks().values()) {
if (request.match(task)) {
operation.accept((OperationTask)task);
operation.accept((OperationTask) task);
}
}
}
}
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure>
taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
@SuppressWarnings("unchecked")
protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) {
@ -232,34 +237,36 @@ public abstract class TransportTasksAction<
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we
// need to fix
// those (and they randomize the client node usage, so tricky to find when)
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
} else {
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
taskManager.registerChildTask(task, node.getId());
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
@Override
public NodeTasksResponse newInstance() {
return new NodeTasksResponse();
}
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
new BaseTransportResponseHandler<NodeTasksResponse>() {
@Override
public NodeTasksResponse newInstance() {
return new NodeTasksResponse();
}
@Override
public void handleResponse(NodeTasksResponse response) {
onOperation(idx, response);
}
@Override
public void handleResponse(NodeTasksResponse response) {
onOperation(idx, response);
}
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
}
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}
} catch (Throwable t) {
onFailure(idx, nodeId, t);

View File

@ -29,10 +29,9 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

View File

@ -98,7 +98,7 @@ import java.util.Map;
* <p>
* When running tests you have to pass it to the test runner like this:
* <pre>
* mvn test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ...
* gradle test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ...
* </pre>
* See <a href="https://docs.oracle.com/javase/7/docs/technotes/guides/security/troubleshooting-security.html">
* Troubleshooting Security</a> for information.

View File

@ -272,7 +272,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @return The result future
* @see org.elasticsearch.client.Requests#listTasksRequest(String...)
* @see org.elasticsearch.client.Requests#listTasksRequest()
*/
ActionFuture<ListTasksResponse> listTasks(ListTasksRequest request);
@ -281,7 +281,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @param listener A listener to be notified with a result
* @see org.elasticsearch.client.Requests#listTasksRequest(String...)
* @see org.elasticsearch.client.Requests#listTasksRequest()
*/
void listTasks(ListTasksRequest request, ActionListener<ListTasksResponse> listener);
@ -295,7 +295,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @return The result future
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
* @see org.elasticsearch.client.Requests#cancelTasksRequest()
*/
ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request);
@ -304,7 +304,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @param listener A cancelener to be notified with a result
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
* @see org.elasticsearch.client.Requests#cancelTasksRequest()
*/
void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener);

View File

@ -419,23 +419,11 @@ public class Requests {
/**
* Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to get the tasks for
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
*/
public static ListTasksRequest listTasksRequest(String... nodesIds) {
return new ListTasksRequest(nodesIds);
}
/**
* Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to cancel the tasks on
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#cancelTasks(CancelTasksRequest)
*/
public static CancelTasksRequest cancelTasksRequest(String... nodesIds) {
return new CancelTasksRequest(nodesIds);
public static CancelTasksRequest cancelTasksRequest() {
return new CancelTasksRequest();
}
/**

View File

@ -39,7 +39,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
@ -155,7 +154,10 @@ public class TransportClient extends AbstractClient {
pluginsService.processModules(modules);
Injector injector = modules.createInjector();
injector.getInstance(TransportService.class).start();
final TransportService transportService = injector.getInstance(TransportService.class);
transportService.start();
transportService.acceptIncomingRequests();
TransportClient transportClient = new TransportClient(injector);
success = true;
return transportClient;

View File

@ -366,7 +366,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}
}
if (ordered.isEmpty()) {
throw new IllegalArgumentException("No data node with critera [" + nodeAttribute + "] found");
throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found");
}
return new PlainShardIterator(shardId, ordered);
}

View File

@ -458,7 +458,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*/
public void started(ShardRouting shard) {
ensureMutable();
assert !shard.active() : "expected an intializing shard " + shard;
assert !shard.active() : "expected an initializing shard " + shard;
if (shard.relocatingNodeId() == null) {
// if this is not a target shard for relocation, we need to update statistics
inactiveShardCount--;

View File

@ -715,7 +715,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
if (logger.isTraceEnabled()) {
logger.trace("No eligable node found to assign shard [{}] decision [{}]", shard, decision.type());
logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type());
}
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);

View File

@ -72,16 +72,22 @@ import java.util.function.Supplier;
import static org.elasticsearch.ElasticsearchException.readException;
import static org.elasticsearch.ElasticsearchException.readStackTrace;
/**
* A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing.
*/
public abstract class StreamInput extends InputStream {
private Version version = Version.CURRENT;
protected StreamInput() { }
/**
* The version of the node on the other side of this stream.
*/
public Version getVersion() {
return this.version;
}
/**
* Set the version of the node on the other side of this stream.
*/
public void setVersion(Version version) {
this.version = version;
}

View File

@ -63,19 +63,24 @@ import java.util.List;
import java.util.Map;
/**
*
* A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing.
*/
public abstract class StreamOutput extends OutputStream {
private Version version = Version.CURRENT;
/**
* The version of the node on the other side of this stream.
*/
public Version getVersion() {
return this.version;
}
public StreamOutput setVersion(Version version) {
/**
* Set the version of the node on the other side of this stream.
*/
public void setVersion(Version version) {
this.version = version;
return this;
}
public long position() throws IOException {

View File

@ -22,11 +22,26 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
*
* Prefer implementing {@link Writeable} over implementing this interface where possible. Lots of code depends on this interface so this
* isn't always possible.
*
* Implementers of this interface almost always declare a no arg constructor that is exclusively used for creating "empty" objects on which
* you then call {@link #readFrom(StreamInput)}. Because {@linkplain #readFrom(StreamInput)} isn't part of the constructor the fields
* on implementers cannot be final. It is these reasons that this interface has fallen out of favor compared to {@linkplain Writeable}.
*/
public interface Streamable {
/**
* Set this object's fields from a {@linkplain StreamInput}.
*/
void readFrom(StreamInput in) throws IOException;
/**
* Write this object's fields to a {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
}

View File

@ -20,11 +20,17 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be read from {@linkplain StreamInput} by calling their {@link #readFrom(StreamInput)} method.
*
* It is common for implementers of this interface to declare a <code>public static final</code> instance of themselves named PROTOTYPE so
* users can call {@linkplain #readFrom(StreamInput)} on it. It is also fairly typical for readFrom to be implemented as a method that just
* calls a constructor that takes {@linkplain StreamInput} as a parameter. This allows the fields in the implementer to be
* <code>final</code>.
*/
public interface StreamableReader<T> {
/**
* Reads a copy of an object with the same type form the stream input
*
* The caller object remains unchanged.
* Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
*/
T readFrom(StreamInput in) throws IOException;
}

View File

@ -21,10 +21,20 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
* {@linkplain org.elasticsearch.common.unit.TimeValue} actually implements {@linkplain Streamable} not {@linkplain Writeable} but it has
* the same contract.
*
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
* so this isn't always possible.
*/
public interface Writeable<T> extends StreamableReader<T> {
/**
* Writes the current object into the output stream out
* Write this into the {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
}

View File

@ -17,24 +17,7 @@
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
/**
*
* Classes for streaming objects from one Elasticsearch node to another over its binary internode protocol.
*/
public class JdkESLoggerFactory extends ESLoggerFactory {
@Override
protected ESLogger rootLogger() {
return getLogger("");
}
@Override
protected ESLogger newInstance(String prefix, String name) {
final java.util.logging.Logger logger = java.util.logging.Logger.getLogger(name);
return new JdkESLogger(prefix, logger);
}
}
package org.elasticsearch.common.io.stream;

View File

@ -17,13 +17,12 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.Layout;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.helpers.LogLog;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.io.OutputStream;

View File

@ -19,104 +19,188 @@
package org.elasticsearch.common.logging;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
/**
* Contract for all elasticsearch loggers.
* Elasticsearch's logger wrapper.
*/
public interface ESLogger {
public class ESLogger {
private static final String FQCN = ESLogger.class.getName();
String getPrefix();
private final String prefix;
private final Logger logger;
String getName();
public ESLogger(String prefix, Logger logger) {
this.prefix = prefix;
this.logger = logger;
}
/**
* Allows to set the logger level
* If the new level is null, the logger will inherit its level
* from its nearest ancestor with a specific (non-null) level value.
* @param level the new level
* The prefix of the log.
*/
void setLevel(String level);
public String getPrefix() {
return this.prefix;
}
/**
* Returns the current logger level
* If the level is null, it means that the logger inherits its level
* from its nearest ancestor with a specific (non-null) level value.
* @return the logger level
* Fetch the underlying logger so we can look at it. Only exists for testing.
*/
String getLevel();
Logger getLogger() {
return logger;
}
/**
* Returns {@code true} if a TRACE level message is logged.
* Set the level of the logger. If the new level is null, the logger will inherit it's level from its nearest ancestor with a non-null
* level.
*/
boolean isTraceEnabled();
public void setLevel(String level) {
if (level == null) {
logger.setLevel(null);
} else if ("error".equalsIgnoreCase(level)) {
logger.setLevel(Level.ERROR);
} else if ("warn".equalsIgnoreCase(level)) {
logger.setLevel(Level.WARN);
} else if ("info".equalsIgnoreCase(level)) {
logger.setLevel(Level.INFO);
} else if ("debug".equalsIgnoreCase(level)) {
logger.setLevel(Level.DEBUG);
} else if ("trace".equalsIgnoreCase(level)) {
logger.setLevel(Level.TRACE);
}
}
/**
* Returns {@code true} if a DEBUG level message is logged.
* The level of this logger. If null then the logger is inheriting it's level from its nearest ancestor with a non-null level.
*/
boolean isDebugEnabled();
public String getLevel() {
if (logger.getLevel() == null) {
return null;
}
return logger.getLevel().toString();
}
/**
* Returns {@code true} if an INFO level message is logged.
* The name of this logger.
*/
boolean isInfoEnabled();
public String getName() {
return logger.getName();
}
/**
* Returns {@code true} if a WARN level message is logged.
* Returns {@code true} if a TRACE level message should be logged.
*/
boolean isWarnEnabled();
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
/**
* Returns {@code true} if an ERROR level message is logged.
* Returns {@code true} if a DEBUG level message should be logged.
*/
boolean isErrorEnabled();
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
/**
* Returns {@code true} if an INFO level message should be logged.
*/
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
/**
* Returns {@code true} if a WARN level message should be logged.
*/
public boolean isWarnEnabled() {
return logger.isEnabledFor(Level.WARN);
}
/**
* Returns {@code true} if an ERROR level message should be logged.
*/
public boolean isErrorEnabled() {
return logger.isEnabledFor(Level.ERROR);
}
/**
* Logs a TRACE level message.
*/
public void trace(String msg, Object... params) {
trace(msg, null, params);
}
/**
* Logs a TRACE level message with an exception.
*/
public void trace(String msg, Throwable cause, Object... params) {
if (isTraceEnabled()) {
logger.log(FQCN, Level.TRACE, format(prefix, msg, params), cause);
}
}
/**
* Logs a DEBUG level message.
*/
void trace(String msg, Object... params);
public void debug(String msg, Object... params) {
debug(msg, null, params);
}
/**
* Logs a DEBUG level message.
* Logs a DEBUG level message with an exception.
*/
void trace(String msg, Throwable cause, Object... params);
public void debug(String msg, Throwable cause, Object... params) {
if (isDebugEnabled()) {
logger.log(FQCN, Level.DEBUG, format(prefix, msg, params), cause);
}
}
/**
* Logs a DEBUG level message.
* Logs a INFO level message.
*/
void debug(String msg, Object... params);
public void info(String msg, Object... params) {
info(msg, null, params);
}
/**
* Logs a DEBUG level message.
* Logs a INFO level message with an exception.
*/
void debug(String msg, Throwable cause, Object... params);
/**
* Logs an INFO level message.
*/
void info(String msg, Object... params);
/**
* Logs an INFO level message.
*/
void info(String msg, Throwable cause, Object... params);
public void info(String msg, Throwable cause, Object... params) {
if (isInfoEnabled()) {
logger.log(FQCN, Level.INFO, format(prefix, msg, params), cause);
}
}
/**
* Logs a WARN level message.
*/
void warn(String msg, Object... params);
public void warn(String msg, Object... params) {
warn(msg, null, params);
}
/**
* Logs a WARN level message.
* Logs a WARN level message with an exception.
*/
void warn(String msg, Throwable cause, Object... params);
public void warn(String msg, Throwable cause, Object... params) {
if (isWarnEnabled()) {
logger.log(FQCN, Level.WARN, format(prefix, msg, params), cause);
}
}
/**
* Logs an ERROR level message.
* Logs a ERROR level message.
*/
void error(String msg, Object... params);
public void error(String msg, Object... params) {
error(msg, null, params);
}
/**
* Logs an ERROR level message.
* Logs a ERROR level message with an exception.
*/
void error(String msg, Throwable cause, Object... params);
public void error(String msg, Throwable cause, Object... params) {
if (isErrorEnabled()) {
logger.log(FQCN, Level.ERROR, format(prefix, msg, params), cause);
}
}
}

View File

@ -19,62 +19,29 @@
package org.elasticsearch.common.logging;
import org.elasticsearch.common.logging.jdk.JdkESLoggerFactory;
import org.elasticsearch.common.logging.log4j.Log4jESLoggerFactory;
import org.elasticsearch.common.logging.slf4j.Slf4jESLoggerFactory;
import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.apache.log4j.Logger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.regex.Pattern;
/**
* Factory to get {@link ESLogger}s
*/
public abstract class ESLoggerFactory {
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING = new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
public static final Setting<LogLevel> LOG_LEVEL_SETTING = Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
private static volatile ESLoggerFactory defaultFactory = new JdkESLoggerFactory();
static {
try {
Class<?> loggerClazz = Class.forName("org.apache.log4j.Logger");
// below will throw a NoSuchMethod failure with using slf4j log4j bridge
loggerClazz.getMethod("setLevel", Class.forName("org.apache.log4j.Level"));
defaultFactory = new Log4jESLoggerFactory();
} catch (Throwable e) {
// no log4j
try {
Class.forName("org.slf4j.Logger");
defaultFactory = new Slf4jESLoggerFactory();
} catch (Throwable e1) {
// no slf4j
}
}
}
/**
* Changes the default factory.
*/
public static void setDefaultFactory(ESLoggerFactory defaultFactory) {
if (defaultFactory == null) {
throw new NullPointerException("defaultFactory");
}
ESLoggerFactory.defaultFactory = defaultFactory;
}
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
public static final Setting<LogLevel> LOG_LEVEL_SETTING =
Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
public static ESLogger getLogger(String prefix, String name) {
return defaultFactory.newInstance(prefix == null ? null : prefix.intern(), name.intern());
prefix = prefix == null ? null : prefix.intern();
name = name.intern();
return new ESLogger(prefix, Logger.getLogger(name));
}
public static ESLogger getLogger(String name) {
return defaultFactory.newInstance(name.intern());
return getLogger(null, name);
}
public static DeprecationLogger getDeprecationLogger(String name) {
@ -86,17 +53,13 @@ public abstract class ESLoggerFactory {
}
public static ESLogger getRootLogger() {
return defaultFactory.rootLogger();
return new ESLogger(null, Logger.getRootLogger());
}
public ESLogger newInstance(String name) {
return newInstance(null, name);
private ESLoggerFactory() {
// Utility class can't be built.
}
protected abstract ESLogger rootLogger();
protected abstract ESLogger newInstance(String prefix, String name);
public enum LogLevel {
WARN, TRACE, INFO, DEBUG, ERROR;
public static LogLevel parse(String level) {

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.PropertyConfigurator;
import org.elasticsearch.ElasticsearchException;
@ -39,13 +39,14 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.Strings.cleanPath;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
* Configures log4j with a special set of replacements.
*/
public class LogConfigurator {
@ -54,10 +55,12 @@ public class LogConfigurator {
private static final Map<String, String> REPLACEMENTS;
static {
Map<String, String> replacements = new HashMap<>();
replacements.put("console", "org.elasticsearch.common.logging.log4j.ConsoleAppender");
// Appenders
replacements.put("async", "org.apache.log4j.AsyncAppender");
replacements.put("console", ConsoleAppender.class.getName());
replacements.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender");
replacements.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender");
replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
replacements.put("file", "org.apache.log4j.FileAppender");
replacements.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender");
replacements.put("jms", "org.apache.log4j.net.JMSAppender");
@ -65,17 +68,18 @@ public class LogConfigurator {
replacements.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender");
replacements.put("null", "org.apache.log4j.NullAppender");
replacements.put("rollingFile", "org.apache.log4j.RollingFileAppender");
replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
replacements.put("smtp", "org.apache.log4j.net.SMTPAppender");
replacements.put("socket", "org.apache.log4j.net.SocketAppender");
replacements.put("socketHub", "org.apache.log4j.net.SocketHubAppender");
replacements.put("syslog", "org.apache.log4j.net.SyslogAppender");
replacements.put("telnet", "org.apache.log4j.net.TelnetAppender");
replacements.put("terminal", "org.elasticsearch.common.logging.log4j.TerminalAppender");
// policies
replacements.put("terminal", TerminalAppender.class.getName());
// Policies
replacements.put("timeBased", "org.apache.log4j.rolling.TimeBasedRollingPolicy");
replacements.put("sizeBased", "org.apache.log4j.rolling.SizeBasedTriggeringPolicy");
// layouts
// Layouts
replacements.put("simple", "org.apache.log4j.SimpleLayout");
replacements.put("html", "org.apache.log4j.HTMLLayout");
replacements.put("pattern", "org.apache.log4j.PatternLayout");
@ -141,7 +145,8 @@ public class LogConfigurator {
static void resolveConfig(Environment env, final Settings.Builder settingsBuilder) {
try {
Files.walkFileTree(env.configFile(), EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
Files.walkFileTree(env.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String fileName = file.getFileName().toString();

View File

@ -17,13 +17,13 @@
* under the License.
*/
package org.elasticsearch.common.logging.support;
package org.elasticsearch.common.logging;
import java.util.HashMap;
import java.util.Map;
import java.util.HashSet;
import java.util.Set;
/**
*
* Format string for Elasticsearch log messages.
*/
public class LoggerMessageFormat {
@ -79,13 +79,13 @@ public class LoggerMessageFormat {
// itself escaped: "abc x:\\{}"
// we have to consume one backward slash
sbuf.append(messagePattern.substring(i, j - 1));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
deeplyAppendParameter(sbuf, argArray[L], new HashSet<Object[]>());
i = j + 2;
}
} else {
// normal case
sbuf.append(messagePattern.substring(i, j));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
deeplyAppendParameter(sbuf, argArray[L], new HashSet<Object[]>());
i = j + 2;
}
}
@ -117,7 +117,7 @@ public class LoggerMessageFormat {
}
}
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Map seenMap) {
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Set<Object[]> seen) {
if (o == null) {
sbuf.append("null");
return;
@ -144,7 +144,7 @@ public class LoggerMessageFormat {
} else if (o instanceof double[]) {
doubleArrayAppend(sbuf, (double[]) o);
} else {
objectArrayAppend(sbuf, (Object[]) o, seenMap);
objectArrayAppend(sbuf, (Object[]) o, seen);
}
}
}
@ -159,18 +159,18 @@ public class LoggerMessageFormat {
}
private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Map seenMap) {
private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Set<Object[]> seen) {
sbuf.append('[');
if (!seenMap.containsKey(a)) {
seenMap.put(a, null);
if (!seen.contains(a)) {
seen.add(a);
final int len = a.length;
for (int i = 0; i < len; i++) {
deeplyAppendParameter(sbuf, a[i], seenMap);
deeplyAppendParameter(sbuf, a[i], seen);
if (i != len - 1)
sbuf.append(", ");
}
// allow repeats in siblings
seenMap.remove(a);
seen.remove(a);
} else {
sbuf.append("...");
}

View File

@ -35,8 +35,6 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
/**
* A set of utilities around Logging.
*
*
*/
public class Loggers {
@ -58,20 +56,24 @@ public class Loggers {
return consoleLoggingEnabled;
}
public static ESLogger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
/** Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of Class. */
/**
* Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of
* Class.
*/
public static ESLogger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(loggerName, settings, asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
return getLogger(loggerName, settings,
asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class clazz, Settings settings, Index index, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class clazz, Settings settings, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
}
@ -117,11 +119,11 @@ public class Loggers {
return ESLoggerFactory.getLogger(getLoggerName(s));
}
public static ESLogger getLogger(Class clazz) {
public static ESLogger getLogger(Class<?> clazz) {
return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
}
public static ESLogger getLogger(Class clazz, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), prefixes);
}
@ -146,7 +148,7 @@ public class Loggers {
return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
}
private static String buildClassLoggerName(Class clazz) {
private static String buildClassLoggerName(Class<?> clazz) {
String name = clazz.getName();
if (name.startsWith("org.elasticsearch.")) {
name = Classes.getPackageName(clazz);

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

View File

@ -1,108 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import java.util.logging.Level;
import java.util.logging.LogRecord;
/**
 * A {@link LogRecord} used together with {@link JdkESLogger} that lazily
 * determines the class name and method name of the code which invoked the
 * logger, by walking the current stack trace on first access.
 */
public class ESLogRecord extends LogRecord {

    private static final String FQCN = AbstractESLogger.class.getName();

    private String sourceClassName;
    private String sourceMethodName;
    // true until the caller has been resolved (or explicitly set)
    private transient boolean needToInferCaller;

    public ESLogRecord(Level level, String msg) {
        super(level, msg);
        needToInferCaller = true;
    }

    @Override
    public String getSourceClassName() {
        if (needToInferCaller) {
            inferCaller();
        }
        return sourceClassName;
    }

    @Override
    public void setSourceClassName(String sourceClassName) {
        this.sourceClassName = sourceClassName;
        needToInferCaller = false;
    }

    @Override
    public String getSourceMethodName() {
        if (needToInferCaller) {
            inferCaller();
        }
        return sourceMethodName;
    }

    @Override
    public void setSourceMethodName(String sourceMethodName) {
        this.sourceMethodName = sourceMethodName;
        needToInferCaller = false;
    }

    /**
     * Walks the current stack to find the first frame above the logger
     * implementation (skipping reflection frames) and records it as the
     * source of this log record. Best effort: if no suitable frame is
     * found, the source fields are simply left unset.
     */
    private void inferCaller() {
        needToInferCaller = false;
        final StackTraceElement[] stack = new Throwable().getStackTrace();
        boolean seenLoggerFrame = false;
        for (final StackTraceElement frame : stack) {
            final String className = frame.getClassName();
            if (!seenLoggerFrame) {
                // skip every frame until the first logger implementation frame
                seenLoggerFrame = isLoggerImplFrame(className);
                continue;
            }
            if (isLoggerImplFrame(className)) {
                continue;
            }
            if (className.startsWith("java.lang.reflect.") || className.startsWith("sun.reflect.")) {
                // skip reflection machinery between the logger and its caller
                continue;
            }
            // first non-logger, non-reflection frame: this is the caller
            setSourceClassName(className);
            setSourceMethodName(frame.getMethodName());
            return;
        }
        // no suitable frame found; punt — we are only committed to best effort
    }

    private boolean isLoggerImplFrame(String cname) {
        // the log record could be created for a platform logger
        return cname.equals(FQCN);
    }
}

View File

@ -1,163 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
/**
 * An {@link AbstractESLogger} implementation backed by
 * {@code java.util.logging}. Log records are created as
 * {@link ESLogRecord}s so caller location can be inferred lazily.
 */
public class JdkESLogger extends AbstractESLogger {

    private final Logger logger;

    public JdkESLogger(String prefix, Logger logger) {
        super(prefix);
        this.logger = logger;
    }

    @Override
    public void setLevel(String level) {
        if (level == null) {
            logger.setLevel(null);
            return;
        }
        final Level parsed = parseLevel(level);
        if (parsed != null) {
            logger.setLevel(parsed);
        }
        // unrecognized level names leave the current level untouched
    }

    /** Maps an ES level name (case-insensitive) to a JDK {@link Level}, or {@code null} if unknown. */
    private static Level parseLevel(String level) {
        if ("error".equalsIgnoreCase(level)) {
            return Level.SEVERE;
        } else if ("warn".equalsIgnoreCase(level)) {
            return Level.WARNING;
        } else if ("info".equalsIgnoreCase(level)) {
            return Level.INFO;
        } else if ("debug".equalsIgnoreCase(level)) {
            return Level.FINE;
        } else if ("trace".equalsIgnoreCase(level)) {
            return Level.FINEST;
        }
        return null;
    }

    @Override
    public String getLevel() {
        final Level current = logger.getLevel();
        return current == null ? null : current.toString();
    }

    @Override
    public String getName() {
        return logger.getName();
    }

    @Override
    public boolean isTraceEnabled() {
        return logger.isLoggable(Level.FINEST);
    }

    @Override
    public boolean isDebugEnabled() {
        return logger.isLoggable(Level.FINE);
    }

    @Override
    public boolean isInfoEnabled() {
        return logger.isLoggable(Level.INFO);
    }

    @Override
    public boolean isWarnEnabled() {
        return logger.isLoggable(Level.WARNING);
    }

    @Override
    public boolean isErrorEnabled() {
        return logger.isLoggable(Level.SEVERE);
    }

    /** Builds an {@link ESLogRecord} at the given level and hands it to the JDK logger. */
    private void log(Level level, String msg, Throwable cause) {
        final LogRecord record = new ESLogRecord(level, msg);
        if (cause != null) {
            record.setThrown(cause);
        }
        logger.log(record);
    }

    @Override
    protected void internalTrace(String msg) {
        log(Level.FINEST, msg, null);
    }

    @Override
    protected void internalTrace(String msg, Throwable cause) {
        log(Level.FINEST, msg, cause);
    }

    @Override
    protected void internalDebug(String msg) {
        log(Level.FINE, msg, null);
    }

    @Override
    protected void internalDebug(String msg, Throwable cause) {
        log(Level.FINE, msg, cause);
    }

    @Override
    protected void internalInfo(String msg) {
        log(Level.INFO, msg, null);
    }

    @Override
    protected void internalInfo(String msg, Throwable cause) {
        log(Level.INFO, msg, cause);
    }

    @Override
    protected void internalWarn(String msg) {
        log(Level.WARNING, msg, null);
    }

    @Override
    protected void internalWarn(String msg, Throwable cause) {
        log(Level.WARNING, msg, cause);
    }

    @Override
    protected void internalError(String msg) {
        log(Level.SEVERE, msg, null);
    }

    @Override
    protected void internalError(String msg, Throwable cause) {
        log(Level.SEVERE, msg, cause);
    }

    protected Logger logger() {
        return logger;
    }
}

View File

@ -1,147 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.common.logging.support.AbstractESLogger;
/**
 * An {@link AbstractESLogger} implementation that delegates to a log4j
 * {@link Logger}. Every log call passes the facade's fully-qualified class
 * name so log4j can skip wrapper frames when computing caller location.
 */
public class Log4jESLogger extends AbstractESLogger {

    // FQCN of the logging facade, used by log4j to locate the real caller.
    // It is a constant, so make it static rather than a per-instance field.
    private static final String FQCN = AbstractESLogger.class.getName();

    private final org.apache.log4j.Logger logger;

    public Log4jESLogger(String prefix, Logger logger) {
        super(prefix);
        this.logger = logger;
    }

    public Logger logger() {
        return logger;
    }

    /**
     * Sets the log4j level from an ES level name (case-insensitive).
     * {@code null} clears the level; unrecognized names are ignored.
     */
    @Override
    public void setLevel(String level) {
        if (level == null) {
            logger.setLevel(null);
        } else if ("error".equalsIgnoreCase(level)) {
            logger.setLevel(Level.ERROR);
        } else if ("warn".equalsIgnoreCase(level)) {
            logger.setLevel(Level.WARN);
        } else if ("info".equalsIgnoreCase(level)) {
            logger.setLevel(Level.INFO);
        } else if ("debug".equalsIgnoreCase(level)) {
            logger.setLevel(Level.DEBUG);
        } else if ("trace".equalsIgnoreCase(level)) {
            logger.setLevel(Level.TRACE);
        }
    }

    /** @return the level explicitly set on this logger, or {@code null} if inherited */
    @Override
    public String getLevel() {
        if (logger.getLevel() == null) {
            return null;
        }
        return logger.getLevel().toString();
    }

    @Override
    public String getName() {
        return logger.getName();
    }

    @Override
    public boolean isTraceEnabled() {
        return logger.isTraceEnabled();
    }

    @Override
    public boolean isDebugEnabled() {
        return logger.isDebugEnabled();
    }

    @Override
    public boolean isInfoEnabled() {
        return logger.isInfoEnabled();
    }

    @Override
    public boolean isWarnEnabled() {
        return logger.isEnabledFor(Level.WARN);
    }

    @Override
    public boolean isErrorEnabled() {
        return logger.isEnabledFor(Level.ERROR);
    }

    @Override
    protected void internalTrace(String msg) {
        logger.log(FQCN, Level.TRACE, msg, null);
    }

    @Override
    protected void internalTrace(String msg, Throwable cause) {
        logger.log(FQCN, Level.TRACE, msg, cause);
    }

    @Override
    protected void internalDebug(String msg) {
        logger.log(FQCN, Level.DEBUG, msg, null);
    }

    @Override
    protected void internalDebug(String msg, Throwable cause) {
        logger.log(FQCN, Level.DEBUG, msg, cause);
    }

    @Override
    protected void internalInfo(String msg) {
        logger.log(FQCN, Level.INFO, msg, null);
    }

    @Override
    protected void internalInfo(String msg, Throwable cause) {
        logger.log(FQCN, Level.INFO, msg, cause);
    }

    @Override
    protected void internalWarn(String msg) {
        logger.log(FQCN, Level.WARN, msg, null);
    }

    @Override
    protected void internalWarn(String msg, Throwable cause) {
        logger.log(FQCN, Level.WARN, msg, cause);
    }

    @Override
    protected void internalError(String msg) {
        logger.log(FQCN, Level.ERROR, msg, null);
    }

    @Override
    protected void internalError(String msg, Throwable cause) {
        logger.log(FQCN, Level.ERROR, msg, cause);
    }
}

View File

@ -1,41 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
import org.apache.log4j.Logger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
/**
 * An {@link ESLoggerFactory} that produces {@link Log4jESLogger} instances
 * backed by log4j {@link Logger}s.
 */
public class Log4jESLoggerFactory extends ESLoggerFactory {

    @Override
    protected ESLogger rootLogger() {
        return new Log4jESLogger(null, Logger.getRootLogger());
    }

    @Override
    protected ESLogger newInstance(String prefix, String name) {
        final Logger log4jLogger = Logger.getLogger(name);
        return new Log4jESLogger(prefix, log4jLogger);
    }
}

View File

@ -1,179 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.slf4j;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import org.slf4j.Logger;
import org.slf4j.spi.LocationAwareLogger;
/**
 * An {@link AbstractESLogger} implementation that delegates to an slf4j
 * {@link Logger}. When the underlying logger is a
 * {@link LocationAwareLogger}, the facade's fully-qualified class name is
 * passed so the backend can report the real caller's location.
 */
public class Slf4jESLogger extends AbstractESLogger {

    // FQCN of the logging facade, used by location-aware backends to skip
    // wrapper frames. Constant, so shared across instances.
    private static final String FQCN = AbstractESLogger.class.getName();

    private final Logger logger;
    private final LocationAwareLogger lALogger;

    public Slf4jESLogger(String prefix, Logger logger) {
        super(prefix);
        this.logger = logger;
        if (logger instanceof LocationAwareLogger) {
            lALogger = (LocationAwareLogger) logger;
        } else {
            lALogger = null;
        }
    }

    @Override
    public void setLevel(String level) {
        // can't set it in slf4j...
    }

    @Override
    public String getLevel() {
        // can't get it in slf4j...
        return null;
    }

    @Override
    public String getName() {
        return logger.getName();
    }

    @Override
    public boolean isTraceEnabled() {
        return logger.isTraceEnabled();
    }

    @Override
    public boolean isDebugEnabled() {
        return logger.isDebugEnabled();
    }

    @Override
    public boolean isInfoEnabled() {
        return logger.isInfoEnabled();
    }

    @Override
    public boolean isWarnEnabled() {
        return logger.isWarnEnabled();
    }

    @Override
    public boolean isErrorEnabled() {
        return logger.isErrorEnabled();
    }

    @Override
    protected void internalTrace(String msg) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, null);
        } else {
            logger.trace(msg);
        }
    }

    @Override
    protected void internalTrace(String msg, Throwable cause) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, cause);
        } else {
            // pass the cause through; previously it was silently dropped here
            logger.trace(msg, cause);
        }
    }

    @Override
    protected void internalDebug(String msg) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, null);
        } else {
            logger.debug(msg);
        }
    }

    @Override
    protected void internalDebug(String msg, Throwable cause) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, cause);
        } else {
            // pass the cause through; previously it was silently dropped here
            logger.debug(msg, cause);
        }
    }

    @Override
    protected void internalInfo(String msg) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, null);
        } else {
            logger.info(msg);
        }
    }

    @Override
    protected void internalInfo(String msg, Throwable cause) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, cause);
        } else {
            logger.info(msg, cause);
        }
    }

    @Override
    protected void internalWarn(String msg) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, null);
        } else {
            logger.warn(msg);
        }
    }

    @Override
    protected void internalWarn(String msg, Throwable cause) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, cause);
        } else {
            // pass the cause through; previously it was silently dropped here
            logger.warn(msg, cause);
        }
    }

    @Override
    protected void internalError(String msg) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, null);
        } else {
            logger.error(msg);
        }
    }

    @Override
    protected void internalError(String msg, Throwable cause) {
        if (lALogger != null) {
            lALogger.log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, cause);
        } else {
            // pass the cause through; previously it was silently dropped here
            logger.error(msg, cause);
        }
    }

    protected Logger logger() {
        return logger;
    }
}

View File

@ -1,41 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.slf4j;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An {@link ESLoggerFactory} that produces {@link Slf4jESLogger} instances
 * backed by slf4j {@link Logger}s.
 */
public class Slf4jESLoggerFactory extends ESLoggerFactory {

    @Override
    protected ESLogger rootLogger() {
        return getLogger(Logger.ROOT_LOGGER_NAME);
    }

    @Override
    protected ESLogger newInstance(String prefix, String name) {
        final Logger slf4jLogger = LoggerFactory.getLogger(name);
        return new Slf4jESLogger(prefix, slf4jLogger);
    }
}

View File

@ -1,133 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.support;
import org.elasticsearch.common.logging.ESLogger;
/**
 * Base class for {@link ESLogger} implementations. Handles the level guard
 * and prefix/message formatting, delegating the actual emission to the
 * {@code internal*} hooks implemented by concrete subclasses.
 */
public abstract class AbstractESLogger implements ESLogger {

    private final String prefix;

    protected AbstractESLogger(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public String getPrefix() {
        return this.prefix;
    }

    @Override
    public void trace(String msg, Object... params) {
        if (!isTraceEnabled()) {
            return;
        }
        internalTrace(LoggerMessageFormat.format(prefix, msg, params));
    }

    protected abstract void internalTrace(String msg);

    @Override
    public void trace(String msg, Throwable cause, Object... params) {
        if (!isTraceEnabled()) {
            return;
        }
        internalTrace(LoggerMessageFormat.format(prefix, msg, params), cause);
    }

    protected abstract void internalTrace(String msg, Throwable cause);

    @Override
    public void debug(String msg, Object... params) {
        if (!isDebugEnabled()) {
            return;
        }
        internalDebug(LoggerMessageFormat.format(prefix, msg, params));
    }

    protected abstract void internalDebug(String msg);

    @Override
    public void debug(String msg, Throwable cause, Object... params) {
        if (!isDebugEnabled()) {
            return;
        }
        internalDebug(LoggerMessageFormat.format(prefix, msg, params), cause);
    }

    protected abstract void internalDebug(String msg, Throwable cause);

    @Override
    public void info(String msg, Object... params) {
        if (!isInfoEnabled()) {
            return;
        }
        internalInfo(LoggerMessageFormat.format(prefix, msg, params));
    }

    protected abstract void internalInfo(String msg);

    @Override
    public void info(String msg, Throwable cause, Object... params) {
        if (!isInfoEnabled()) {
            return;
        }
        internalInfo(LoggerMessageFormat.format(prefix, msg, params), cause);
    }

    protected abstract void internalInfo(String msg, Throwable cause);

    @Override
    public void warn(String msg, Object... params) {
        if (!isWarnEnabled()) {
            return;
        }
        internalWarn(LoggerMessageFormat.format(prefix, msg, params));
    }

    protected abstract void internalWarn(String msg);

    @Override
    public void warn(String msg, Throwable cause, Object... params) {
        if (!isWarnEnabled()) {
            return;
        }
        internalWarn(LoggerMessageFormat.format(prefix, msg, params), cause);
    }

    protected abstract void internalWarn(String msg, Throwable cause);

    @Override
    public void error(String msg, Object... params) {
        if (!isErrorEnabled()) {
            return;
        }
        internalError(LoggerMessageFormat.format(prefix, msg, params));
    }

    protected abstract void internalError(String msg);

    @Override
    public void error(String msg, Throwable cause, Object... params) {
        if (!isErrorEnabled()) {
            return;
        }
        internalError(LoggerMessageFormat.format(prefix, msg, params), cause);
    }

    protected abstract void internalError(String msg, Throwable cause);
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.set.Sets;
import java.util.ArrayList;
import java.util.Collections;
@ -63,6 +62,11 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
}
if (setting.hasComplexMatcher()) {
Setting<?> overlappingSetting = findOverlappingSetting(setting, complexMatchers);
if (overlappingSetting != null) {
throw new IllegalArgumentException("complex setting key: [" + setting.getKey() + "] overlaps existing setting key: [" +
overlappingSetting.getKey() + "]");
}
complexMatchers.putIfAbsent(setting.getKey(), setting);
} else {
keySettings.putIfAbsent(setting.getKey(), setting);
@ -410,4 +414,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return changed;
}
/**
 * Finds an already-registered complex-matcher setting whose key pattern overlaps with
 * {@code newSetting}'s pattern (in either direction), or {@code null} when there is no overlap.
 */
private static Setting<?> findOverlappingSetting(Setting<?> newSetting, Map<String, Setting<?>> complexMatchers) {
    assert newSetting.hasComplexMatcher();
    if (complexMatchers.containsKey(newSetting.getKey())) {
        // we return null here because we use a putIfAbsent call when inserting into the map, so if it exists then we already checked
        // the setting to make sure there are no overlapping settings.
        return null;
    }
    for (Setting<?> existingSetting : complexMatchers.values()) {
        // overlap is checked symmetrically: either pattern matching the other's key counts
        if (newSetting.match(existingSetting.getKey()) || existingSetting.match(newSetting.getKey())) {
            return existingSetting;
        }
    }
    return null;
}
}

View File

@ -36,8 +36,6 @@ import java.util.concurrent.LinkedTransferQueue;
*/
public abstract class ConcurrentCollections {
private final static boolean useLinkedTransferQueue = Boolean.parseBoolean(System.getProperty("es.useLinkedTransferQueue", "false"));
static final int aggressiveConcurrencyLevel;
static {
@ -71,9 +69,6 @@ public abstract class ConcurrentCollections {
}
public static <T> Queue<T> newQueue() {
if (useLinkedTransferQueue) {
return new LinkedTransferQueue<>();
}
return new ConcurrentLinkedQueue<>();
}

View File

@ -94,6 +94,12 @@ public interface Discovery extends LifecycleComponent<Discovery> {
DiscoveryStats stats();
/**
* Triggers the first join cycle
*/
void startInitialJoin();
/***
* @return the current value of minimum master nodes, or -1 for not set
*/

View File

@ -87,8 +87,9 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
logger.info(discovery.nodeDescription());
}
public void waitForInitialState() {
public void joinClusterAndWaitForInitialState() {
try {
discovery.startInitialJoin();
if (!initialStateListener.waitForInitialState(initialStateTimeout)) {
logger.warn("waited for {} and no initial state was set by the discovery", initialStateTimeout);
}

View File

@ -100,6 +100,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
protected void doStart() {
}
@Override
public void startInitialJoin() {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {

View File

@ -216,7 +216,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
joinThreadControl.start();
pingService.start();
this.nodeJoinController = new NodeJoinController(clusterService, routingService, discoverySettings, settings);
}
@Override
public void startInitialJoin() {
// start the join thread from a cluster state update. See {@link JoinThreadControl} for details.
clusterService.submitStateUpdateTask("initial_join", new ClusterStateUpdateTask() {

View File

@ -133,27 +133,6 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
@Override
protected void doStart() {
clusterService.addLast(this);
// check we didn't miss any cluster state that came in until now / during the addition
clusterService.submitStateUpdateTask("gateway_initial_state_recovery", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
checkStateMeetsSettingsAndMaybeRecover(currentState);
return currentState;
}
@Override
public boolean runOnlyOnMaster() {
// It's OK to run on non masters as checkStateMeetsSettingsAndMaybeRecover checks for this
// we return false to avoid unneeded failure logs
return false;
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("unexpected failure while checking if state can be recovered. another attempt will be made with the next cluster state change", t);
}
});
}
@Override
@ -170,10 +149,9 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
if (lifecycle.stoppedOrClosed()) {
return;
}
checkStateMeetsSettingsAndMaybeRecover(event.state());
}
protected void checkStateMeetsSettingsAndMaybeRecover(ClusterState state) {
final ClusterState state = event.state();
if (state.nodes().localNodeMaster() == false) {
// not our job to recover
return;

View File

@ -154,7 +154,7 @@ public final class IndexModule {
*/
public void addIndexStore(String type, BiFunction<IndexSettings, IndexStoreConfig, IndexStore> provider) {
if (storeTypes.containsKey(type)) {
throw new IllegalArgumentException("key [" + type +"] already registerd");
throw new IllegalArgumentException("key [" + type +"] already registered");
}
storeTypes.put(type, provider);
}

View File

@ -45,7 +45,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);
if (this.words == null) {
throw new IllegalArgumentException("mising or empty [common_words] or [common_words_path] configuration for common_grams token filter");
throw new IllegalArgumentException("missing or empty [common_words] or [common_words_path] configuration for common_grams token filter");
}
}

View File

@ -52,7 +52,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
List<String> rules = Analysis.getWordList(env, settings, "synonyms");
StringBuilder sb = new StringBuilder();
for (String line : rules) {
sb.append(line).append(System.getProperty("line.separator"));
sb.append(line).append(System.lineSeparator());
}
rulesReader = new FastStringReader(sb.toString());
} else if (settings.get("synonyms_path") != null) {

View File

@ -36,7 +36,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;

View File

@ -178,7 +178,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
throw new IllegalArgumentException("[" + NAME + "] requires query value");
}
if (fields == null) {
throw new IllegalArgumentException("[" + NAME + "] requires fields at initalization time");
throw new IllegalArgumentException("[" + NAME + "] requires fields at initialization time");
}
this.value = value;
this.fieldsBoosts = new TreeMap<>();

View File

@ -38,7 +38,7 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
protected final long firstOperationOffset;
public BaseTranslogReader(long generation, FileChannel channel, Path path, long firstOperationOffset) {
assert Translog.parseIdFromFileName(path) == generation : "generation missmatch. Path: " + Translog.parseIdFromFileName(path) + " but generation: " + generation;
assert Translog.parseIdFromFileName(path) == generation : "generation mismatch. Path: " + Translog.parseIdFromFileName(path) + " but generation: " + generation;
this.generation = generation;
this.path = path;

View File

@ -40,7 +40,6 @@ import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@ -50,7 +49,6 @@ import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
@ -279,8 +277,6 @@ public class Node implements Closeable {
injector.getInstance(IndicesTTLService.class).start();
injector.getInstance(SnapshotsService.class).start();
injector.getInstance(SnapshotShardsService.class).start();
injector.getInstance(TransportService.class).start();
injector.getInstance(ClusterService.class).start();
injector.getInstance(RoutingService.class).start();
injector.getInstance(SearchService.class).start();
injector.getInstance(MonitorService.class).start();
@ -289,16 +285,24 @@ public class Node implements Closeable {
// TODO hack around circular dependencies problems
injector.getInstance(GatewayAllocator.class).setReallocation(injector.getInstance(ClusterService.class), injector.getInstance(RoutingService.class));
DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
discoService.waitForInitialState();
// gateway should start after disco, so it can try and recovery from gateway on "start"
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(GatewayService.class).start();
// Start the transport service now so the publish address will be added to the local disco node in ClusterService
TransportService transportService = injector.getInstance(TransportService.class);
transportService.start();
injector.getInstance(ClusterService.class).start();
// start after cluster service so the local disco is known
DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
transportService.acceptIncomingRequests();
discoService.joinClusterAndWaitForInitialState();
if (settings.getAsBoolean("http.enabled", true)) {
injector.getInstance(HttpServer.class).start();
}
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();
if (WRITE_PORTS_FIELD_SETTING.get(settings)) {

View File

@ -24,7 +24,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;

View File

@ -30,6 +30,7 @@ import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.RestToXContentListener;
import org.elasticsearch.tasks.TaskId;
import static org.elasticsearch.rest.RestRequest.Method.POST;
@ -40,22 +41,20 @@ public class RestCancelTasksAction extends BaseRestHandler {
public RestCancelTasksAction(Settings settings, RestController controller, Client client) {
super(settings, client);
controller.registerHandler(POST, "/_tasks/_cancel", this);
controller.registerHandler(POST, "/_tasks/{nodeId}/_cancel", this);
controller.registerHandler(POST, "/_tasks/{nodeId}/{taskId}/_cancel", this);
controller.registerHandler(POST, "/_tasks/{taskId}/_cancel", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
long taskId = request.paramAsLong("taskId", ListTasksRequest.ALL_TASKS);
TaskId taskId = new TaskId(request.param("taskId"));
String[] actions = Strings.splitStringByCommaToArray(request.param("actions"));
String parentNode = request.param("parent_node");
long parentTaskId = request.paramAsLong("parent_task", ListTasksRequest.ALL_TASKS);
TaskId parentTaskId = new TaskId(request.param("parent_task_id"));
CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(nodesIds);
CancelTasksRequest cancelTasksRequest = new CancelTasksRequest();
cancelTasksRequest.taskId(taskId);
cancelTasksRequest.nodesIds(nodesIds);
cancelTasksRequest.actions(actions);
cancelTasksRequest.parentNode(parentNode);
cancelTasksRequest.parentTaskId(parentTaskId);
client.admin().cluster().cancelTasks(cancelTasksRequest, new RestToXContentListener<>(channel));
}

View File

@ -29,6 +29,7 @@ import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.RestToXContentListener;
import org.elasticsearch.tasks.TaskId;
import static org.elasticsearch.rest.RestRequest.Method.GET;
@ -39,24 +40,22 @@ public class RestListTasksAction extends BaseRestHandler {
public RestListTasksAction(Settings settings, RestController controller, Client client) {
super(settings, client);
controller.registerHandler(GET, "/_tasks", this);
controller.registerHandler(GET, "/_tasks/{nodeId}", this);
controller.registerHandler(GET, "/_tasks/{nodeId}/{taskId}", this);
controller.registerHandler(GET, "/_tasks/{taskId}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
boolean detailed = request.paramAsBoolean("detailed", false);
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
long taskId = request.paramAsLong("taskId", ListTasksRequest.ALL_TASKS);
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("node_id"));
TaskId taskId = new TaskId(request.param("taskId"));
String[] actions = Strings.splitStringByCommaToArray(request.param("actions"));
String parentNode = request.param("parent_node");
long parentTaskId = request.paramAsLong("parent_task", ListTasksRequest.ALL_TASKS);
TaskId parentTaskId = new TaskId(request.param("parent_task_id"));
ListTasksRequest listTasksRequest = new ListTasksRequest(nodesIds);
ListTasksRequest listTasksRequest = new ListTasksRequest();
listTasksRequest.taskId(taskId);
listTasksRequest.nodesIds(nodesIds);
listTasksRequest.detailed(detailed);
listTasksRequest.actions(actions);
listTasksRequest.parentNode(parentNode);
listTasksRequest.parentTaskId(parentTaskId);
client.admin().cluster().listTasks(listTasksRequest, new RestToXContentListener<>(channel));
}

View File

@ -102,7 +102,7 @@ public class RestAnalyzeAction extends BaseRestHandler {
public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) {
try (XContentParser parser = XContentHelper.createParser(content)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Malforrmed content, must start with an object");
throw new IllegalArgumentException("Malformed content, must start with an object");
} else {
XContentParser.Token token;
String currentFieldName = null;

View File

@ -87,7 +87,7 @@ public class RestSearchScrollAction extends BaseRestHandler {
public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) {
try (XContentParser parser = XContentHelper.createParser(content)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Malforrmed content, must start with an object");
throw new IllegalArgumentException("Malformed content, must start with an object");
} else {
XContentParser.Token token;
String currentFieldName = null;

View File

@ -26,7 +26,7 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.BoolQueryParser;
@ -287,14 +288,15 @@ public class SearchModule extends AbstractModule {
/**
* Register a new ScoreFunctionParser.
*/
public void registerFunctionScoreParser(ScoreFunctionParser<?> parser) {
public void registerFunctionScoreParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> parser) {
for (String name: parser.getNames()) {
Object oldValue = functionScoreParsers.putIfAbsent(name, parser);
if (oldValue != null) {
throw new IllegalArgumentException("Function score parser [" + oldValue + "] already registered for name [" + name + "]");
}
}
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, parser.getBuilderPrototype());
@SuppressWarnings("unchecked") NamedWriteable<? extends ScoreFunctionBuilder> sfb = parser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb);
}
public void registerQueryParser(Supplier<QueryParser<?>> parser) {
@ -358,14 +360,15 @@ public class SearchModule extends AbstractModule {
public IndicesQueriesRegistry buildQueryParserRegistry() {
Map<String, QueryParser<?>> queryParsersMap = new HashMap<>();
for (Supplier<QueryParser<?>> parserSupplier : queryParsers) {
QueryParser<?> parser = parserSupplier.get();
QueryParser<? extends QueryBuilder> parser = parserSupplier.get();
for (String name: parser.names()) {
Object oldValue = queryParsersMap.putIfAbsent(name, parser);
if (oldValue != null) {
throw new IllegalArgumentException("Query parser [" + oldValue + "] already registered for name [" + name + "] while trying to register [" + parser + "]");
}
}
namedWriteableRegistry.registerPrototype(QueryBuilder.class, parser.getBuilderPrototype());
@SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = parser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb);
}
return new IndicesQueriesRegistry(settings, queryParsersMap);
}

View File

@ -199,7 +199,7 @@ public class AggregatorFactories {
List<PipelineAggregatorBuilder<?>> orderedPipelineAggregators, List<PipelineAggregatorBuilder<?>> unmarkedBuilders,
Set<PipelineAggregatorBuilder<?>> temporarilyMarked, PipelineAggregatorBuilder<?> builder) {
if (temporarilyMarked.contains(builder)) {
throw new IllegalArgumentException("Cyclical dependancy found with pipeline aggregator [" + builder.getName() + "]");
throw new IllegalArgumentException("Cyclical dependency found with pipeline aggregator [" + builder.getName() + "]");
} else if (unmarkedBuilders.contains(builder)) {
temporarilyMarked.add(builder);
String[] bucketsPaths = builder.getBucketsPaths();

View File

@ -228,7 +228,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
metaData);
}
throw new AggregationExecutionException("sigfnificant_terms aggregation cannot be applied to field ["
throw new AggregationExecutionException("significant_terms aggregation cannot be applied to field ["
+ config.fieldContext().field() + "]. It can only be applied to numeric or string fields.");
}

View File

@ -83,10 +83,8 @@ public class SearchPhaseController extends AbstractComponent {
};
public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
public static final String SEARCH_CONTROLLER_OPTIMIZE_SINGLE_SHARD_KEY = "search.controller.optimize_single_shard";
private final BigArrays bigArrays;
private final boolean optimizeSingleShard;
private ScriptService scriptService;
@ -95,11 +93,6 @@ public class SearchPhaseController extends AbstractComponent {
super(settings);
this.bigArrays = bigArrays;
this.scriptService = scriptService;
this.optimizeSingleShard = settings.getAsBoolean(SEARCH_CONTROLLER_OPTIMIZE_SINGLE_SHARD_KEY, true);
}
public boolean optimizeSingleShard() {
return optimizeSingleShard;
}
public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
@ -168,50 +161,48 @@ public class SearchPhaseController extends AbstractComponent {
return EMPTY_DOCS;
}
if (optimizeSingleShard) {
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
// lets see if we only got hits from a single shard, if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
if (result != null) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
shardIndex = entry.index;
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
// lets see if we only got hits from a single shard, if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
if (result != null) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
shardIndex = entry.index;
}
}
if (canOptimize) {
int offset = result.from();
if (ignoreFrom) {
offset = 0;
}
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
if (scoreDocs.length == 0 || scoreDocs.length < offset) {
return EMPTY_DOCS;
}
}
if (canOptimize) {
int offset = result.from();
if (ignoreFrom) {
offset = 0;
}
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
if (scoreDocs.length == 0 || scoreDocs.length < offset) {
return EMPTY_DOCS;
}
int resultDocsSize = result.size();
if ((scoreDocs.length - offset) < resultDocsSize) {
resultDocsSize = scoreDocs.length - offset;
}
ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
for (int i = 0; i < resultDocsSize; i++) {
ScoreDoc scoreDoc = scoreDocs[offset + i];
scoreDoc.shardIndex = shardIndex;
docs[i] = scoreDoc;
}
return docs;
int resultDocsSize = result.size();
if ((scoreDocs.length - offset) < resultDocsSize) {
resultDocsSize = scoreDocs.length - offset;
}
ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
for (int i = 0; i < resultDocsSize; i++) {
ScoreDoc scoreDoc = scoreDocs[offset + i];
scoreDoc.shardIndex = shardIndex;
docs[i] = scoreDoc;
}
return docs;
}
@SuppressWarnings("unchecked")

View File

@ -32,8 +32,8 @@ public class CancellableTask extends Task {
super(id, type, action, description);
}
public CancellableTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
public CancellableTask(long id, String type, String action, String description, TaskId parentTaskId) {
super(id, type, action, description, parentTaskId);
}
/**

View File

@ -30,8 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
*/
public class Task {
public static final long NO_PARENT_ID = 0;
private final long id;
private final String type;
@ -40,22 +38,18 @@ public class Task {
private final String description;
private final String parentNode;
private final long parentId;
private final TaskId parentTask;
public Task(long id, String type, String action, String description) {
this(id, type, action, description, null, NO_PARENT_ID);
this(id, type, action, description, TaskId.EMPTY_TASK_ID);
}
public Task(long id, String type, String action, String description, String parentNode, long parentId) {
public Task(long id, String type, String action, String description, TaskId parentTask) {
this.id = id;
this.type = type;
this.action = action;
this.description = description;
this.parentNode = parentNode;
this.parentId = parentId;
this.parentTask = parentTask;
}
/**
@ -75,7 +69,7 @@ public class Task {
description = getDescription();
status = getStatus();
}
return new TaskInfo(node, getId(), getType(), getAction(), description, status, parentNode, parentId);
return new TaskInfo(node, getId(), getType(), getAction(), description, status, parentTask);
}
/**
@ -106,18 +100,11 @@ public class Task {
return description;
}
/**
* Returns the parent node of the task or null if the task doesn't have any parent tasks
*/
public String getParentNode() {
return parentNode;
}
/**
* Returns id of the parent task or NO_PARENT_ID if the task doesn't have any parent tasks
*/
public long getParentId() {
return parentId;
public TaskId getParentTaskId() {
return parentTask;
}
/**

View File

@ -0,0 +1,118 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tasks;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
/**
 * Task id that consists of node id and id of the task on the node.
 *
 * String form is {@code nodeId + ":" + id}, or the literal {@code "unset"}
 * for the empty id. Wire form is the node id string followed by the long id
 * (see {@link #writeTo}).
 */
public final class TaskId implements Writeable<TaskId> {

    // Sentinel for "no parent task": empty node id and id == -1.
    public final static TaskId EMPTY_TASK_ID = new TaskId("", -1L);

    private final String nodeId;
    private final long id;

    /**
     * Creates a task id from its components.
     *
     * @param nodeId id of the node the task runs on
     * @param id     id of the task on that node
     */
    public TaskId(String nodeId, long id) {
        this.nodeId = nodeId;
        this.id = id;
    }

    /**
     * Parses a task id from its string form ({@code "nodeId:id"}).
     * An empty/null string or the literal {@code "unset"} yields the empty id
     * (same component values as {@link #EMPTY_TASK_ID}).
     *
     * @throws IllegalArgumentException if the string is not of the form
     *         {@code nodeId:id} or the id part is not a valid long
     */
    public TaskId(String taskId) {
        if (Strings.hasLength(taskId) && "unset".equals(taskId) == false) {
            String[] s = Strings.split(taskId, ":");
            // NOTE(review): assumes Strings.split signals malformed input by
            // returning null or a wrong-length array rather than throwing --
            // confirm against the helper's contract.
            if (s == null || s.length != 2) {
                throw new IllegalArgumentException("malformed task id " + taskId);
            }
            this.nodeId = s[0];
            try {
                this.id = Long.parseLong(s[1]);
            } catch (NumberFormatException ex) {
                throw new IllegalArgumentException("malformed task id " + taskId, ex);
            }
        } else {
            nodeId = "";
            id = -1L;
        }
    }

    /**
     * Reads a task id from a stream: node id string, then the long id
     * (mirror of {@link #writeTo}).
     */
    public TaskId(StreamInput in) throws IOException {
        nodeId = in.readString();
        id = in.readLong();
    }

    public String getNodeId() {
        return nodeId;
    }

    public long getId() {
        return id;
    }

    /**
     * Returns {@code true} when this is the empty (unset) id, i.e. {@code id == -1}.
     *
     * NOTE(review): despite its name, this returns {@code true} when the id is
     * NOT set -- see {@link #toString()}, which prints {@code "unset"} exactly
     * when this is true, and callers that write {@code isSet() == false} to
     * mean "has a real parent". The name inverts the meaning; consider
     * renaming (e.g. to {@code isEmpty()}) in a follow-up.
     */
    public boolean isSet() {
        return id == -1L;
    }

    @Override
    public String toString() {
        if (isSet()) {
            return "unset";
        } else {
            return nodeId + ":" + id;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeId);
        out.writeLong(id);
    }

    @Override
    public TaskId readFrom(StreamInput in) throws IOException {
        return new TaskId(in);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        TaskId taskId = (TaskId) o;

        // Equal iff both the numeric id and the node id match.
        if (id != taskId.id) return false;
        return nodeId.equals(taskId.nodeId);
    }

    @Override
    public int hashCode() {
        // Standard 31-based combination; long id folded via xor-shift,
        // consistent with equals() above.
        int result = nodeId.hashCode();
        result = 31 * result + (int) (id ^ (id >>> 32));
        return result;
    }
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.tasks;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -51,7 +50,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
private final AtomicLong taskIdGenerator = new AtomicLong();
private final Map<Tuple<String, Long>, String> banedParents = new ConcurrentHashMap<>();
private final Map<TaskId, String> banedParents = new ConcurrentHashMap<>();
public TaskManager(Settings settings) {
super(settings);
@ -77,8 +76,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
CancellableTaskHolder oldHolder = cancellableTasks.put(task.getId(), holder);
assert oldHolder == null;
// Check if this task was banned before we start it
if (task.getParentNode() != null && banedParents.isEmpty() == false) {
String reason = banedParents.get(new Tuple<>(task.getParentNode(), task.getParentId()));
if (task.getParentTaskId().isSet() == false && banedParents.isEmpty() == false) {
String reason = banedParents.get(task.getParentTaskId());
if (reason != null) {
try {
holder.cancel(reason);
@ -191,22 +190,21 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
* <p>
* This method is called when a parent task that has children is cancelled.
*/
public void setBan(String parentNode, long parentId, String reason) {
logger.trace("setting ban for the parent task {}:{} {}", parentNode, parentId, reason);
public void setBan(TaskId parentTaskId, String reason) {
logger.trace("setting ban for the parent task {} {}", parentTaskId, reason);
// Set the ban first, so the newly created tasks cannot be registered
Tuple<String, Long> ban = new Tuple<>(parentNode, parentId);
synchronized (banedParents) {
if (lastDiscoveryNodes.nodeExists(parentNode)) {
if (lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId())) {
// Only set the ban if the node is the part of the cluster
banedParents.put(ban, reason);
banedParents.put(parentTaskId, reason);
}
}
// Now go through already running tasks and cancel them
for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
CancellableTaskHolder holder = taskEntry.getValue();
if (holder.hasParent(parentNode, parentId)) {
if (holder.hasParent(parentTaskId)) {
holder.cancel(reason);
}
}
@ -217,9 +215,9 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
* <p>
* This method is called when a previously banned task finally cancelled
*/
public void removeBan(String parentNode, long parentId) {
logger.trace("removing ban for the parent task {}:{} {}", parentNode, parentId);
banedParents.remove(new Tuple<>(parentNode, parentId));
public void removeBan(TaskId parentTaskId) {
logger.trace("removing ban for the parent task {}", parentTaskId);
banedParents.remove(parentTaskId);
}
@Override
@ -228,14 +226,12 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
synchronized (banedParents) {
lastDiscoveryNodes = event.state().getNodes();
// Remove all bans that were registered by nodes that are no longer in the cluster state
Iterator<Tuple<String, Long>> banIterator = banedParents.keySet().iterator();
Iterator<TaskId> banIterator = banedParents.keySet().iterator();
while (banIterator.hasNext()) {
Tuple<String, Long> nodeAndTaskId = banIterator.next();
String nodeId = nodeAndTaskId.v1();
Long taskId = nodeAndTaskId.v2();
if (lastDiscoveryNodes.nodeExists(nodeId) == false) {
logger.debug("Removing ban for the parent [{}:{}] on the node [{}], reason: the parent node is gone", nodeId,
taskId, event.state().getNodes().localNode());
TaskId taskId = banIterator.next();
if (lastDiscoveryNodes.nodeExists(taskId.getNodeId()) == false) {
logger.debug("Removing ban for the parent [{}] on the node [{}], reason: the parent node is gone", taskId,
event.state().getNodes().localNode());
banIterator.remove();
}
}
@ -244,10 +240,10 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
CancellableTaskHolder holder = taskEntry.getValue();
CancellableTask task = holder.getTask();
String parent = task.getParentNode();
if (parent != null && lastDiscoveryNodes.nodeExists(parent) == false) {
TaskId parentTaskId = task.getParentTaskId();
if (parentTaskId.isSet() == false && lastDiscoveryNodes.nodeExists(parentTaskId.getNodeId()) == false) {
if (task.cancelOnParentLeaving()) {
holder.cancel("Coordinating node [" + parent + "] left the cluster");
holder.cancel("Coordinating node [" + parentTaskId.getNodeId() + "] left the cluster");
}
}
}
@ -340,8 +336,8 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen
}
public boolean hasParent(String parentNode, long parentId) {
return parentId == task.getParentId() && parentNode.equals(task.getParentNode());
public boolean hasParent(TaskId parentTaskId) {
return task.getParentTaskId().equals(parentTaskId);
}
public CancellableTask getTask() {

View File

@ -54,8 +54,8 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
@ -71,7 +71,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
public static final String DIRECT_RESPONSE_PROFILE = ".direct";
private final AtomicBoolean started = new AtomicBoolean(false);
private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1);
protected final Transport transport;
protected final ThreadPool threadPool;
protected final TaskManager taskManager;
@ -167,6 +167,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
void setTracerLogExclude(List<String> tracelLogExclude) {
this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY);
}
@Override
protected void doStart() {
adapter.rxMetric.clear();
@ -179,14 +180,10 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
logger.info("profile [{}]: {}", entry.getKey(), entry.getValue());
}
}
boolean setStarted = started.compareAndSet(false, true);
assert setStarted : "service was already started";
}
@Override
protected void doStop() {
final boolean setStopped = started.compareAndSet(true, false);
assert setStopped : "service has already been stopped";
try {
transport.stop();
} finally {
@ -213,6 +210,15 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
transport.close();
}
/**
* start accepting incoming requests.
* when the transport layer starts up it will block any incoming requests until
* this method is called
*/
public void acceptIncomingRequests() {
blockIncomingRequestsLatch.countDown();
}
public boolean addressSupported(Class<? extends TransportAddress> address) {
return transport.addressSupported(address);
}
@ -302,7 +308,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
timeoutHandler = new TimeoutHandler(requestId);
}
clientHandlers.put(requestId, new RequestHolder<>(new ContextRestoreResponseHandler<T>(threadPool.getThreadContext().newStoredContext(), handler), node, action, timeoutHandler));
if (started.get() == false) {
if (lifecycle.stoppedOrClosed()) {
// if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify the caller.
// it will only notify if the toStop code hasn't done the work yet.
throw new TransportException("TransportService is closed stopped can't send request");
@ -405,10 +411,11 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
/**
* Registers a new request handler
* @param action The action the request handler is associated with
*
* @param action The action the request handler is associated with
* @param requestFactory a callable to be used construct new instances for streaming
* @param executor The executor the request handling will be executed on
* @param handler The handler itself that implements the request handling
* @param executor The executor the request handling will be executed on
* @param handler The handler itself that implements the request handling
*/
public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> requestFactory, String executor, TransportRequestHandler<Request> handler) {
RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(action, requestFactory, taskManager, handler, executor, false);
@ -417,11 +424,12 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
/**
* Registers a new request handler
* @param action The action the request handler is associated with
* @param request The request class that will be used to constrcut new instances for streaming
* @param executor The executor the request handling will be executed on
*
* @param action The action the request handler is associated with
* @param request The request class that will be used to constrcut new instances for streaming
* @param executor The executor the request handling will be executed on
* @param forceExecution Force execution on the executor queue and never reject it
* @param handler The handler itself that implements the request handling
* @param handler The handler itself that implements the request handling
*/
public <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request, String executor, boolean forceExecution, TransportRequestHandler<Request> handler) {
RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(action, request, taskManager, handler, executor, forceExecution);
@ -494,6 +502,11 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
@Override
public void onRequestReceived(long requestId, String action) {
try {
blockIncomingRequestsLatch.await();
} catch (InterruptedException e) {
logger.trace("interrupted while waiting for incoming requests block to be removed");
}
if (traceEnabled() && shouldTraceAction(action)) {
traceReceivedRequest(requestId, action);
}
@ -729,6 +742,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
private final static class ContextRestoreResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
private final TransportResponseHandler<T> delegate;
private final ThreadContext.StoredContext threadContext;
private ContextRestoreResponseHandler(ThreadContext.StoredContext threadContext, TransportResponseHandler<T> delegate) {
this.delegate = delegate;
this.threadContext = threadContext;
@ -766,7 +780,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
final ThreadPool threadPool;
public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId,
TransportServiceAdapter adapter, ThreadPool threadPool) {
TransportServiceAdapter adapter, ThreadPool threadPool) {
this.logger = logger;
this.localNode = localNode;
this.action = action;

View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -87,8 +88,8 @@ public class CancellableTasksTests extends TaskManagerTestCase {
}
@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new CancellableTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new CancellableTask(id, type, action, getDescription(), parentTaskId);
}
}
@ -235,9 +236,9 @@ public class CancellableTasksTests extends TaskManagerTestCase {
});
// Cancel main task
CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
CancelTasksRequest request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(mainTask.getId());
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
// And send the cancellation request to a random node
CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
.get();
@ -269,7 +270,8 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are no longer running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest(testNodes[0].discoveryNode.getId()).taskId(mainTask.getId())).get();
.transportListTasksAction.execute(new ListTasksRequest().taskId(
new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()))).get();
assertEquals(0, listTasksResponse.getTasks().size());
// Make sure that there are no leftover bans, the ban removal is async, so we might return from the cancellation
@ -311,7 +313,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().parentNode(mainNode).taskId(mainTask.getId())).get();
.transportListTasksAction.execute(new ListTasksRequest().parentTaskId(new TaskId(mainNode, mainTask.getId()))).get();
assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size()));
// Simulate the coordinating node leaving the cluster
@ -328,9 +330,9 @@ public class CancellableTasksTests extends TaskManagerTestCase {
if (simulateBanBeforeLeaving) {
logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster");
// Simulate issuing cancel request on the node that is about to leave the cluster
CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
CancelTasksRequest request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(mainTask.getId());
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()));
// And send the cancellation request to a random node
CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get();
logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster");
@ -354,7 +356,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
// Make sure that tasks are no longer running
try {
ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().parentNode(mainNode).taskId(mainTask.getId())).get();
.transportListTasksAction.execute(new ListTasksRequest().taskId(new TaskId(mainNode, mainTask.getId()))).get();
assertEquals(0, listTasksResponse1.getTasks().size());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();

View File

@ -203,6 +203,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
actionFilters, indexNameExpressionResolver);
transportCancelTasksAction = new TransportCancelTasksAction(settings, clusterName, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver);
transportService.acceptIncomingRequests();
}
public final TestClusterService clusterService;

View File

@ -110,7 +110,7 @@ public class TasksIT extends ESIntegTestCase {
List<TaskInfo> tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1);
// Verify that one of these tasks is a parent of another task
if (tasks.get(0).getParentNode() == null) {
if (tasks.get(0).getParentTaskId().isSet()) {
assertParentTask(Collections.singletonList(tasks.get(1)), tasks.get(0));
} else {
assertParentTask(Collections.singletonList(tasks.get(0)), tasks.get(1));
@ -227,7 +227,9 @@ public class TasksIT extends ESIntegTestCase {
} else {
// A [s][r] level task should have a corresponding [s] level task on the a different node (where primary is located)
sTask = findEvents(RefreshAction.NAME + "[s]",
event -> event.v1() && taskInfo.getParentNode().equals(event.v2().getNode().getId()) && taskInfo.getDescription().equals(event.v2().getDescription()));
event -> event.v1() && taskInfo.getParentTaskId().getNodeId().equals(event.v2().getNode().getId()) && taskInfo
.getDescription()
.equals(event.v2().getDescription()));
}
// There should be only one parent task
assertEquals(1, sTask.size());
@ -393,9 +395,10 @@ public class TasksIT extends ESIntegTestCase {
*/
private void assertParentTask(List<TaskInfo> tasks, TaskInfo parentTask) {
for (TaskInfo task : tasks) {
assertNotNull(task.getParentNode());
assertEquals(parentTask.getNode().getId(), task.getParentNode());
assertEquals(parentTask.getId(), task.getParentId());
assertFalse(task.getParentTaskId().isSet());
assertEquals(parentTask.getNode().getId(), task.getParentTaskId().getNodeId());
assertTrue(Strings.hasLength(task.getParentTaskId().getNodeId()));
assertEquals(parentTask.getId(), task.getParentTaskId().getId());
}
}
}

View File

@ -47,6 +47,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -84,8 +85,8 @@ public class TestTaskPlugin extends Plugin {
private volatile boolean blocked = true;
public TestTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
public TestTask(long id, String type, String action, String description, TaskId parentTaskId) {
super(id, type, action, description, parentTaskId);
}
public boolean isBlocked() {
@ -172,8 +173,8 @@ public class TestTaskPlugin extends Plugin {
}
@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new TestTask(id, type, action, this.getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new TestTask(id, type, action, this.getDescription(), parentTaskId);
}
}

View File

@ -43,13 +43,11 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
@ -103,9 +101,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
}
@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
if (enableTaskManager) {
return super.createTask(id, type, action, parentTaskNode, parentTaskId);
return super.createTask(id, type, action, parentTaskId);
} else {
return null;
}
@ -313,7 +311,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
}
Task task = actions[0].execute(request, listener);
logger.info("Awaiting for all actions to start");
actionLatch.await();
assertTrue(actionLatch.await(10, TimeUnit.SECONDS));
logger.info("Done waiting for all actions to start");
return task;
}
@ -426,14 +424,13 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Find tasks with common parent
listTasksRequest = new ListTasksRequest();
listTasksRequest.parentNode(parentNode);
listTasksRequest.parentTaskId(parentTaskId);
listTasksRequest.parentTaskId(new TaskId(parentNode, parentTaskId));
response = testNode.transportListTasksAction.execute(listTasksRequest).get();
assertEquals(testNodes.length, response.getTasks().size());
for (TaskInfo task : response.getTasks()) {
assertEquals("testAction[n]", task.getAction());
assertEquals(parentNode, task.getParentNode());
assertEquals(parentTaskId, task.getParentId());
assertEquals(parentNode, task.getParentTaskId().getNodeId());
assertEquals(parentTaskId, task.getParentTaskId().getId());
}
// Release all tasks and wait for response
@ -514,7 +511,8 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
String actionName = "testAction"; // only pick the main action
// Try to cancel main task using action name
CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
CancelTasksRequest request = new CancelTasksRequest();
request.nodesIds(testNodes[0].discoveryNode.getId());
request.reason("Testing Cancellation");
request.actions(actionName);
CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
@ -527,9 +525,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// Try to cancel main task using id
request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
request = new CancelTasksRequest();
request.reason("Testing Cancellation");
request.taskId(task.getId());
request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId()));
response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request).get();
// Shouldn't match any tasks since testAction doesn't support cancellation
@ -601,7 +599,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
@Override
protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) {
logger.info("Task action on node " + node);
if (failTaskOnNode == node && task.getParentNode() != null) {
if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) {
logger.info("Failing on node " + node);
throw new RuntimeException("Task level failure");
}

View File

@ -150,9 +150,9 @@ public class BulkRequestTests extends ESTestCase {
BulkRequest bulkRequest = new BulkRequest();
try {
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
fail("should have thrown an exception about the unknown paramater _foo");
fail("should have thrown an exception about the unknown parameter _foo");
} catch (IllegalArgumentException e) {
assertThat("message contains error about the unknown paramater _foo: " + e.getMessage(),
assertThat("message contains error about the unknown parameter _foo: " + e.getMessage(),
e.getMessage().contains("Action/metadata line [3] contains an unknown parameter [_foo]"), equalTo(true));
}
}

View File

@ -185,6 +185,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
clusterService = new TestClusterService(THREAD_POOL);
final TransportService transportService = new TransportService(transport, THREAD_POOL);
transportService.start();
transportService.acceptIncomingRequests();
setClusterState(clusterService, TEST_INDEX);
action = new TestTransportBroadcastByNodeAction(
Settings.EMPTY,

View File

@ -84,6 +84,7 @@ public class TransportMasterNodeActionTests extends ESTestCase {
clusterService = new TestClusterService(threadPool);
transportService = new TransportService(transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Version.CURRENT);
remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Version.CURRENT);
allNodes = new DiscoveryNode[] { localNode, remoteNode };

View File

@ -88,6 +88,7 @@ public class BroadcastReplicationTests extends ESTestCase {
clusterService = new TestClusterService(threadPool);
transportService = new TransportService(transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), null);
}

View File

@ -18,8 +18,6 @@
*/
package org.elasticsearch.action.support.replication;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ReplicationResponse;
@ -126,6 +124,7 @@ public class TransportReplicationActionTests extends ESTestCase {
clusterService = new TestClusterService(threadPool);
transportService = new TransportService(transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool);
count.set(1);
}
@ -1016,7 +1015,7 @@ public class TransportReplicationActionTests extends ESTestCase {
* half the time.
*/
private ReplicationTask maybeTask() {
return random().nextBoolean() ? new ReplicationTask(0, null, null, null, null, 0) : null;
return random().nextBoolean() ? new ReplicationTask(0, null, null, null, null) : null;
}
/**

View File

@ -141,6 +141,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
clusterService = new TestClusterService(THREAD_POOL);
transportService = new TransportService(transport, THREAD_POOL);
transportService.start();
transportService.acceptIncomingRequests();
action = new TestTransportInstanceSingleOperationAction(
Settings.EMPTY,
"indices:admin/test",

View File

@ -73,6 +73,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
};
transportService = new TransportService(Settings.EMPTY, transport, threadPool, new NamedWriteableRegistry());
transportService.start();
transportService.acceptIncomingRequests();
transportClientNodesService = new TransportClientNodesService(Settings.EMPTY, ClusterName.DEFAULT, transportService, threadPool, Version.CURRENT);
nodesCount = randomIntBetween(1, 10);

View File

@ -107,6 +107,7 @@ public class ShardStateActionTests extends ESTestCase {
clusterService = new TestClusterService(THREAD_POOL);
transportService = new TransportService(transport, THREAD_POOL);
transportService.start();
transportService.acceptIncomingRequests();
shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null);
shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {});
shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> {});

View File

@ -674,7 +674,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
} else {
try {
indexNameExpressionResolver.concreteIndices(context, "Foo*");
fail("expecting exeption when result empty and allowNoIndicec=false");
fail("expecting exception when result empty and allowNoIndicec=false");
} catch (IndexNotFoundException e) {
// expected exception
}

View File

@ -713,7 +713,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("now start initializing shards and expect exactly one rebalance from node1 to node 2 sicne index [test] is all on node1");
logger.debug("now start initializing shards and expect exactly one rebalance from node1 to node 2 since index [test] is all on node1");
routingNodes = clusterState.getRoutingNodes();
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();

View File

@ -181,7 +181,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
}
logger.info("complete relocation, thats it!");
logger.info("complete relocation, that's it!");
routingNodes = clusterState.getRoutingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();

View File

@ -17,15 +17,13 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
@ -38,7 +36,7 @@ import java.util.List;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
public class Log4jESLoggerTests extends ESTestCase {
public class ESLoggerTests extends ESTestCase {
private ESLogger esTestLogger;
private TestAppender testAppender;
@ -49,7 +47,7 @@ public class Log4jESLoggerTests extends ESTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
this.testLevel = Log4jESLoggerFactory.getLogger("test").getLevel();
this.testLevel = ESLoggerFactory.getLogger("test").getLevel();
LogConfigurator.reset();
Path configDir = getDataPath("config");
// Need to set custom path.conf so we can use a custom logging.yml file for the test
@ -59,18 +57,18 @@ public class Log4jESLoggerTests extends ESTestCase {
.build();
LogConfigurator.configure(settings, true);
esTestLogger = Log4jESLoggerFactory.getLogger("test");
Logger testLogger = ((Log4jESLogger) esTestLogger).logger();
esTestLogger = ESLoggerFactory.getLogger("test");
Logger testLogger = esTestLogger.getLogger();
assertThat(testLogger.getLevel(), equalTo(Level.TRACE));
testAppender = new TestAppender();
testLogger.addAppender(testAppender);
// deprecation setup, needs to be set to debug to log
deprecationLogger = Log4jESLoggerFactory.getDeprecationLogger("test");
deprecationLogger = ESLoggerFactory.getDeprecationLogger("test");
deprecationAppender = new TestAppender();
ESLogger logger = Log4jESLoggerFactory.getLogger("deprecation.test");
ESLogger logger = ESLoggerFactory.getLogger("deprecation.test");
logger.setLevel("DEBUG");
(((Log4jESLogger) logger).logger()).addAppender(deprecationAppender);
logger.getLogger().addAppender(deprecationAppender);
}
@Override
@ -78,9 +76,9 @@ public class Log4jESLoggerTests extends ESTestCase {
public void tearDown() throws Exception {
super.tearDown();
esTestLogger.setLevel(testLevel);
Logger testLogger = ((Log4jESLogger) esTestLogger).logger();
Logger testLogger = esTestLogger.getLogger();
testLogger.removeAppender(testAppender);
Logger deprecationLogger = ((Log4jESLogger) Log4jESLoggerFactory.getLogger("deprecation.test")).logger();
Logger deprecationLogger = ESLoggerFactory.getLogger("deprecation.test").getLogger();
deprecationLogger.removeAppender(deprecationAppender);
}
@ -99,7 +97,7 @@ public class Log4jESLoggerTests extends ESTestCase {
assertThat(event.getRenderedMessage(), equalTo("This is an error"));
LocationInfo locationInfo = event.getLocationInformation();
assertThat(locationInfo, notNullValue());
assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
event = events.get(1);
assertThat(event, notNullValue());
@ -107,7 +105,7 @@ public class Log4jESLoggerTests extends ESTestCase {
assertThat(event.getRenderedMessage(), equalTo("This is a warning"));
locationInfo = event.getLocationInformation();
assertThat(locationInfo, notNullValue());
assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
event = events.get(2);
assertThat(event, notNullValue());
@ -115,7 +113,7 @@ public class Log4jESLoggerTests extends ESTestCase {
assertThat(event.getRenderedMessage(), equalTo("This is an info"));
locationInfo = event.getLocationInformation();
assertThat(locationInfo, notNullValue());
assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
event = events.get(3);
assertThat(event, notNullValue());
@ -123,7 +121,7 @@ public class Log4jESLoggerTests extends ESTestCase {
assertThat(event.getRenderedMessage(), equalTo("This is a debug"));
locationInfo = event.getLocationInformation();
assertThat(locationInfo, notNullValue());
assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
event = events.get(4);
assertThat(event, notNullValue());
@ -131,7 +129,7 @@ public class Log4jESLoggerTests extends ESTestCase {
assertThat(event.getRenderedMessage(), equalTo("This is a trace"));
locationInfo = event.getLocationInformation();
assertThat(locationInfo, notNullValue());
assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getClassName(), equalTo(ESLoggerTests.class.getCanonicalName()));
assertThat(locationInfo.getMethodName(), equalTo("testLocationInfoTest"));
}

View File

@ -17,12 +17,11 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;
import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
@ -50,7 +49,7 @@ public class LoggingConfigurationTests extends ESTestCase {
}
public void testResolveMultipleConfigs() throws Exception {
String level = Log4jESLoggerFactory.getLogger("test").getLevel();
String level = ESLoggerFactory.getLogger("test").getLevel();
try {
Path configDir = getDataPath("config");
Settings settings = Settings.builder()
@ -59,22 +58,22 @@ public class LoggingConfigurationTests extends ESTestCase {
.build();
LogConfigurator.configure(settings, true);
ESLogger esLogger = Log4jESLoggerFactory.getLogger("test");
Logger logger = ((Log4jESLogger) esLogger).logger();
ESLogger esLogger = ESLoggerFactory.getLogger("test");
Logger logger = esLogger.getLogger();
Appender appender = logger.getAppender("console");
assertThat(appender, notNullValue());
esLogger = Log4jESLoggerFactory.getLogger("second");
logger = ((Log4jESLogger) esLogger).logger();
esLogger = ESLoggerFactory.getLogger("second");
logger = esLogger.getLogger();
appender = logger.getAppender("console2");
assertThat(appender, notNullValue());
esLogger = Log4jESLoggerFactory.getLogger("third");
logger = ((Log4jESLogger) esLogger).logger();
esLogger = ESLoggerFactory.getLogger("third");
logger = esLogger.getLogger();
appender = logger.getAppender("console3");
assertThat(appender, notNullValue());
} finally {
Log4jESLoggerFactory.getLogger("test").setLevel(level);
ESLoggerFactory.getLogger("test").setLevel(level);
}
}
@ -166,8 +165,8 @@ public class LoggingConfigurationTests extends ESTestCase {
.build(), new CliToolTestCase.MockTerminal());
LogConfigurator.configure(environment.settings(), true);
// args should overwrite whatever is in the config
ESLogger esLogger = Log4jESLoggerFactory.getLogger("test_resolve_order");
Logger logger = ((Log4jESLogger) esLogger).logger();
ESLogger esLogger = ESLoggerFactory.getLogger("test_resolve_order");
Logger logger = esLogger.getLogger();
Appender appender = logger.getAppender("console");
assertThat(appender, notNullValue());
assertTrue(logger.isTraceEnabled());
@ -190,10 +189,10 @@ public class LoggingConfigurationTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build(), new CliToolTestCase.MockTerminal());
LogConfigurator.configure(environment.settings(), false);
ESLogger esLogger = Log4jESLoggerFactory.getLogger("test_config_not_read");
ESLogger esLogger = ESLoggerFactory.getLogger("test_config_not_read");
assertNotNull(esLogger);
Logger logger = ((Log4jESLogger) esLogger).logger();
Logger logger = esLogger.getLogger();
Appender appender = logger.getAppender("console");
// config was not read
assertNull(appender);

View File

@ -1,131 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
public class JDKESLoggerTests extends ESTestCase {
private ESLogger esTestLogger;
private TestHandler testHandler;
@Override
public void setUp() throws Exception {
super.setUp();
JdkESLoggerFactory esTestLoggerFactory = new JdkESLoggerFactory();
esTestLogger = esTestLoggerFactory.newInstance("test");
Logger testLogger = ((JdkESLogger) esTestLogger).logger();
testLogger.setLevel(Level.FINEST);
assertThat(testLogger.getLevel(), equalTo(Level.FINEST));
testHandler = new TestHandler();
testLogger.addHandler(testHandler);
}
public void testLocationInfoTest() {
esTestLogger.error("This is an error");
esTestLogger.warn("This is a warning");
esTestLogger.info("This is an info");
esTestLogger.debug("This is a debug");
esTestLogger.trace("This is a trace");
List<LogRecord> records = testHandler.getEvents();
assertThat(records, notNullValue());
assertThat(records.size(), equalTo(5));
LogRecord record = records.get(0);
assertThat(record, notNullValue());
assertThat(record.getLevel(), equalTo(Level.SEVERE));
assertThat(record.getMessage(), equalTo("This is an error"));
assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
assertThat(record.getSourceMethodName(), equalTo("testLocationInfoTest"));
record = records.get(1);
assertThat(record, notNullValue());
assertThat(record.getLevel(), equalTo(Level.WARNING));
assertThat(record.getMessage(), equalTo("This is a warning"));
assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
assertThat(record.getSourceMethodName(), equalTo("testLocationInfoTest"));
record = records.get(2);
assertThat(record, notNullValue());
assertThat(record.getLevel(), equalTo(Level.INFO));
assertThat(record.getMessage(), equalTo("This is an info"));
assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
assertThat(record.getSourceMethodName(), equalTo("testLocationInfoTest"));
record = records.get(3);
assertThat(record, notNullValue());
assertThat(record.getLevel(), equalTo(Level.FINE));
assertThat(record.getMessage(), equalTo("This is a debug"));
assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
assertThat(record.getSourceMethodName(), equalTo("testLocationInfoTest"));
record = records.get(4);
assertThat(record, notNullValue());
assertThat(record.getLevel(), equalTo(Level.FINEST));
assertThat(record.getMessage(), equalTo("This is a trace"));
assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
assertThat(record.getSourceMethodName(), equalTo("testLocationInfoTest"));
}
public void testSetLogLevelString() {
// verify the string based level-setters
esTestLogger.setLevel("error");
assertThat(esTestLogger.getLevel(), equalTo("SEVERE"));
esTestLogger.setLevel("warn");
assertThat(esTestLogger.getLevel(), equalTo("WARNING"));
esTestLogger.setLevel("info");
assertThat(esTestLogger.getLevel(), equalTo("INFO"));
esTestLogger.setLevel("debug");
assertThat(esTestLogger.getLevel(), equalTo("FINE"));
esTestLogger.setLevel("trace");
assertThat(esTestLogger.getLevel(), equalTo("FINEST"));
}
private static class TestHandler extends Handler {
private List<LogRecord> records = new ArrayList<>();
@Override
public void close() {
}
public List<LogRecord> getEvents() {
return records;
}
@Override
public void publish(LogRecord record) {
// Forces it to generate the location information
record.getSourceClassName();
records.add(record);
}
@Override
public void flush() {
}
}
}

View File

@ -31,9 +31,12 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
public class ScopedSettingsTests extends ESTestCase {
@ -299,4 +302,25 @@ public class ScopedSettingsTests extends ESTestCase {
ESLoggerFactory.getRootLogger().setLevel(level);
}
}
public void testOverlappingComplexMatchSettings() {
Set<Setting<?>> settings = new LinkedHashSet<>(2);
final boolean groupFirst = randomBoolean();
final Setting<?> groupSetting = Setting.groupSetting("foo.", false, Setting.Scope.CLUSTER);
final Setting<?> listSetting = Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), false,
Setting.Scope.CLUSTER);
settings.add(groupFirst ? groupSetting : listSetting);
settings.add(groupFirst ? listSetting : groupSetting);
try {
new ClusterSettings(Settings.EMPTY, settings);
fail("an exception should have been thrown because settings overlap");
} catch (IllegalArgumentException e) {
if (groupFirst) {
assertEquals("complex setting key: [foo.bar] overlaps existing setting key: [foo.]", e.getMessage());
} else {
assertEquals("complex setting key: [foo.] overlaps existing setting key: [foo.bar]", e.getMessage());
}
}
}
}

View File

@ -58,7 +58,7 @@ public class JsonSettingsLoaderTests extends ESTestCase {
fail("expected exception");
} catch (SettingsException e) {
assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [1], column number [13], previous value [bar], current value [baz]"));
assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [1], column number [20], previous value [bar], current value [baz]"));
}
}
}

Some files were not shown because too many files have changed in this diff Show More