Merge branch 'master' into feature/aggs-refactoring

This commit is contained in:
Colin Goodheart-Smithe 2016-02-03 14:45:16 +00:00
commit 5d9d91b761
349 changed files with 6585 additions and 8377 deletions

View File

@@ -16,7 +16,6 @@
-/plugins/discovery-azure/target
-/plugins/discovery-ec2/target
-/plugins/discovery-gce/target
-/plugins/discovery-multicast/target
-/plugins/jvm-example/target
-/plugins/lang-expression/target
-/plugins/lang-groovy/target

View File

@@ -289,7 +289,7 @@ class BuildPlugin implements Plugin<Project> {
String luceneVersion = VersionProperties.lucene
if (luceneVersion.contains('-snapshot')) {
// extract the revision number from the version with a regex matcher
String revision = (luceneVersion =~ /\w+-snapshot-(\d+)/)[0][1]
String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1]
repos.maven {
name 'lucene-snapshots'
url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"

View File

@@ -94,6 +94,9 @@ class PrecommitTasks {
project.checkstyle {
config = project.resources.text.fromFile(
PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
configProperties = [
suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
]
}
for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
Task task = project.tasks.findByName(taskName)

View File

@@ -6,6 +6,10 @@
<module name="Checker">
<property name="charset" value="UTF-8" />
<module name="SuppressionFilter">
<property name="file" value="${suppressions}" />
</module>
<module name="TreeWalker">
<!-- ~3500 violations
<module name="LineLength">

View File

@@ -0,0 +1,10 @@
<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
<!-- These files are generated by ANTLR so its silly to hold them to our rules. -->
<suppress files="org/elasticsearch/painless/PainlessLexer\.java" checks="." />
<suppress files="org/elasticsearch/painless/PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
</suppressions>

View File

@@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-1725675
lucene = 5.5.0-snapshot-4de5f1d
# optional dependencies
spatial4j = 0.5

View File

@@ -19,6 +19,7 @@
package org.elasticsearch;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -39,6 +40,7 @@ import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
/**
@@ -205,7 +207,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
* @param exType the exception type to look for
* @return whether there is a nested exception of the specified type
*/
public boolean contains(Class exType) {
public boolean contains(Class<? extends Throwable> exType) {
if (exType == null) {
return false;
}
@@ -469,157 +471,288 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return throwable;
}
/**
* This is the list of Exceptions Elasticsearch can throw over the wire or save into a corruption marker. Each value in the enum is a
* single exception tying the Class to an id for use on the encode side and the id back to a constructor for use on the decode side. As
* such it's ok for the exceptions to change names so long as their constructor can still read the exception. Each exception is listed
* in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in
* ExceptionSerializationTests.testIds.ids.
*/
enum ElasticsearchExceptionHandle {
// each exception gets an assigned id that must never change. While the exception name can
// change due to refactorings etc. like renaming we have to keep the ordinal <--> class mapping
// to deserialize the exception coming from another node or from a corruption marker on
// a corrupted index.
// these exceptions can be ordered and removed, but (repeating) the ids must never change
// to remove an exception, remove the enum value below, and mark the id as null in ExceptionSerializationTests.testIds.ids
INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class, org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class, org.elasticsearch.ElasticsearchSecurityException::new, 4),
INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class, org.elasticsearch.indices.IndexClosedException::new, 6),
BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class, org.elasticsearch.http.BindHttpException::new, 7),
REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class, org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class, org.elasticsearch.node.NodeClosedException::new, 9),
SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class, org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class, org.elasticsearch.transport.ConnectTransportException::new, 12),
NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class, org.elasticsearch.transport.NotSerializableTransportException::new, 13),
RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class, org.elasticsearch.indices.IndexCreationException::new, 15),
INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class, org.elasticsearch.index.IndexNotFoundException::new, 16),
ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,
org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,
org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,
org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,
org.elasticsearch.ElasticsearchSecurityException::new, 4),
INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,
org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,
org.elasticsearch.indices.IndexClosedException::new, 6),
BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,
org.elasticsearch.http.BindHttpException::new, 7),
REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,
org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,
org.elasticsearch.node.NodeClosedException::new, 9),
SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,
org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,
org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,
org.elasticsearch.transport.ConnectTransportException::new, 12),
NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,
org.elasticsearch.transport.NotSerializableTransportException::new, 13),
RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,
org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,
org.elasticsearch.indices.IndexCreationException::new, 15),
INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,
org.elasticsearch.index.IndexNotFoundException::new, 16),
ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,
org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,
org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,
org.elasticsearch.ResourceNotFoundException::new, 19),
ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,
org.elasticsearch.transport.ActionTransportException::new, 20),
ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,
org.elasticsearch.ElasticsearchGenerationException::new, 21),
// 22 was CreateFailedEngineException
INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,
org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
org.elasticsearch.search.SearchContextMissingException::new, 24),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29),
SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, org.elasticsearch.snapshots.SnapshotException::new, 30),
INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class, org.elasticsearch.indices.InvalidAliasNameException::new, 31),
INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class, org.elasticsearch.indices.InvalidIndexNameException::new, 32),
INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class, org.elasticsearch.transport.TransportException::new, 34),
ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class, org.elasticsearch.ElasticsearchParseException::new, 35),
SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class, org.elasticsearch.search.SearchException::new, 36),
MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class, org.elasticsearch.index.mapper.MapperException::new, 37),
INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class, org.elasticsearch.indices.InvalidTypeNameException::new, 38),
SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class, org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
org.elasticsearch.index.engine.DocumentMissingException::new, 29),
SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,
org.elasticsearch.snapshots.SnapshotException::new, 30),
INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,
org.elasticsearch.indices.InvalidAliasNameException::new, 31),
INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,
org.elasticsearch.indices.InvalidIndexNameException::new, 32),
INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,
org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,
org.elasticsearch.transport.TransportException::new, 34),
ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,
org.elasticsearch.ElasticsearchParseException::new, 35),
SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,
org.elasticsearch.search.SearchException::new, 36),
MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,
org.elasticsearch.index.mapper.MapperException::new, 37),
INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,
org.elasticsearch.indices.InvalidTypeNameException::new, 38),
SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,
org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),
INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class, org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class, org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class, org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class, org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class, org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class, org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class, org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,
org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,
org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,
org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,
org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,
org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
// 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class, org.elasticsearch.transport.SendRequestTransportException::new, 58),
ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,
org.elasticsearch.common.settings.SettingsException::new, 56),
INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,
org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,
org.elasticsearch.transport.SendRequestTransportException::new, 58),
ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class,
org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
org.elasticsearch.indices.AliasFilterParsingException::new, 63),
// 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class, org.elasticsearch.ElasticsearchException::new, 68),
SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class, org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class, org.elasticsearch.action.PrimaryMissingActionException::new, 70),
ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,
org.elasticsearch.ElasticsearchException::new, 68),
SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,
org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,
org.elasticsearch.action.PrimaryMissingActionException::new, 70),
FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),
SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),
CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class, org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class, org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class, org.elasticsearch.action.TimestampParsingException::new, 78),
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79),
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, org.elasticsearch.repositories.RepositoryException::new, 82),
RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,
org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,
org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,
org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,
org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,
org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,
org.elasticsearch.action.TimestampParsingException::new, 78),
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
org.elasticsearch.action.RoutingMissingException::new, 79),
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,
org.elasticsearch.repositories.RepositoryException::new, 82),
RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,
org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
org.elasticsearch.transport.NodeDisconnectedException::new, 84),
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
org.elasticsearch.index.AlreadyExpiredException::new, 85),
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
// 87 used to be for MergeMappingException
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class,
org.elasticsearch.percolator.PercolateException::new, 89),
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,
org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,
org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
// 93 used to be for IndexWarmerMissingException
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class, org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class, org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class, org.elasticsearch.transport.TransportSerializationException::new, 102),
REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class, org.elasticsearch.transport.RemoteTransportException::new, 103),
ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class, org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class, org.elasticsearch.cluster.routing.RoutingException::new, 105),
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class, org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107),
PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.index.percolator.PercolatorException::new, 108),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class, org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class, org.elasticsearch.transport.BindTransportException::new, 112),
ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class, org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class, org.elasticsearch.index.translog.TranslogException::new, 115),
PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.ElasticsearchTimeoutException::new, 118),
QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class, org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124),
HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126),
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, org.elasticsearch.search.SearchContextException::new, 127),
SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class, org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class, org.elasticsearch.index.engine.EngineClosedException::new, 129),
NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class, org.elasticsearch.action.NoShardAvailableActionException::new, 130),
UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class, org.elasticsearch.action.UnavailableShardsException::new, 131),
FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class, org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class, org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class, org.elasticsearch.transport.NodeNotConnectedException::new, 134),
STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,
org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,
org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,
org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,
org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,
org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,
org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,
org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,
org.elasticsearch.transport.TransportSerializationException::new, 102),
REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,
org.elasticsearch.transport.RemoteTransportException::new, 103),
ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,
org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,
org.elasticsearch.cluster.routing.RoutingException::new, 105),
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
org.elasticsearch.index.percolator.PercolatorException::new, 108),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
org.elasticsearch.transport.BindTransportException::new, 112),
ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class,
org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,
org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,
org.elasticsearch.index.translog.TranslogException::new, 115),
PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,
org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class,
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,
org.elasticsearch.ElasticsearchTimeoutException::new, 118),
QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,
org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,
org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
org.elasticsearch.script.Script.ScriptParseException::new, 124),
HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
org.elasticsearch.index.mapper.MapperParsingException::new, 126),
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
org.elasticsearch.search.SearchContextException::new, 127),
SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
org.elasticsearch.index.engine.EngineClosedException::new, 129),
NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
org.elasticsearch.action.NoShardAvailableActionException::new, 130),
UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
org.elasticsearch.action.UnavailableShardsException::new, 131),
FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,
org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,
org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,
org.elasticsearch.transport.NodeNotConnectedException::new, 134),
STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,
org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,
org.elasticsearch.indices.TypeMissingException::new, 137),
FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,
org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
ShardStateAction.NoLongerPrimaryShardException::new, 142);
final Class<? extends ElasticsearchException> exceptionClass;
final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
final int id;
ElasticsearchExceptionHandle(Class<? extends ElasticsearchException> exceptionClass, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor, int id) {
<E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {
// We need the exceptionClass because you can't dig it out of the constructor reliably.
this.exceptionClass = exceptionClass;
this.constructor = constructor;
this.id = id;
@@ -627,11 +760,10 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}
static {
final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> exceptions = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e));
final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> idToSupplier = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor));
ID_TO_SUPPLIER = Collections.unmodifiableMap(idToSupplier);
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
ID_TO_SUPPLIER = unmodifiableMap(Arrays
.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)));
CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = unmodifiableMap(Arrays
.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e)));
}
public Index getIndex() {
@@ -703,7 +835,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
builder.startArray();
for (ElasticsearchException rootCause : rootCauses) {
builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(
Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject();
}
builder.endArray();
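For readers skimming the enum above: the pattern ties a stable integer wire id to a Class and to a StreamInput-accepting constructor reference, so the sending node encodes an exception as its id and the receiving node decodes it by invoking the registered constructor, which stays correct across class renames. The static block builds the id-to-constructor lookup (ID_TO_SUPPLIER) used on decode. A self-contained sketch of the same pattern with hypothetical names, using DataInput as a stand-in for StreamInput:

import java.io.DataInput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ExceptionRegistrySketch {
    interface StreamConstructor<T> {
        T read(DataInput in) throws IOException;
    }

    static class ExampleException extends Exception {
        ExampleException(DataInput in) throws IOException {
            super(in.readUTF()); // reconstruct state from the wire
        }
    }

    enum Handle {
        EXAMPLE_EXCEPTION(ExampleException.class, ExampleException::new, 0); // id 0 must never change

        final Class<? extends Exception> exceptionClass;
        final StreamConstructor<? extends Exception> constructor;
        final int id;

        Handle(Class<? extends Exception> exceptionClass, StreamConstructor<? extends Exception> constructor, int id) {
            this.exceptionClass = exceptionClass;
            this.constructor = constructor;
            this.id = id;
        }
    }

    // id -> handle lookup mirrors ID_TO_SUPPLIER: decode finds the constructor by wire id
    static final Map<Integer, Handle> BY_ID = new HashMap<>();
    static {
        for (Handle h : Handle.values()) {
            BY_ID.put(h.id, h);
        }
    }
}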

View File

@@ -254,7 +254,11 @@ public class Version {
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_4_ID = 1070499;
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_5_ID = 1070599;
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_6_ID = 1070699;
public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -275,11 +279,15 @@ public class Version {
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_2_ID = 2010299;
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_3_ID = 2010399;
public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_3_0_0_ID = 3000099;
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version CURRENT = V_3_0_0;
@@ -299,8 +307,12 @@ public class Version {
return V_3_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_3_ID:
return V_2_1_3;
case V_2_1_2_ID:
return V_2_1_2;
case V_2_1_1_ID:
@@ -321,6 +333,10 @@ public class Version {
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_6_ID:
return V_1_7_6;
case V_1_7_5_ID:
return V_1_7_5;
case V_1_7_4_ID:
return V_1_7_4;
case V_1_7_3_ID:
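The version id constants above appear to follow an XXYYZZBB integer scheme (leading zero dropped): two digits each for major, minor, and revision, plus a build suffix where 99 denotes a GA release and lower values mark betas and RCs, so 1070699 is 1.7.6 and 2000001 is 2.0.0-beta1. A small decoder illustrating that layout (an inference from the constants, not code from the commit):

public class VersionIdDecoder {
    static String describe(int id) {
        int major = id / 1000000;
        int minor = (id / 10000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        return major + "." + minor + "." + revision + (build == 99 ? "" : "-build" + build);
    }

    public static void main(String[] args) {
        System.out.println(describe(1070699)); // 1.7.6
        System.out.println(describe(2020199)); // 2.2.1
        System.out.println(describe(2000001)); // 2.0.0-build1 (beta1)
    }
}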

View File

@@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
}
@Override
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.flush(shardRequest.getRequest());
logger.trace("{} flush request executed on primary", indexShard.shardId());

View File

@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
}
@Override
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed on primary", indexShard.shardId());

View File

@@ -140,7 +140,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
}
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
@@ -200,7 +200,7 @@
* Execute the given {@link IndexRequest} on a primary shard, throwing a
* {@link RetryOnPrimaryException} if the operation needs to be re-tried.
*/
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
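These signature changes narrow throws Throwable to throws Exception (or drop the clause entirely), which matters for callers: a plain catch (Exception) now suffices, and JVM Errors such as OutOfMemoryError propagate instead of being swallowed by an over-broad handler. A minimal sketch of the difference, with hypothetical types:

public class ThrowsNarrowingSketch {
    interface PrimaryOperation<T> {
        T execute() throws Exception; // was: throws Throwable
    }

    static <T> T run(PrimaryOperation<T> op) {
        try {
            return op.execute();
        } catch (Exception e) { // no catch (Throwable) needed; Errors propagate untouched
            throw new RuntimeException("operation failed on primary", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(run(() -> "flushed"));
    }
}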

View File

@@ -22,31 +22,24 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
private String pipelineId;
private boolean verbose;
private List<SimulateDocumentResult> results;
private PipelineFactoryError error;
public SimulatePipelineResponse() {
}
public SimulatePipelineResponse(PipelineFactoryError error) {
this.error = error;
}
public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
this.pipelineId = pipelineId;
this.verbose = verbose;
@@ -65,69 +58,42 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusTo
return verbose;
}
public boolean isError() {
return error != null;
}
@Override
public RestStatus status() {
if (isError()) {
return RestStatus.BAD_REQUEST;
}
return RestStatus.OK;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(isError());
if (isError()) {
error.writeTo(out);
} else {
out.writeString(pipelineId);
out.writeBoolean(verbose);
out.writeVInt(results.size());
for (SimulateDocumentResult response : results) {
response.writeTo(out);
}
out.writeString(pipelineId);
out.writeBoolean(verbose);
out.writeVInt(results.size());
for (SimulateDocumentResult response : results) {
response.writeTo(out);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
boolean isError = in.readBoolean();
if (isError) {
error = new PipelineFactoryError();
error.readFrom(in);
} else {
this.pipelineId = in.readString();
boolean verbose = in.readBoolean();
int responsesLength = in.readVInt();
results = new ArrayList<>();
for (int i = 0; i < responsesLength; i++) {
SimulateDocumentResult<?> simulateDocumentResult;
if (verbose) {
simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
} else {
simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
}
results.add(simulateDocumentResult);
this.pipelineId = in.readString();
boolean verbose = in.readBoolean();
int responsesLength = in.readVInt();
results = new ArrayList<>();
for (int i = 0; i < responsesLength; i++) {
SimulateDocumentResult<?> simulateDocumentResult;
if (verbose) {
simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
} else {
simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
}
results.add(simulateDocumentResult);
}
}
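
With the error branch gone, writeTo and readFrom above are strict mirror images: a string, a boolean, then a length-prefixed list of results. A self-contained sketch of that wire pattern over DataOutput/DataInput, which stand in here for StreamOutput/StreamInput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Mirror-image serialization: readFrom must consume fields in exactly the
// order writeTo produced them, with a length prefix before the list.
final class WireFormatSketch {
    static void write(DataOutput out, String pipelineId, boolean verbose, List<String> results) throws IOException {
        out.writeUTF(pipelineId);
        out.writeBoolean(verbose);
        out.writeInt(results.size()); // length prefix, like writeVInt above
        for (String result : results) {
            out.writeUTF(result);
        }
    }

    static List<String> read(DataInput in) throws IOException {
        String pipelineId = in.readUTF();
        boolean verbose = in.readBoolean();
        int size = in.readInt();
        List<String> results = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            results.add(in.readUTF());
        }
        return results; // pipelineId and verbose would be stored on the response object
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), "my-pipeline", true, Arrays.asList("doc1", "doc2"));
        System.out.println(read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
    }
}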
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (isError()) {
error.toXContent(builder, params);
} else {
builder.startArray(Fields.DOCUMENTS);
for (SimulateDocumentResult response : results) {
response.toXContent(builder, params);
}
builder.endArray();
builder.startArray(Fields.DOCUMENTS);
for (SimulateDocumentResult response : results) {
response.toXContent(builder, params);
}
builder.endArray();
return builder;
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@ -27,8 +28,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -58,9 +57,6 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
} else {
simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
}
} catch (ConfigurationPropertyException e) {
listener.onResponse(new SimulatePipelineResponse(new PipelineFactoryError(e)));
return;
} catch (Exception e) {
listener.onFailure(e);
return;

View File

@ -22,12 +22,10 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import java.io.IOException;
public class WritePipelineResponse extends AcknowledgedResponse {
private PipelineFactoryError error;
WritePipelineResponse() {
@ -35,36 +33,17 @@ public class WritePipelineResponse extends AcknowledgedResponse {
public WritePipelineResponse(boolean acknowledged) {
super(acknowledged);
if (!isAcknowledged()) {
error = new PipelineFactoryError("pipeline write is not acknowledged");
}
}
public WritePipelineResponse(PipelineFactoryError error) {
super(false);
this.error = error;
}
public PipelineFactoryError getError() {
return error;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
if (!isAcknowledged()) {
error = new PipelineFactoryError();
error.readFrom(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
if (!isAcknowledged()) {
error.writeTo(out);
}
}
}

View File

@ -1,41 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import java.io.IOException;
public class WritePipelineResponseRestListener extends AcknowledgedRestListener<WritePipelineResponse> {
public WritePipelineResponseRestListener(RestChannel channel) {
super(channel);
}
@Override
protected void addCustomFields(XContentBuilder builder, WritePipelineResponse response) throws IOException {
if (!response.isAcknowledged()) {
response.getError().toXContent(builder, null);
}
}
}

View File

@ -55,6 +55,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
private long routedBasedOnClusterVersion = 0;
public ReplicationRequest() {
}
@ -141,6 +143,20 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return (Request) this;
}
/**
* Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.
* Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}
*/
@SuppressWarnings("unchecked")
Request routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {
this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;
return (Request) this;
}
long routedBasedOnClusterVersion() {
return routedBasedOnClusterVersion;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
@ -161,6 +177,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
}
@Override
@ -175,6 +192,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
out.writeByte(consistencyLevel.id());
timeout.writeTo(out);
out.writeString(index);
out.writeVLong(routedBasedOnClusterVersion);
}
/**

View File

@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
@ -156,10 +157,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
/**
* Primary operation on node with primary copy, the provided metadata should be used for request validation if needed
*
* @return A tuple containing non-null values: as first value the result of the primary operation and as second value
* the request to be executed on the replica shards.
*/
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable;
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception;
/**
* Replica operation on nodes with replica copies
@ -299,7 +301,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}
public RetryOnReplicaException(StreamInput in) throws IOException{
public RetryOnReplicaException(StreamInput in) throws IOException {
super(in);
}
}
@ -326,8 +328,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public void onNewClusterState(ClusterState state) {
context.close();
// Forking a thread on local node via transport service so that custom transport service have an
// opportunity to execute custom logic before the replica operation begins
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
// opportunity to execute custom logic before the replica operation begins
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
}
@ -352,6 +354,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
}
private void failReplicaIfNeeded(Throwable t) {
String index = request.shardId().getIndex().getName();
int shardId = request.shardId().id();
@ -383,7 +386,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doRun() throws Exception {
assert request.shardId() != null : "request shardId must be set";
try (Releasable ignored = getIndexShardOperationsCounter(request.shardId())) {
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
shardOperationOnReplica(request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
@ -399,7 +402,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}
public RetryOnPrimaryException(StreamInput in) throws IOException{
public RetryOnPrimaryException(StreamInput in) throws IOException {
super(in);
}
}
@ -445,6 +448,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
handleBlockException(blockException);
return;
}
// request does not have a shardId yet, we need to pass the concrete index to resolve shardId
resolveRequest(state.metaData(), concreteIndex, request);
assert request.shardId() != null : "request shardId must be set in resolveRequest";
@ -468,6 +472,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
performAction(node, transportPrimaryAction, true);
} else {
if (state.version() < request.routedBasedOnClusterVersion()) {
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
return;
} else {
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
request.routedBasedOnClusterVersion(state.version());
}
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
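
The stamp-and-check pair above (routedBasedOnClusterVersion on the request, the version comparison here) is what breaks redirect cycles between two nodes that disagree about where the primary lives. A minimal sketch of the mechanism, with Request as an illustrative stand-in rather than the real class:

// Sketch of the redirect-loop guard: a request records the cluster state
// version it was routed on; a node only forwards it if its own state is at
// least that fresh. All names here are illustrative stand-ins.
final class RerouteGuardSketch {
    static final class Request {
        private long routedBasedOnClusterVersion; // 0 = never routed
        long routedBasedOnClusterVersion() { return routedBasedOnClusterVersion; }
        void routedBasedOnClusterVersion(long version) { routedBasedOnClusterVersion = version; }
    }

    /** Returns true if this node is fresh enough to forward the request onward. */
    static boolean mayForward(Request request, long localClusterStateVersion) {
        if (localClusterStateVersion < request.routedBasedOnClusterVersion()) {
            // our view is older than the sender's: retry on a newer cluster state
            // instead of bouncing the request back and forth forever
            return false;
        }
        // stamp the request so the next hop must be at least this fresh
        request.routedBasedOnClusterVersion(localClusterStateVersion);
        return true;
    }

    public static void main(String[] args) {
        Request request = new Request();
        System.out.println(mayForward(request, 5)); // true: first hop, stamps version 5
        System.out.println(mayForward(request, 3)); // false: stale node must wait and retry
        System.out.println(mayForward(request, 7)); // true: at least as fresh as the stamp
    }
}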
@ -584,60 +597,71 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
/**
* Responsible for performing primary operation locally and delegating to replication action once successful
* Responsible for performing primary operation locally or delegating primary operation to relocation target in the case where the shard has
* been marked as RELOCATED. Delegates to replication action once successful.
* <p>
* Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
*/
final class PrimaryPhase extends AbstractRunnable {
class PrimaryPhase extends AbstractRunnable {
private final Request request;
private final ShardId shardId;
private final TransportChannel channel;
private final ClusterState state;
private final AtomicBoolean finished = new AtomicBoolean();
private Releasable indexShardReference;
private IndexShardReference indexShardReference;
PrimaryPhase(Request request, TransportChannel channel) {
this.state = clusterService.state();
this.request = request;
assert request.shardId() != null : "request shardId must be set prior to primary phase";
this.shardId = request.shardId();
this.channel = channel;
}
@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
}
@Override
protected void doRun() throws Exception {
// request shardID was set in ReroutePhase
assert request.shardId() != null : "request shardID must be set prior to primary phase";
final ShardId shardId = request.shardId();
final String writeConsistencyFailure = checkWriteConsistency(shardId);
if (writeConsistencyFailure != null) {
finishBecauseUnavailable(shardId, writeConsistencyFailure);
return;
}
final ReplicationPhase replicationPhase;
try {
indexShardReference = getIndexShardOperationsCounter(shardId);
// closed in finishAsFailed(e) in the case of error
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
if (indexShardReference.isRelocated() == false) {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
} catch (Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
return;
ReplicationPhase replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
} else {
// delegate primary phase to relocation target
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
final ShardRouting primary = indexShardReference.routingEntry();
indexShardReference.close();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't: " + primary;
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
"rerouting indexing to target primary " + primary));
}
finishAndMoveToReplication(replicationPhase);
}
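
The branch above either runs the primary operation under the reference or, for a RELOCATED shard, releases the reference and forwards the request to the relocation target. A condensed sketch with simplified stand-in types (the real code hands the reference to ReplicationPhase rather than closing it right after the local operation):

// Condensed shape of PrimaryPhase.doRun(); all names are illustrative only.
final class PrimaryPhaseSketch {
    interface ShardReference extends AutoCloseable {
        boolean isRelocated();
        String relocationTargetNodeId();
        @Override void close();
    }

    static String run(ShardReference reference, java.util.function.Supplier<String> localOperation,
                      java.util.function.UnaryOperator<String> forwardToNode) {
        if (reference.isRelocated() == false) {
            try (ShardReference ref = reference) {
                return localOperation.get(); // execute the primary operation locally
            }
        }
        // look up the target first, then release: no more in-flight local operations
        String targetNode = reference.relocationTargetNodeId();
        reference.close();
        return forwardToNode.apply(targetNode);
    }

    public static void main(String[] args) {
        ShardReference relocated = new ShardReference() {
            public boolean isRelocated() { return true; }
            public String relocationTargetNodeId() { return "node2"; }
            public void close() {}
        };
        System.out.println(run(relocated, () -> "indexed locally", node -> "forwarded to " + node));
    }
}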
/**
@ -721,10 +745,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
/**
* returns a new reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationPhase}).
*/
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReference(indexShard);
return new IndexShardReferenceImpl(indexShard, true);
}
/**
* returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
* replication is completed on the node.
*/
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReferenceImpl(indexShard, false);
}
/**
@ -740,16 +778,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final List<ShardRouting> shards;
private final DiscoveryNodes nodes;
private final boolean executeOnReplica;
private final String indexUUID;
private final AtomicBoolean finished = new AtomicBoolean();
private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard
private final ConcurrentMap<String, Throwable> shardReplicaFailures = ConcurrentCollections.newConcurrentMap();
private final AtomicInteger pending;
private final int totalShards;
private final Releasable indexShardReference;
private final IndexShardReference indexShardReference;
public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
TransportChannel channel, Releasable indexShardReference) {
TransportChannel channel, IndexShardReference indexShardReference) {
this.replicaRequest = replicaRequest;
this.channel = channel;
this.finalResponse = finalResponse;
@ -766,7 +803,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
this.shards = (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
this.executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
this.indexUUID = (indexMetaData != null) ? indexMetaData.getIndexUUID() : null;
this.nodes = state.getNodes();
if (shards.isEmpty()) {
@ -777,17 +813,20 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
int numberOfIgnoredShardInstances = 0;
int numberOfPendingShardInstances = 0;
for (ShardRouting shard : shards) {
// the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
if (shard.primary() == false && executeOnReplica == false) {
numberOfIgnoredShardInstances++;
} else if (shard.unassigned()) {
continue;
}
if (shard.unassigned()) {
numberOfIgnoredShardInstances++;
} else {
if (shard.currentNodeId().equals(nodes.localNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating()) {
numberOfPendingShardInstances++;
}
continue;
}
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
numberOfPendingShardInstances++;
}
}
// one for the local primary copy
@ -795,7 +834,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.pending = new AtomicInteger(numberOfPendingShardInstances);
if (logger.isTraceEnabled()) {
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
transportReplicaAction, replicaRequest, state.version());
transportReplicaAction, replicaRequest, state.version());
}
}
@ -860,7 +899,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
performOnReplica(shard);
}
// send operation to relocating shard
if (shard.relocating()) {
// local shard can be a relocation target of a primary that is in relocated state
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
performOnReplica(shard.buildTargetRelocatingShard());
}
}
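
The selection loop above decides how many copies the replication phase will wait on: unassigned copies are skipped, every copy on another node gets the request, and a relocation target gets it too unless that target is the local node. A compilable sketch of that counting rule, with Shard as a simplified stand-in for ShardRouting:

import java.util.Arrays;
import java.util.List;

// Sketch of the replica fan-out count; executeOnReplica filtering is omitted.
final class ReplicaFanOutSketch {
    static final class Shard {
        final String nodeId;        // null = unassigned
        final String relocatingTo;  // null = not relocating
        Shard(String nodeId, String relocatingTo) { this.nodeId = nodeId; this.relocatingTo = relocatingTo; }
    }

    static int pendingShardInstances(String localNodeId, List<Shard> shards) {
        int pending = 0;
        for (Shard shard : shards) {
            if (shard.nodeId == null) {
                continue;                                          // unassigned: ignore
            }
            if (localNodeId.equals(shard.nodeId) == false) {
                pending++;                                         // copy lives on another node
            }
            if (shard.relocatingTo != null && localNodeId.equals(shard.relocatingTo) == false) {
                pending++;                                         // relocation target elsewhere
            }
        }
        return pending;
    }

    public static void main(String[] args) {
        List<Shard> shards = Arrays.asList(
                new Shard("node1", null),      // local primary: nothing to send
                new Shard("node2", "node1"));  // replica elsewhere, relocating to the local node
        System.out.println(pendingShardInstances("node1", shards)); // 1
    }
}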
@ -899,7 +939,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
logger.warn("[{}] {}", exp, shardId, message);
shardStateAction.shardFailed(
shard,
indexUUID,
indexShardReference.routingEntry(),
message,
exp,
new ShardStateAction.Listener() {
@ -993,21 +1033,39 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
}
static class IndexShardReference implements Releasable {
interface IndexShardReference extends Releasable {
boolean isRelocated();
final private IndexShard counter;
private final AtomicBoolean closed = new AtomicBoolean();
ShardRouting routingEntry();
}
IndexShardReference(IndexShard counter) {
counter.incrementOperationCounter();
this.counter = counter;
static final class IndexShardReferenceImpl implements IndexShardReference {
private final IndexShard indexShard;
private final Releasable operationLock;
IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
this.indexShard = indexShard;
if (primaryAction) {
operationLock = indexShard.acquirePrimaryOperationLock();
} else {
operationLock = indexShard.acquireReplicaOperationLock();
}
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
counter.decrementOperationCounter();
}
operationLock.close();
}
@Override
public boolean isRelocated() {
return indexShard.state() == IndexShardState.RELOCATED;
}
@Override
public ShardRouting routingEntry() {
return indexShard.routingEntry();
}
}
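
The reference above ties a shard's operation lock to the lifetime of an AutoCloseable and picks the lock flavor by the caller's role. A toy analogue using semaphore permits as a stand-in for the primary/replica operation locks, whose real semantics live inside IndexShard and are not shown in this diff:

import java.util.concurrent.Semaphore;

// Toy analogue of IndexShardReferenceImpl: take a permit from a role-specific
// pool on construction, return it on close().
final class ShardReferenceSketch implements AutoCloseable {
    private static final Semaphore PRIMARY_OPS = new Semaphore(Integer.MAX_VALUE);
    private static final Semaphore REPLICA_OPS = new Semaphore(Integer.MAX_VALUE);
    private final Semaphore pool;

    ShardReferenceSketch(boolean primaryAction) {
        pool = primaryAction ? PRIMARY_OPS : REPLICA_OPS;
        pool.acquireUninterruptibly(); // held for the lifetime of the reference
    }

    @Override
    public void close() {
        pool.release();
    }

    public static void main(String[] args) {
        try (ShardReferenceSketch ref = new ShardReferenceSketch(true)) {
            System.out.println("primary permits in use: " + (Integer.MAX_VALUE - PRIMARY_OPS.availablePermits()));
        }
    }
}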

View File

@ -314,7 +314,7 @@ final class Bootstrap {
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("name"));
logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("node.name"));
}
// HACK, it sucks to do this, but we will run users out of disk space otherwise
if (e instanceof CreationException) {

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
@ -82,7 +83,9 @@ final class BootstrapCLIParser extends CliTool {
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("Version: %s, Build: %s/%s, JVM: %s", org.elasticsearch.Version.CURRENT, Build.CURRENT.shortHash(), Build.CURRENT.date(), JvmInfo.jvmInfo().version());
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+ ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ ", JVM: " + JvmInfo.jvmInfo().version());
return ExitStatus.OK_AND_EXIT;
}
}
@ -103,7 +106,7 @@ final class BootstrapCLIParser extends CliTool {
// TODO: don't use system properties as a way to do this, its horrible...
@SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
public static Command parse(Terminal terminal, CommandLine cli) {
public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
if (cli.hasOption("V")) {
return Version.parse(terminal, cli);
}
@ -132,11 +135,11 @@ final class BootstrapCLIParser extends CliTool {
String arg = iterator.next();
if (!arg.startsWith("--")) {
if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
throw new IllegalArgumentException(
throw new UserError(ExitStatus.USAGE,
"Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
}
}
// if there is no = sign, we have to get the next argument
@ -150,11 +153,11 @@ final class BootstrapCLIParser extends CliTool {
if (iterator.hasNext()) {
String value = iterator.next();
if (value.startsWith("--")) {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
System.setProperty("es." + arg, value);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
}
}

View File

@ -103,6 +103,7 @@ public class TransportClientNodesService extends AbstractComponent {
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER);
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
@ -121,7 +122,7 @@ public class TransportClientNodesService extends AbstractComponent {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
}
if (this.settings.getAsBoolean("client.transport.sniff", false)) {
if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
this.nodesSampler = new SniffNodesSampler();
} else {
this.nodesSampler = new SimpleNodeSampler();
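
The new CLIENT_TRANSPORT_SNIFF constant replaces a raw string lookup with a typed, reusable setting definition. A toy version showing the shape of such a constant (this is not the real Setting class, which also carries dynamic-update and scope flags):

import java.util.Collections;
import java.util.Map;
import java.util.function.Function;

// Toy typed setting: key + default + parser in one constant, replacing
// scattered settings.getAsBoolean("client.transport.sniff", false) calls.
final class SettingSketch<T> {
    private final String key;
    private final T defaultValue;
    private final Function<String, T> parser;

    SettingSketch(String key, T defaultValue, Function<String, T> parser) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
    }

    T get(Map<String, String> settings) {
        String raw = settings.get(key);
        return raw == null ? defaultValue : parser.apply(raw);
    }

    static final SettingSketch<Boolean> CLIENT_TRANSPORT_SNIFF =
            new SettingSketch<>("client.transport.sniff", false, Boolean::parseBoolean);

    public static void main(String[] args) {
        Map<String, String> settings = Collections.singletonMap("client.transport.sniff", "true");
        System.out.println(CLIENT_TRANSPORT_SNIFF.get(settings)); // true
        System.out.println(CLIENT_TRANSPORT_SNIFF.get(Collections.<String, String>emptyMap())); // false (default)
    }
}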

View File

@ -123,6 +123,11 @@ public interface ClusterStateTaskExecutor<T> {
return this == SUCCESS;
}
public Throwable getFailure() {
assert !isSuccess();
return failure;
}
/**
* Handle the execution result with the provided consumers
* @param onSuccess handler to invoke on success

View File

@ -94,7 +94,7 @@ public class MappingUpdatedAction extends AbstractComponent {
}
}
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
}
@ -102,7 +102,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
@ -111,7 +111,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
@ -28,8 +29,9 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -46,6 +48,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
@ -60,6 +63,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -125,17 +129,22 @@ public class ShardStateAction extends AbstractComponent {
return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
}
public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
/**
* Send a shard failed request to the master node to update the
* cluster state.
*
* @param shardRouting the shard to fail
* @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard)
* @param message the reason for the failure
* @param failure the underlying cause of the failure
* @param listener callback upon completion of the request
*/
public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Throwable failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
}
public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(shardRouting, indexUUID, message, failure, listener);
}
// visible for testing
protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@ -231,15 +240,15 @@ public class ShardStateAction extends AbstractComponent {
// partition tasks into those that correspond to shards
// that exist versus do not exist
Map<Boolean, List<ShardRoutingEntry>> partition =
tasks.stream().collect(Collectors.partitioningBy(task -> shardExists(currentState, task)));
Map<ValidationResult, List<ShardRoutingEntry>> partition =
tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task)));
// tasks that correspond to non-existent shards are marked
// as successful
batchResultBuilder.successes(partition.get(false));
batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList()));
ClusterState maybeUpdatedState = currentState;
List<ShardRoutingEntry> tasksToFail = partition.get(true);
List<ShardRoutingEntry> tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
try {
List<FailedRerouteAllocation.FailedShard> failedShards =
tasksToFail
@ -257,6 +266,15 @@ public class ShardStateAction extends AbstractComponent {
batchResultBuilder.failures(tasksToFail, t);
}
partition
.getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList())
.forEach(task -> batchResultBuilder.failure(
task,
new NoLongerPrimaryShardException(
task.getShardRouting().shardId(),
"source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")
));
return batchResultBuilder.build(maybeUpdatedState);
}
@ -265,17 +283,36 @@ public class ShardStateAction extends AbstractComponent {
return allocationService.applyFailedShards(currentState, failedShards);
}
private boolean shardExists(ClusterState currentState, ShardRoutingEntry task) {
private enum ValidationResult {
VALID,
SOURCE_INVALID,
SHARD_MISSING
}
private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) {
// non-local requests
if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) {
IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId());
if (indexShard == null) {
return ValidationResult.SOURCE_INVALID;
}
ShardRouting primaryShard = indexShard.primaryShard();
if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) {
return ValidationResult.SOURCE_INVALID;
}
}
RoutingNodes.RoutingNodeIterator routingNodeIterator =
currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId());
if (routingNodeIterator != null) {
for (ShardRouting maybe : routingNodeIterator) {
if (task.getShardRouting().isSameAllocation(maybe)) {
return true;
return ValidationResult.VALID;
}
}
}
return false;
return ValidationResult.SHARD_MISSING;
}
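
The executor above classifies each failure task once and then handles whole buckets: tasks for missing shards are acked as no-ops, tasks with a stale source are rejected with NoLongerPrimaryShardException, and only the valid ones reach the allocation service. A small self-contained sketch of that groupingBy/getOrDefault pattern, with String tasks standing in for ShardRoutingEntry:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Classify once, then act per bucket; all names are illustrative.
final class TaskBucketsSketch {
    enum ValidationResult { VALID, SOURCE_INVALID, SHARD_MISSING }

    static ValidationResult validate(String task) {
        // stand-in for validateTask(currentState, task)
        if (task.startsWith("missing")) return ValidationResult.SHARD_MISSING;
        if (task.startsWith("stale")) return ValidationResult.SOURCE_INVALID;
        return ValidationResult.VALID;
    }

    public static void main(String[] args) {
        List<String> tasks = Arrays.asList("shard-0", "missing-1", "stale-2");
        Map<ValidationResult, List<String>> partition =
                tasks.stream().collect(Collectors.groupingBy(TaskBucketsSketch::validate));

        // getOrDefault guards against empty buckets, exactly as in the diff
        List<String> ackAsNoOp = partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList());
        List<String> apply = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
        List<String> reject = partition.getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList());
        System.out.println("ack as no-op: " + ackAsNoOp + ", apply: " + apply + ", reject: " + reject);
    }
}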
@Override
@ -291,9 +328,9 @@ public class ShardStateAction extends AbstractComponent {
}
}
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null);
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
}
@ -360,16 +397,16 @@ public class ShardStateAction extends AbstractComponent {
public static class ShardRoutingEntry extends TransportRequest {
ShardRouting shardRouting;
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
ShardRouting sourceShardRouting;
String message;
Throwable failure;
public ShardRoutingEntry() {
}
ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Throwable failure) {
this.shardRouting = shardRouting;
this.indexUUID = indexUUID;
this.sourceShardRouting = sourceShardRouting;
this.message = message;
this.failure = failure;
}
@ -382,7 +419,7 @@ public class ShardStateAction extends AbstractComponent {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
indexUUID = in.readString();
sourceShardRouting = readShardRoutingEntry(in);
message = in.readString();
failure = in.readThrowable();
}
@ -391,18 +428,26 @@ public class ShardStateAction extends AbstractComponent {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardRouting.writeTo(out);
out.writeString(indexUUID);
sourceShardRouting.writeTo(out);
out.writeString(message);
out.writeThrowable(failure);
}
@Override
public String toString() {
return "" + shardRouting + ", indexUUID [" + indexUUID + "], message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]";
List<String> components = new ArrayList<>(4);
components.add("target shard [" + shardRouting + "]");
components.add("source shard [" + sourceShardRouting + "]");
components.add("message [" + message + "]");
if (failure != null) {
components.add("failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
}
return String.join(", ", components);
}
}
public interface Listener {
default void onSuccess() {
}
@ -423,6 +468,20 @@ public class ShardStateAction extends AbstractComponent {
*/
default void onFailure(final Throwable t) {
}
}
public static class NoLongerPrimaryShardException extends ElasticsearchException {
public NoLongerPrimaryShardException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
}
public NoLongerPrimaryShardException(StreamInput in) throws IOException {
super(in);
}
}
}

View File

@ -43,6 +43,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
/**
@ -137,6 +138,13 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return shard;
}
public IndexShardRoutingTable shardRoutingTableOrNull(ShardId shardId) {
return Optional
.ofNullable(index(shardId.getIndexName()))
.flatMap(irt -> Optional.ofNullable(irt.shard(shardId.getId())))
.orElse(null);
}
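
shardRoutingTableOrNull above folds a two-step, possibly-null lookup into one Optional chain. The same shape with plain JDK maps standing in for the routing table:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Null-safe two-step lookup: either hop may be missing, and the chain
// collapses to null instead of throwing a NullPointerException.
final class LookupSketch {
    static final Map<String, Map<Integer, String>> TABLE = new HashMap<>();

    static String shardOrNull(String index, int shardId) {
        return Optional
                .ofNullable(TABLE.get(index))
                .flatMap(shards -> Optional.ofNullable(shards.get(shardId)))
                .orElse(null);
    }

    public static void main(String[] args) {
        System.out.println(shardOrNull("logs", 0)); // null: neither hop exists yet
    }
}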
public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
RoutingTableValidation validation = validate(metaData);
if (!validation.valid()) {

View File

@ -196,7 +196,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
// note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
final String nodeId = DiscoveryService.generateNodeId(settings);
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
this.transportService.setLocalNode(localNode);

View File

@ -100,8 +100,9 @@ public abstract class CheckFileCommand extends CliTool.Command {
Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
terminal.printWarn("The file permissions of [%s] have changed from [%s] to [%s]",
entry.getKey(), PosixFilePermissions.toString(permissionsBeforeWrite), PosixFilePermissions.toString(permissionsAfterWrite));
terminal.printWarn("The file permissions of [" + entry.getKey() + "] have changed "
+ "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+ "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
}
}
@ -115,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String ownerBeforeWrite = entry.getValue();
String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
terminal.printWarn("WARN: Owner of file [%s] used to be [%s], but now is [%s]", entry.getKey(), ownerBeforeWrite, ownerAfterWrite);
terminal.printWarn("WARN: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
}
}
@ -128,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String groupBeforeWrite = entry.getValue();
String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
if (!groupAfterWrite.equals(groupBeforeWrite)) {
terminal.printWarn("WARN: Group of file [%s] used to be [%s], but now is [%s]", entry.getKey(), groupBeforeWrite, groupAfterWrite);
terminal.printWarn("WARN: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
}
}

View File

@ -19,14 +19,17 @@
package org.elasticsearch.common.cli;
import org.apache.commons.cli.AlreadySelectedException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.IOException;
import java.util.Locale;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@ -50,7 +53,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
public abstract class CliTool {
// based on sysexits.h
public static enum ExitStatus {
public enum ExitStatus {
OK(0),
OK_AND_EXIT(0),
USAGE(64), /* command line usage error */
@ -69,23 +72,13 @@ public abstract class CliTool {
final int status;
private ExitStatus(int status) {
ExitStatus(int status) {
this.status = status;
}
public int status() {
return status;
}
public static ExitStatus fromStatus(int status) {
for (ExitStatus exitStatus : values()) {
if (exitStatus.status() == status) {
return exitStatus;
}
}
return null;
}
}
protected final Terminal terminal;
@ -108,7 +101,7 @@ public abstract class CliTool {
settings = env.settings();
}
public final ExitStatus execute(String... args) {
public final ExitStatus execute(String... args) throws Exception {
// first lets see if the user requests tool help. We're doing it only if
// this is a multi-command tool. If it's a single command tool, the -h/--help
@ -132,7 +125,7 @@ public abstract class CliTool {
String cmdName = args[0];
cmd = config.cmd(cmdName);
if (cmd == null) {
terminal.printError("unknown command [%s]. Use [-h] option to list available commands", cmdName);
terminal.printError("unknown command [" + cmdName + "]. Use [-h] option to list available commands");
return ExitStatus.USAGE;
}
@ -146,23 +139,11 @@ public abstract class CliTool {
}
}
Command command = null;
try {
command = parse(cmd, args);
return command.execute(settings, env);
} catch (IOException ioe) {
terminal.printError(ioe);
return ExitStatus.IO_ERROR;
} catch (IllegalArgumentException ilae) {
terminal.printError(ilae);
return ExitStatus.USAGE;
} catch (Throwable t) {
terminal.printError(t);
if (command == null) {
return ExitStatus.USAGE;
}
return ExitStatus.CODE_ERROR;
return parse(cmd, args).execute(settings, env);
} catch (UserError error) {
terminal.printError(error.getMessage());
return error.exitStatus;
}
}
@ -177,7 +158,13 @@ public abstract class CliTool {
if (cli.hasOption("h")) {
return helpCmd(cmd);
}
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
try {
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
} catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
// intentionally drop the stack trace here as these are really user errors,
// the stack trace into cli parsing lib is not important
throw new UserError(ExitStatus.USAGE, e.toString());
}
Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
terminal.verbosity(verbosity);
return parse(cmd.name(), cli);
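
The net effect of the UserError change: bad input is signalled with a typed exception that carries its own exit status, and the top-level execute turns it into a clean one-line message instead of a stack trace. A compact sketch of that flow, with hypothetical names that only mimic the diff:

// Sketch of the UserError flow; not the real CliTool classes.
final class CliSketch {
    enum ExitStatus { OK(0), USAGE(64); final int code; ExitStatus(int code) { this.code = code; } }

    static final class UserError extends Exception {
        final ExitStatus exitStatus;
        UserError(ExitStatus exitStatus, String msg) { super(msg); this.exitStatus = exitStatus; }
    }

    static String parse(String arg) throws UserError {
        if (arg.startsWith("--") == false) {
            throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] does not start with --");
        }
        return arg.substring(2);
    }

    static ExitStatus execute(String arg) {
        try {
            System.out.println("running command: " + parse(arg));
            return ExitStatus.OK;
        } catch (UserError e) {
            System.err.println("ERROR: " + e.getMessage()); // message only, no stack trace
            return e.exitStatus;
        }
    }

    public static void main(String[] args) {
        System.out.println(execute("-badflag").code); // 64
    }
}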

View File

@ -50,7 +50,7 @@ public class HelpPrinter {
}
});
} catch (IOException ioe) {
ioe.printStackTrace(terminal.writer());
throw new RuntimeException(ioe);
}
terminal.println();
}

View File

@ -35,8 +35,6 @@ import java.util.Locale;
@SuppressForbidden(reason = "System#out")
public abstract class Terminal {
public static final String DEBUG_SYSTEM_PROPERTY = "es.cli.debug";
public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();
public static enum Verbosity {
@ -64,7 +62,6 @@ public abstract class Terminal {
}
private Verbosity verbosity = Verbosity.NORMAL;
private final boolean isDebugEnabled;
public Terminal() {
this(Verbosity.NORMAL);
@ -72,7 +69,6 @@ public abstract class Terminal {
public Terminal(Verbosity verbosity) {
this.verbosity = verbosity;
this.isDebugEnabled = "true".equals(System.getProperty(DEBUG_SYSTEM_PROPERTY, "false"));
}
public void verbosity(Verbosity verbosity) {
@ -93,46 +89,37 @@ public abstract class Terminal {
println(Verbosity.NORMAL);
}
public void println(String msg, Object... args) {
println(Verbosity.NORMAL, msg, args);
public void println(String msg) {
println(Verbosity.NORMAL, msg);
}
public void print(String msg, Object... args) {
print(Verbosity.NORMAL, msg, args);
public void print(String msg) {
print(Verbosity.NORMAL, msg);
}
public void println(Verbosity verbosity) {
println(verbosity, "");
}
public void println(Verbosity verbosity, String msg, Object... args) {
print(verbosity, msg + System.lineSeparator(), args);
public void println(Verbosity verbosity, String msg) {
print(verbosity, msg + System.lineSeparator());
}
public void print(Verbosity verbosity, String msg, Object... args) {
public void print(Verbosity verbosity, String msg) {
if (this.verbosity.enabled(verbosity)) {
doPrint(msg, args);
doPrint(msg);
}
}
public void printError(String msg, Object... args) {
println(Verbosity.SILENT, "ERROR: " + msg, args);
public void printError(String msg) {
println(Verbosity.SILENT, "ERROR: " + msg);
}
public void printError(Throwable t) {
printError("%s", t.toString());
if (isDebugEnabled) {
printStackTrace(t);
}
public void printWarn(String msg) {
println(Verbosity.SILENT, "WARN: " + msg);
}
public void printWarn(String msg, Object... args) {
println(Verbosity.SILENT, "WARN: " + msg, args);
}
protected abstract void doPrint(String msg, Object... args);
public abstract PrintWriter writer();
protected abstract void doPrint(String msg);
private static class ConsoleTerminal extends Terminal {
@ -143,8 +130,8 @@ public abstract class Terminal {
}
@Override
public void doPrint(String msg, Object... args) {
console.printf(msg, args);
public void doPrint(String msg) {
console.printf("%s", msg);
console.flush();
}
@ -158,11 +145,6 @@ public abstract class Terminal {
return console.readPassword(text, args);
}
@Override
public PrintWriter writer() {
return console.writer();
}
@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(console.writer());
@ -175,13 +157,13 @@ public abstract class Terminal {
private final PrintWriter printWriter = new PrintWriter(System.out);
@Override
public void doPrint(String msg, Object... args) {
System.out.print(String.format(Locale.ROOT, msg, args));
public void doPrint(String msg) {
System.out.print(msg);
}
@Override
public String readText(String text, Object... args) {
print(text, args);
print(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
try {
return reader.readLine();
@ -199,10 +181,5 @@ public abstract class Terminal {
public void printStackTrace(Throwable t) {
t.printStackTrace(printWriter);
}
@Override
public PrintWriter writer() {
return printWriter;
}
}
}
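
Dropping the Object... overloads means a message is never re-interpreted as a format string; note how ConsoleTerminal now goes through console.printf("%s", msg). A short demonstration of the pitfall this avoids (plain JDK, not Elasticsearch code):

import java.util.IllegalFormatException;

// Why printing goes through printf("%s", msg): a message containing '%'
// is data, not a format string. Treating it as a format blows up.
final class FormatPitfall {
    public static void main(String[] args) {
        String msg = "disk 95% full";       // user-visible text with a bare '%'
        System.out.printf("%s%n", msg);     // safe: msg is passed as an argument
        try {
            System.out.printf(msg);         // unsafe: '% f' parsed as a conversion
        } catch (IllegalFormatException e) {
            System.out.println("format error: " + e);
        }
    }
}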

View File

@ -17,19 +17,19 @@
* under the License.
*/
package org.elasticsearch.plugins.loading.classpath;
package org.elasticsearch.common.cli;
import org.elasticsearch.plugins.Plugin;
/**
* An exception representing a user fixable problem in {@link CliTool} usage.
*/
public class UserError extends Exception {
public class InClassPathPlugin extends Plugin {
/** The exit status the cli should use when catching this user error. */
public final CliTool.ExitStatus exitStatus;
@Override
public String name() {
return "in-classpath-plugin";
}
@Override
public String description() {
return "A plugin defined in class path";
/** Constructs a UserError with an exit status and message to show the user. */
public UserError(CliTool.ExitStatus exitStatus, String msg) {
super(msg);
this.exitStatus = exitStatus;
}
}

View File

@ -50,7 +50,7 @@ public abstract class AbstractComponent {
* Returns the nodes name from the settings or the empty string if not set.
*/
public final String nodeName() {
return settings.get("name", "");
return settings.get("node.name", "");
}
/**

View File

@ -1,488 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.http.client;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchCorruptionException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.unit.TimeValue;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.List;
/**
*
*/
public class HttpDownloadHelper {
private boolean useTimestamp = false;
private boolean skipExisting = false;
public boolean download(URL source, Path dest, @Nullable DownloadProgress progress, TimeValue timeout) throws Exception {
if (Files.exists(dest) && skipExisting) {
return true;
}
//don't do any progress, unless asked
if (progress == null) {
progress = new NullProgress();
}
//set the timestamp to the file date.
long timestamp = 0;
boolean hasTimestamp = false;
if (useTimestamp && Files.exists(dest) ) {
timestamp = Files.getLastModifiedTime(dest).toMillis();
hasTimestamp = true;
}
GetThread getThread = new GetThread(source, dest, hasTimestamp, timestamp, progress);
try {
getThread.setDaemon(true);
getThread.start();
getThread.join(timeout.millis());
if (getThread.isAlive()) {
throw new ElasticsearchTimeoutException("The GET operation took longer than " + timeout + ", stopping it.");
}
}
catch (InterruptedException ie) {
return false;
} finally {
getThread.closeStreams();
}
return getThread.wasSuccessful();
}
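
The download method above runs the blocking GET on a daemon thread and converts a missed join deadline into a timeout error. The bare pattern, with an IllegalStateException standing in for ElasticsearchTimeoutException:

// Run blocking I/O on a daemon thread, join with a deadline, and treat a
// still-alive thread as a timeout.
final class TimedGetSketch {
    static boolean fetchWithTimeout(Runnable blockingGet, long timeoutMillis) throws InterruptedException {
        Thread thread = new Thread(blockingGet, "get-thread");
        thread.setDaemon(true);        // never keeps the JVM alive on its own
        thread.start();
        thread.join(timeoutMillis);
        if (thread.isAlive()) {
            throw new IllegalStateException("The GET operation took longer than " + timeoutMillis + "ms, stopping it.");
        }
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        fetchWithTimeout(() -> { /* pretend download */ }, 1000);
        System.out.println("done before deadline");
    }
}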
public interface Checksummer {
/** Return the hex string for the given byte array */
String checksum(byte[] filebytes);
/** Human-readable name for the checksum format */
String name();
}
/** Checksummer for SHA1 */
public static Checksummer SHA1_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.sha1().digest(filebytes));
}
@Override
public String name() {
return "SHA1";
}
};
/** Checksummer for MD5 */
public static Checksummer MD5_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.md5().digest(filebytes));
}
@Override
public String name() {
return "MD5";
}
};
/**
* Download the given checksum URL to the destination and check the checksum
* @param checksumURL URL for the checksum file
* @param originalFile original file to calculate checksum of
* @param checksumFile destination to download the checksum file to
* @param hashFunc class used to calculate the checksum of the file
* @return true if the checksum was validated, false if it did not exist
* @throws Exception if the checksum failed to match
*/
public boolean downloadAndVerifyChecksum(URL checksumURL, Path originalFile, Path checksumFile,
@Nullable DownloadProgress progress,
TimeValue timeout, Checksummer hashFunc) throws Exception {
try {
if (download(checksumURL, checksumFile, progress, timeout)) {
byte[] fileBytes = Files.readAllBytes(originalFile);
List<String> checksumLines = Files.readAllLines(checksumFile, StandardCharsets.UTF_8);
if (checksumLines.size() != 1) {
throw new ElasticsearchCorruptionException("invalid format for checksum file (" +
hashFunc.name() + "), expected 1 line, got: " + checksumLines.size());
}
String checksumHex = checksumLines.get(0);
String fileHex = hashFunc.checksum(fileBytes);
if (fileHex.equals(checksumHex) == false) {
throw new ElasticsearchCorruptionException("incorrect hash (" + hashFunc.name() +
"), file hash: [" + fileHex + "], expected: [" + checksumHex + "]");
}
return true;
}
} catch (FileNotFoundException | NoSuchFileException e) {
// checksum file doesn't exist
return false;
} finally {
IOUtils.deleteFilesIgnoringExceptions(checksumFile);
}
return false;
}
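
The verification half of the method above reduces to: hash the downloaded bytes, then compare against the hex digest read from the checksum file. A JDK-only sketch of that comparison (SHA-1 via MessageDigest; the error text is modeled on the diff):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

// Hash the payload, hex-encode, and compare with the published digest.
final class ChecksumSketch {
    static String sha1Hex(byte[] data) throws Exception {
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(data);
        StringBuilder sb = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }

    static void verify(byte[] fileBytes, String expectedHex) throws Exception {
        String actual = sha1Hex(fileBytes);
        if (actual.equals(expectedHex) == false) {
            throw new IllegalStateException(
                    "incorrect hash (SHA1), file hash: [" + actual + "], expected: [" + expectedHex + "]");
        }
    }

    public static void main(String[] args) throws Exception {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        verify(data, sha1Hex(data)); // passes; tamper with either side to fail
        System.out.println("checksum ok");
    }
}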
/**
* Interface implemented for reporting
* progress of downloading.
*/
public interface DownloadProgress {
/**
* begin a download
*/
void beginDownload();
/**
* tick handler
*/
void onTick();
/**
* end a download
*/
void endDownload();
}
/**
* do nothing with progress info
*/
public static class NullProgress implements DownloadProgress {
/**
* begin a download
*/
@Override
public void beginDownload() {
}
/**
* tick handler
*/
@Override
public void onTick() {
}
/**
* end a download
*/
@Override
public void endDownload() {
}
}
/**
* verbose progress system prints to some output stream
*/
public static class VerboseProgress implements DownloadProgress {
private int dots = 0;
// CheckStyle:VisibilityModifier OFF - bc
PrintWriter writer;
// CheckStyle:VisibilityModifier ON
/**
* Construct a verbose progress reporter.
*
* @param writer the output stream.
*/
public VerboseProgress(PrintWriter writer) {
this.writer = writer;
}
/**
* begin a download
*/
@Override
public void beginDownload() {
writer.print("Downloading ");
dots = 0;
}
/**
* tick handler
*/
@Override
public void onTick() {
writer.print(".");
if (dots++ > 50) {
writer.flush();
dots = 0;
}
}
/**
* end a download
*/
@Override
public void endDownload() {
writer.println("DONE");
writer.flush();
}
}
private class GetThread extends Thread {
private final URL source;
private final Path dest;
private final boolean hasTimestamp;
private final long timestamp;
private final DownloadProgress progress;
private boolean success = false;
private IOException ioexception = null;
private InputStream is = null;
private OutputStream os = null;
private URLConnection connection;
private int redirections = 0;
GetThread(URL source, Path dest, boolean h, long t, DownloadProgress p) {
this.source = source;
this.dest = dest;
hasTimestamp = h;
timestamp = t;
progress = p;
}
@Override
public void run() {
try {
success = get();
} catch (IOException ioex) {
ioexception = ioex;
}
}
private boolean get() throws IOException {
connection = openConnection(source);
if (connection == null) {
return false;
}
boolean downloadSucceeded = downloadFile();
//if (and only if) the use file time option is set, then
//the saved file now has its timestamp set to that of the
//downloaded file
if (downloadSucceeded && useTimestamp) {
updateTimeStamp();
}
return downloadSucceeded;
}
private boolean redirectionAllowed(URL aSource, URL aDest) throws IOException {
// Argh, github does this...
// if (!(aSource.getProtocol().equals(aDest.getProtocol()) || ("http"
// .equals(aSource.getProtocol()) && "https".equals(aDest
// .getProtocol())))) {
// String message = "Redirection detected from "
// + aSource.getProtocol() + " to " + aDest.getProtocol()
// + ". Protocol switch unsafe, not allowed.";
// throw new IOException(message);
// }
redirections++;
if (redirections > 5) {
String message = "More than " + 5 + " times redirected, giving up";
throw new IOException(message);
}
return true;
}
private URLConnection openConnection(URL aSource) throws IOException {
// set up the URL connection
URLConnection connection = aSource.openConnection();
// modify the headers
// NB: things like user authentication could go in here too.
if (hasTimestamp) {
connection.setIfModifiedSince(timestamp);
}
// in case the plugin manager is its own project, this can become an authenticator
boolean isSecureProtocol = "https".equalsIgnoreCase(aSource.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(aSource.getUserInfo());
if (isAuthInfoSet) {
if (!isSecureProtocol) {
throw new IOException("Basic auth is only supported for HTTPS!");
}
String basicAuth = Base64.encodeBytes(aSource.getUserInfo().getBytes(StandardCharsets.UTF_8));
connection.setRequestProperty("Authorization", "Basic " + basicAuth);
}
if (connection instanceof HttpURLConnection) {
((HttpURLConnection) connection).setInstanceFollowRedirects(false);
connection.setUseCaches(true);
connection.setConnectTimeout(5000);
}
connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.shortHash());
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
// connect to the remote site (may take some time)
connection.connect();
// First check on a 301 / 302 (moved) response (HTTP only)
if (connection instanceof HttpURLConnection) {
HttpURLConnection httpConnection = (HttpURLConnection) connection;
int responseCode = httpConnection.getResponseCode();
if (responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
String newLocation = httpConnection.getHeaderField("Location");
URL newURL = new URL(newLocation);
if (!redirectionAllowed(aSource, newURL)) {
return null;
}
return openConnection(newURL);
}
// next test for a 304 result (HTTP only)
long lastModified = httpConnection.getLastModified();
if (responseCode == HttpURLConnection.HTTP_NOT_MODIFIED
|| (lastModified != 0 && hasTimestamp && timestamp >= lastModified)) {
// not modified so no file download. just return
// instead and trace out something so the user
// doesn't think that the download happened when it
// didn't
return null;
}
// test for 401 result (HTTP only)
if (responseCode == HttpURLConnection.HTTP_UNAUTHORIZED) {
String message = "HTTP Authorization failure";
throw new IOException(message);
}
}
// REVISIT: at this point even non-HTTP connections may
// support the if-modified-since behaviour - we just check
// the date of the content and skip the write if it is not
// newer. Some protocols (FTP) don't include dates, of
// course.
return connection;
}
private boolean downloadFile() throws FileNotFoundException, IOException {
IOException lastEx = null;
for (int i = 0; i < 3; i++) {
// this three-attempt trick is to get round quirks in different
// Java implementations. Some of them take a few goes to bind
// properly; we ignore the first couple of such failures.
try {
is = connection.getInputStream();
break;
} catch (IOException ex) {
lastEx = ex;
}
}
if (is == null) {
throw lastEx;
}
os = Files.newOutputStream(dest);
progress.beginDownload();
boolean finished = false;
try {
byte[] buffer = new byte[1024 * 100];
int length;
while (!isInterrupted() && (length = is.read(buffer)) >= 0) {
os.write(buffer, 0, length);
progress.onTick();
}
finished = !isInterrupted();
} finally {
if (!finished) {
// we have started to (over)write dest, but failed.
// Try to delete the garbage we'd otherwise leave
// behind.
IOUtils.closeWhileHandlingException(os, is);
IOUtils.deleteFilesIgnoringExceptions(dest);
} else {
IOUtils.close(os, is);
}
}
progress.endDownload();
return true;
}
private void updateTimeStamp() throws IOException {
long remoteTimestamp = connection.getLastModified();
if (remoteTimestamp != 0) {
Files.setLastModifiedTime(dest, FileTime.fromMillis(remoteTimestamp));
}
}
/**
* Has the download completed successfully?
* <p>
* Re-throws any exception caught during execution.</p>
*/
boolean wasSuccessful() throws IOException {
if (ioexception != null) {
throw ioexception;
}
return success;
}
/**
* Closes streams, interrupts the download, may delete the
* output file.
*/
void closeStreams() throws IOException {
interrupt();
if (success) {
IOUtils.close(is, os);
} else {
IOUtils.closeWhileHandlingException(is, os);
if (dest != null && Files.exists(dest)) {
IOUtils.deleteFilesIgnoringExceptions(dest);
}
}
}
}
}

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.matcher.Matcher;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.inject.spi.TypeConverter;
import org.elasticsearch.common.inject.spi.TypeListener;
import org.elasticsearch.common.settings.SettingsModule;
import java.lang.annotation.Annotation;
import java.util.Objects;

View File

@ -52,33 +52,6 @@ public final class FileSystemUtils {
private FileSystemUtils() {} // only static methods
/**
* Returns <code>true</code> iff a file under the given root has one of the given extensions. This method
* traverses directories recursively and terminates once any of the extensions is found. This
* method will not follow any links.
*
* @param root the root directory to traverse. Must be a directory
* @param extensions the file extensions to look for
* @return <code>true</code> iff a file under the given root has one of the given extensions, otherwise <code>false</code>
* @throws IOException if an IOException occurs or if the given root path is not a directory.
*/
public static boolean hasExtensions(Path root, final String... extensions) throws IOException {
final AtomicBoolean retVal = new AtomicBoolean(false);
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
for (String extension : extensions) {
if (file.getFileName().toString().endsWith(extension)) {
retVal.set(true);
return FileVisitResult.TERMINATE;
}
}
return super.visitFile(file, attrs);
}
});
return retVal.get();
}
/**
* Returns <code>true</code> iff one of the files exists otherwise <code>false</code>
*/
@ -168,167 +141,6 @@ public final class FileSystemUtils {
return new BufferedReader(reader);
}
/**
* Moves the full content of a source directory (the directory itself excluded) into
* a destination directory without overwriting existing files.
*
* When a file already exists in the destination dir with different content, the source
* file is moved into the destination directory under its original name plus the given
* suffix; if the suffix is not set (null), such source files are ignored.
* @param source Source directory (for example /tmp/es/src)
* @param destination Destination directory (destination directory /tmp/es/dst)
* @param suffix When not null, files are copied with a suffix appended to the original name (eg: ".new")
* When null, files are ignored
*/
public static void moveFilesWithoutOverwriting(Path source, final Path destination, final String suffix) throws IOException {
// Create destination dir
Files.createDirectories(destination);
final int configPathRootLevel = source.getNameCount();
// We walk through the file tree from
Files.walkFileTree(source, new SimpleFileVisitor<Path>() {
private Path buildPath(Path path) {
return destination.resolve(path);
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
// We are now in dir. We strip the source root to obtain a path relative to it.
// If we are not in the root dir itself, we may be able to move the whole
// directory at once, provided the target does not already exist
if (configPathRootLevel != dir.getNameCount()) {
Path subpath = dir.subpath(configPathRootLevel, dir.getNameCount());
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the structure to new dir
// we can't do atomic move here since src / dest might be on different mounts?
move(dir, path);
// We just ignore sub files from here
return FileVisitResult.SKIP_SUBTREE;
}
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path subpath = null;
if (configPathRootLevel != file.getNameCount()) {
subpath = file.subpath(configPathRootLevel, file.getNameCount());
}
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the new file to new dir
move(file, path);
} else if (suffix != null) {
if (!isSameFile(file, path)) {
// If it already exists we try to copy this new version appending suffix to its name
path = path.resolveSibling(path.getFileName().toString().concat(suffix));
// We just move the file to new dir but with a new name (appended with suffix)
Files.move(file, path, StandardCopyOption.REPLACE_EXISTING);
}
}
return FileVisitResult.CONTINUE;
}
/**
* Compares the content of two files, first by size and then byte-for-byte
*/
private boolean isSameFile(Path first, Path second) throws IOException {
// do quick file size comparison before hashing
boolean sameFileSize = Files.size(first) == Files.size(second);
if (!sameFileSize) {
return false;
}
byte[] firstBytes = Files.readAllBytes(first);
byte[] secondBytes = Files.readAllBytes(second);
return Arrays.equals(firstBytes, secondBytes);
}
});
}
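A hedged usage sketch of the method above (paths illustrative): upgrading a config directory in place, where identical files are skipped, new files are moved over, and conflicting files arrive under a suffixed name:

import java.nio.file.Path;
import java.nio.file.Paths;

public class ConfigUpgradeDemo {
    public static void main(String[] args) throws Exception {
        Path source = Paths.get("/tmp/es/src");
        Path destination = Paths.get("/tmp/es/dst");
        // an existing, different "elasticsearch.yml" in dst is kept as-is and the
        // incoming version is written alongside it as "elasticsearch.yml.new"
        FileSystemUtils.moveFilesWithoutOverwriting(source, destination, ".new");
    }
}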
/**
* Copy recursively a dir to a new location
* @param source source dir
* @param destination destination dir
*/
public static void copyDirectoryRecursively(Path source, Path destination) throws IOException {
Files.walkFileTree(source, new TreeCopier(source, destination, false));
}
/**
* Move or rename a file to a target file. This method also supports moving a non-empty
* directory across filesystems, a case where Files.move() alone fails with DirectoryNotEmptyException.
*
* @param source source file
* @param destination destination file
*/
public static void move(Path source, Path destination) throws IOException {
try {
// We can't use atomic move here since source & target can be on different filesystems.
Files.move(source, destination);
} catch (DirectoryNotEmptyException e) {
Files.walkFileTree(source, new TreeCopier(source, destination, true));
}
}
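A sketch of the fallback path (mount points assumed): when source and destination are on different filesystems, Files.move() on a non-empty directory throws DirectoryNotEmptyException and the TreeCopier walk below takes over:

import java.nio.file.Paths;

public class CrossFilesystemMoveDemo {
    public static void main(String[] args) throws Exception {
        // assuming /tmp and /data are different mounts: the copy-then-delete
        // walk replaces the failed direct move
        FileSystemUtils.move(Paths.get("/tmp/es/work"), Paths.get("/data/es/work"));
    }
}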
// TODO: note that this will fail if source and target are on different NIO.2 filesystems.
static class TreeCopier extends SimpleFileVisitor<Path> {
private final Path source;
private final Path target;
private final boolean delete;
TreeCopier(Path source, Path target, boolean delete) {
this.source = source;
this.target = target;
this.delete = delete;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
Path newDir = target.resolve(source.relativize(dir));
try {
Files.copy(dir, newDir);
} catch (FileAlreadyExistsException x) {
// We ignore this
} catch (IOException x) {
return SKIP_SUBTREE;
}
return CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
if (delete) {
IOUtils.rm(dir);
}
return CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path newFile = target.resolve(source.relativize(file));
try {
Files.copy(file, newFile);
if (delete) {
Files.deleteIfExists(file);
}
} catch (IOException x) {
// We ignore this
}
return CONTINUE;
}
}
/**
* Returns an array of all files in the given directory matching the given pattern.
*/

View File

@ -99,7 +99,7 @@ public class Loggers {
prefixesList.add(addr.getHostName());
}
}
String name = settings.get("name");
String name = settings.get("node.name");
if (name != null) {
prefixesList.add(name);
}

View File

@ -25,7 +25,7 @@ import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.cli.Terminal;
/**
* TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginManagerCliParser.
* TerminalAppender logs events to Terminal.DEFAULT. It is used for example by the PluginCli.
* */
public class TerminalAppender extends AppenderSkeleton {
@Override

View File

@ -63,7 +63,7 @@ public class AllField extends Field {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
try {
allEntries.reset(); // reset the all entries, just in case it was read already
if (allEntries.customBoost() && fieldType().indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {

View File

@ -150,16 +150,19 @@ public class NetworkModule extends AbstractModule {
public static final String TRANSPORT_TYPE_KEY = "transport.type";
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String NETTY_TRANSPORT = "netty";
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER);
private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,
RestNodesInfoAction.class,
RestNodesStatsAction.class,
RestNodesHotThreadsAction.class,
@ -380,7 +383,7 @@ public class NetworkModule extends AbstractModule {
} else {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_SETTING.getKey(), NETTY_TRANSPORT);
}
bind(RestController.class).asEagerSingleton();
catHandlers.bind(binder());

View File

@ -58,7 +58,6 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
@ -76,6 +75,7 @@ import org.elasticsearch.monitor.os.OsService;
import org.elasticsearch.monitor.process.ProcessService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.rest.BaseRestHandler;
@ -114,9 +114,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false;
}
@Override
public Settings getValue(Settings current, Settings previous) {
Settings.Builder builder = Settings.builder();
builder.put(current.filter(loggerPredicate).getAsMap());
@ -132,7 +132,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
return builder.build();
}
@Override
public void apply(Settings value, Settings current, Settings previous) {
for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key);
@ -143,231 +143,240 @@ public final class ClusterSettings extends AbstractScopedSettings {
} else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key));
}
}
}
};
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
TransportSettings.TRANSPORT_PROFILES_SETTING,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
NettyTransport.CONNECTIONS_PER_NODE_BULK,
NettyTransport.CONNECTIONS_PER_NODE_REG,
NettyTransport.CONNECTIONS_PER_NODE_STATE,
NettyTransport.CONNECTIONS_PER_NODE_PING,
NettyTransport.PING_SCHEDULE,
NettyTransport.TCP_BLOCKING_CLIENT,
NettyTransport.TCP_CONNECT_TIMEOUT,
NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
NettyTransport.TCP_NO_DELAY,
NettyTransport.TCP_KEEP_ALIVE,
NettyTransport.TCP_REUSE_ADDRESS,
NettyTransport.TCP_SEND_BUFFER_SIZE,
NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
NettyTransport.TCP_BLOCKING_SERVER,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
Node.NODE_CLIENT_SETTING,
Node.NODE_DATA_SETTING,
Node.NODE_MASTER_SETTING,
Node.NODE_LOCAL_SETTING,
Node.NODE_MODE_SETTING,
Node.NODE_INGEST_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
ClusterName.CLUSTER_NAME_SETTING,
Client.CLIENT_TYPE_SETTING_S,
InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
EsExecutors.PROCESSORS_SETTING,
ThreadContext.DEFAULT_HEADERS_SETTING,
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
ESLoggerFactory.LOG_LEVEL_SETTING,
TribeService.BLOCKS_METADATA_SETTING,
TribeService.BLOCKS_WRITE_SETTING,
TribeService.BLOCKS_WRITE_INDICES_SETTING,
TribeService.BLOCKS_READ_INDICES_SETTING,
TribeService.BLOCKS_METADATA_INDICES_SETTING,
TribeService.ON_CONFLICT_SETTING,
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
OsService.REFRESH_INTERVAL_SETTING,
ProcessService.REFRESH_INTERVAL_SETTING,
JvmService.REFRESH_INTERVAL_SETTING,
FsService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.ENABLED_SETTING,
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.GC_SETTING,
PageCacheRecycler.LIMIT_HEAP_SETTING,
PageCacheRecycler.WEIGHT_BYTES_SETTING,
PageCacheRecycler.WEIGHT_INT_SETTING,
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING
)));
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
TransportClientNodesService.CLIENT_TRANSPORT_SNIFF,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NetworkModule.HTTP_TYPE_SETTING,
NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
TransportSettings.TRANSPORT_PROFILES_SETTING,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
NettyTransport.CONNECTIONS_PER_NODE_BULK,
NettyTransport.CONNECTIONS_PER_NODE_REG,
NettyTransport.CONNECTIONS_PER_NODE_STATE,
NettyTransport.CONNECTIONS_PER_NODE_PING,
NettyTransport.PING_SCHEDULE,
NettyTransport.TCP_BLOCKING_CLIENT,
NettyTransport.TCP_CONNECT_TIMEOUT,
NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
NettyTransport.TCP_NO_DELAY,
NettyTransport.TCP_KEEP_ALIVE,
NettyTransport.TCP_REUSE_ADDRESS,
NettyTransport.TCP_SEND_BUFFER_SIZE,
NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
NettyTransport.TCP_BLOCKING_SERVER,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
Node.NODE_NAME_SETTING,
Node.NODE_CLIENT_SETTING,
Node.NODE_DATA_SETTING,
Node.NODE_MASTER_SETTING,
Node.NODE_LOCAL_SETTING,
Node.NODE_MODE_SETTING,
Node.NODE_INGEST_SETTING,
Node.NODE_ATTRIBUTES,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
ClusterName.CLUSTER_NAME_SETTING,
Client.CLIENT_TYPE_SETTING_S,
InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
EsExecutors.PROCESSORS_SETTING,
ThreadContext.DEFAULT_HEADERS_SETTING,
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
ESLoggerFactory.LOG_LEVEL_SETTING,
TribeService.BLOCKS_METADATA_SETTING,
TribeService.BLOCKS_WRITE_SETTING,
TribeService.BLOCKS_WRITE_INDICES_SETTING,
TribeService.BLOCKS_READ_INDICES_SETTING,
TribeService.BLOCKS_METADATA_INDICES_SETTING,
TribeService.ON_CONFLICT_SETTING,
TribeService.TRIBE_NAME_SETTING,
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
OsService.REFRESH_INTERVAL_SETTING,
ProcessService.REFRESH_INTERVAL_SETTING,
JvmService.REFRESH_INTERVAL_SETTING,
FsService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.ENABLED_SETTING,
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.GC_SETTING,
PageCacheRecycler.LIMIT_HEAP_SETTING,
PageCacheRecycler.WEIGHT_BYTES_SETTING,
PageCacheRecycler.WEIGHT_INT_SETTING,
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING,
PluginsService.MANDATORY_SETTING
)));
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.settings;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.tribe.TribeService;
import java.util.HashMap;
import java.util.HashSet;
@ -37,6 +38,8 @@ public class SettingsModule extends AbstractModule {
private final SettingsFilter settingsFilter;
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
this.settings = settings;
@ -55,12 +58,8 @@ public class SettingsModule extends AbstractModule {
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
// by now we are fully configured, lets check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
Predicate<String> noIndexSettingPredicate = IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate();
Predicate<String> noTribePredicate = (s) -> s.startsWith("tribe.") == false;
for (Map.Entry<String, String> entry : settings.filter(noTribePredicate.and(noIndexSettingPredicate)).getAsMap().entrySet()) {
validateClusterSetting(clusterSettings, entry.getKey(), settings);
}
final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate();
clusterSettings.validate(settings.filter(acceptOnlyClusterSettings));
validateTribeSettings(settings, clusterSettings);
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
@ -87,24 +86,16 @@ public class SettingsModule extends AbstractModule {
}
public void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.getGroups("tribe.", true);
Map<String, Settings> groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);
for (Map.Entry<String, Settings> tribeSettings : groups.entrySet()) {
for (Map.Entry<String, String> entry : tribeSettings.getValue().getAsMap().entrySet()) {
validateClusterSetting(clusterSettings, entry.getKey(), tribeSettings.getValue());
Settings thisTribesSettings = tribeSettings.getValue();
for (Map.Entry<String, String> entry : thisTribesSettings.getAsMap().entrySet()) {
try {
clusterSettings.validate(entry.getKey(), thisTribesSettings);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("tribe." + tribeSettings.getKey() +" validation failed: "+ ex.getMessage(), ex);
}
}
}
}
private final void validateClusterSetting(ClusterSettings clusterSettings, String key, Settings settings) {
// we can't call this method yet since we have not all node level settings registered.
// yet we can validate the ones we have registered to not have invalid values. this is better than nothing
// and progress over perfection and we fail as soon as possible.
// clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
if (clusterSettings.get(key) != null) {
clusterSettings.validate(key, settings);
} else if (AbstractScopedSettings.isValidKey(key) == false) {
throw new IllegalArgumentException("illegal settings key: [" + key + "]");
}
}
}

View File

@ -90,7 +90,7 @@ public class EsExecutors {
}
public static String threadName(Settings settings, String namePrefix) {
String name = settings.get("name");
String name = settings.get("node.name");
if (name == null) {
name = "elasticsearch";
} else {

View File

@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Container that represents a resource with reference counting capabilities. Provides operations to suspend acquisition of new references.
* This is useful for resource management when resources are intermittently unavailable.
*
* Assumes fewer than Integer.MAX_VALUE references are held concurrently at any point in time.
*/
public final class SuspendableRefContainer {
private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
private final Semaphore semaphore;
public SuspendableRefContainer() {
// fair semaphore to ensure that blockAcquisition() does not starve under thread contention
this.semaphore = new Semaphore(TOTAL_PERMITS, true);
}
/**
* Tries acquiring a reference. Returns reference holder if reference acquisition is not blocked at the time of invocation (see
* {@link #blockAcquisition()}). Returns null if reference acquisition is blocked at the time of invocation.
*
* @return reference holder if reference acquisition is not blocked, null otherwise
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable tryAcquire() throws InterruptedException {
if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting
return idempotentRelease(1);
} else {
return null;
}
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable acquire() throws InterruptedException {
semaphore.acquire();
return idempotentRelease(1);
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
*/
public Releasable acquireUninterruptibly() {
semaphore.acquireUninterruptibly();
return idempotentRelease(1);
}
/**
* Disables reference acquisition and waits until all existing references are released.
* When released, reference acquisition is enabled again.
* This guarantees that between successful acquisition and release, no one is holding a reference.
*
* @return references holder to all references
*/
public Releasable blockAcquisition() {
semaphore.acquireUninterruptibly(TOTAL_PERMITS);
return idempotentRelease(TOTAL_PERMITS);
}
/**
* Helper method that ensures permits are only released once
*
* @return reference holder
*/
private Releasable idempotentRelease(int permits) {
AtomicBoolean closed = new AtomicBoolean();
return () -> {
if (closed.compareAndSet(false, true)) {
semaphore.release(permits);
}
};
}
/**
* Returns the number of references currently being held.
*/
public int activeRefs() {
int availablePermits = semaphore.availablePermits();
if (availablePermits == 0) {
// when blockAcquisition is holding all permits
return 0;
} else {
return TOTAL_PERMITS - availablePermits;
}
}
}
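A sketch of the intended usage pattern, assuming a caller that guards ordinary operations with references and drains them before an exclusive maintenance step; try-with-resources works because the reference holders are Releasable:

import org.elasticsearch.common.lease.Releasable;

public class RefContainerDemo {
    private final SuspendableRefContainer refs = new SuspendableRefContainer();

    void doOperation() throws InterruptedException {
        try (Releasable ref = refs.acquire()) { // blocks while acquisition is suspended
            // perform work while holding a reference
        }
    }

    void maintenance() {
        // waits for all active references to be released, then holds all permits
        try (Releasable block = refs.blockAcquisition()) {
            assert refs.activeRefs() == 0;
            // exclusive section: no operation holds a reference here
        }
    }
}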

View File

@ -41,13 +41,28 @@ public interface XContent {
/**
* Creates a new generator using the provided output stream.
*/
XContentGenerator createGenerator(OutputStream os) throws IOException;
default XContentGenerator createGenerator(OutputStream os) throws IOException {
return createGenerator(os, null, true);
}
/**
* Creates a new generator using the provided output stream and some filters.
* Creates a new generator using the provided output stream and some
* inclusive filters. Same as createGenerator(os, filters, true).
*/
XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException;
default XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return createGenerator(os, filters, true);
}
/**
* Creates a new generator using the provided output stream and some
* filters.
*
* @param inclusive
* If true, only paths matching a filter will be included in the
* output. If false, paths matching a filter will be excluded from
* the output
*/
XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException;
/**
* Creates a parser over the provided string content.
*/
@ -77,4 +92,5 @@ public interface XContent {
* Creates a parser over the provided reader.
*/
XContentParser createParser(Reader reader) throws IOException;
}
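A sketch of the behavioural difference the new inclusive flag introduces, using the XContentBuilder.builder(XContent, String[], boolean) factory added alongside this change; field names and the expected outputs in the comments are illustrative:

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class FilteringDemo {
    public static void main(String[] args) throws Exception {
        String[] filters = new String[] { "title" };

        // inclusive = true keeps only matching paths: {"title":"foo"}
        XContentBuilder include = XContentBuilder.builder(JsonXContent.jsonXContent, filters, true);
        include.startObject().field("title", "foo").field("body", "bar").endObject();

        // inclusive = false drops matching paths instead: {"body":"bar"}
        XContentBuilder exclude = XContentBuilder.builder(JsonXContent.jsonXContent, filters, false);
        exclude.startObject().field("title", "foo").field("body", "bar").endObject();
    }
}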

View File

@ -83,6 +83,10 @@ public final class XContentBuilder implements BytesStream, Releasable {
return new XContentBuilder(xContent, new BytesStreamOutput(), filters);
}
public static XContentBuilder builder(XContent xContent, String[] filters, boolean inclusive) throws IOException {
return new XContentBuilder(xContent, new BytesStreamOutput(), filters, inclusive);
}
private XContentGenerator generator;
private final OutputStream bos;
@ -102,13 +106,25 @@ public final class XContentBuilder implements BytesStream, Releasable {
}
/**
* Constructs a new builder using the provided xcontent, an OutputStream and some filters. The
* filters are used to filter fields that won't be written to the OutputStream. Make sure
* to call {@link #close()} when the builder is done with.
* Constructs a new builder using the provided xcontent, an OutputStream and
* some filters. If filters are specified, only those values matching a
* filter will be written to the output stream. Make sure to call
* {@link #close()} when done with the builder.
*/
public XContentBuilder(XContent xContent, OutputStream bos, String[] filters) throws IOException {
this(xContent, bos, filters, true);
}
/**
* Constructs a new builder using the provided xcontent, an OutputStream and
* some filters. If {@code filters} are specified and {@code inclusive} is
* true, only those values matching a filter will be written to the output
* stream. If {@code inclusive} is false, those matching will be excluded.
* Make sure to call {@link #close()} when done with the builder.
*/
public XContentBuilder(XContent xContent, OutputStream bos, String[] filters, boolean inclusive) throws IOException {
this.bos = bos;
this.generator = xContent.createGenerator(bos, filters);
this.generator = xContent.createGenerator(bos, filters, inclusive);
}
public XContentBuilder fieldCaseConversion(FieldCaseConversion fieldCaseConversion) {

View File

@ -121,5 +121,6 @@ public interface XContentGenerator extends Closeable {
void flush() throws IOException;
@Override
void close() throws IOException;
}

View File

@ -67,13 +67,8 @@ public class CborXContent implements XContent {
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os);
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}
@Override
@ -108,4 +103,5 @@ public class CborXContent implements XContent {
public XContentParser createParser(Reader reader) throws IOException {
return new CborXContentParser(cborFactory.createParser(reader));
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.cbor;
import com.fasterxml.jackson.core.JsonGenerator;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class CborXContentGenerator extends JsonXContentGenerator {
public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}
public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}
@Override

View File

@ -71,13 +71,8 @@ public class JsonXContent implements XContent {
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os);
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}
@Override

View File

@ -72,6 +72,10 @@ public class JsonXContentGenerator implements XContentGenerator {
private boolean prettyPrint = false;
public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
this(jsonGenerator, os, filters, true);
}
public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
if (jsonGenerator instanceof GeneratorBase) {
this.base = (GeneratorBase) jsonGenerator;
} else {
@ -82,7 +86,8 @@ public class JsonXContentGenerator implements XContentGenerator {
this.generator = jsonGenerator;
this.filter = null;
} else {
this.filter = new FilteringGeneratorDelegate(jsonGenerator, new FilterPathBasedFilter(filters), true, true);
this.filter = new FilteringGeneratorDelegate(jsonGenerator,
new FilterPathBasedFilter(filters, inclusive), true, true);
this.generator = this.filter;
}
@ -375,6 +380,7 @@ public class JsonXContentGenerator implements XContentGenerator {
}
}
@Override
public final void writeRawValue(BytesReference content) throws IOException {
XContentType contentType = XContentFactory.xContentType(content);
if (contentType == null) {
@ -450,4 +456,5 @@ public class JsonXContentGenerator implements XContentGenerator {
}
generator.close();
}
}

View File

@ -68,13 +68,8 @@ public class SmileXContent implements XContent {
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os);
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}
@Override

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.smile;
import com.fasterxml.jackson.core.JsonGenerator;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class SmileXContentGenerator extends JsonXContentGenerator {
public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}
public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}
@Override

View File

@ -31,25 +31,30 @@ public class FilterPathBasedFilter extends TokenFilter {
* Marker value that should be used to indicate that a property name
* or value matches one of the filter paths.
*/
private static final TokenFilter MATCHING = new TokenFilter(){};
private static final TokenFilter MATCHING = new TokenFilter() {
};
/**
* Marker value that should be used to indicate that none of the
* property names/values matches one of the filter paths.
*/
private static final TokenFilter NO_MATCHING = new TokenFilter(){};
private static final TokenFilter NO_MATCHING = new TokenFilter() {
};
private final FilterPath[] filters;
public FilterPathBasedFilter(FilterPath[] filters) {
private final boolean inclusive;
public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) {
if (CollectionUtils.isEmpty(filters)) {
throw new IllegalArgumentException("filters cannot be null or empty");
}
this.inclusive = inclusive;
this.filters = filters;
}
public FilterPathBasedFilter(String[] filters) {
this(FilterPath.compile(filters));
public FilterPathBasedFilter(String[] filters, boolean inclusive) {
this(FilterPath.compile(filters), inclusive);
}
/**
@ -77,31 +82,32 @@ public class FilterPathBasedFilter extends TokenFilter {
}
if ((nextFilters != null) && (nextFilters.isEmpty() == false)) {
return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]));
return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]), inclusive);
}
}
return NO_MATCHING;
}
@Override
public TokenFilter includeProperty(String name) {
TokenFilter include = evaluate(name, filters);
if (include == MATCHING) {
return TokenFilter.INCLUDE_ALL;
TokenFilter filter = evaluate(name, filters);
if (filter == MATCHING) {
return inclusive ? TokenFilter.INCLUDE_ALL : null;
}
if (include == NO_MATCHING) {
return null;
if (filter == NO_MATCHING) {
return inclusive ? null : TokenFilter.INCLUDE_ALL;
}
return include;
return filter;
}
@Override
protected boolean _includeScalar() {
for (FilterPath filter : filters) {
if (filter.matches()) {
return true;
return inclusive;
}
}
return false;
return !inclusive;
}
}
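The flag flips both decision points symmetrically; constructing the filter directly makes the four cases explicit (the filter path used here is illustrative):

import com.fasterxml.jackson.core.filter.TokenFilter;

public class FilterModesDemo {
    public static void main(String[] args) {
        String[] paths = new String[] { "user.name" };
        // match + inclusive        -> INCLUDE_ALL (kept)
        // match + not inclusive    -> null (dropped)
        // no match + inclusive     -> null (dropped)
        // no match + not inclusive -> INCLUDE_ALL (kept)
        TokenFilter include = new FilterPathBasedFilter(paths, true);
        TokenFilter exclude = new FilterPathBasedFilter(paths, false);
    }
}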

View File

@ -66,13 +66,8 @@ public class YamlXContent implements XContent {
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os);
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}
@Override

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.yaml;
import com.fasterxml.jackson.core.JsonGenerator;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class YamlXContentGenerator extends JsonXContentGenerator {
public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}
public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}
@Override

View File

@ -89,17 +89,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
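The settings above chain their defaults: join_timeout defaults to 20 x ping_timeout, and the master-election join wait defaults to half of join_timeout. A standalone sketch of that derived-default arithmetic (plain maps, not the Setting API):

import java.util.Collections;
import java.util.Map;

public class DerivedDefaults {
    static long pingTimeout(Map<String, Long> s) {
        return s.getOrDefault("discovery.zen.ping_timeout", 3_000L);
    }

    static long joinTimeout(Map<String, Long> s) {
        // default derives from ping_timeout unless set explicitly
        return s.getOrDefault("discovery.zen.join_timeout", pingTimeout(s) * 20);
    }

    static long waitForJoinsTimeout(Map<String, Long> s) {
        // default derives from join_timeout, which may itself be derived
        return s.getOrDefault("discovery.zen.master_election.wait_for_joins_timeout", joinTimeout(s) / 2);
    }

    public static void main(String[] args) {
        Map<String, Long> defaults = Collections.emptyMap();
        System.out.println(joinTimeout(defaults));         // 60000
        System.out.println(waitForJoinsTimeout(defaults)); // 30000
        Map<String, Long> tuned = Collections.singletonMap("discovery.zen.ping_timeout", 1_000L);
        System.out.println(joinTimeout(tuned));            // 20000
    }
}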
@ -142,8 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private final AtomicBoolean initialStateSent = new AtomicBoolean();
private volatile boolean rejoinOnMasterGone;
/** counts the times this node has joined the cluster or elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
@ -177,7 +174,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
@ -188,7 +184,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
}
});
clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone);
this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener());
@ -323,10 +318,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterJoinsCounter.get() > 0;
}
private void setRejoingOnMasterGone(boolean rejoin) {
this.rejoinOnMasterGone = rejoin;
}
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -670,35 +661,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// flush any pending cluster states from old master, so it will not be set as master again
publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
if (rejoinOnMasterGone) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}
if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
}
final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
final DiscoveryNode localNode = currentState.nodes().localNode();
if (localNode.equals(electedMaster)) {
masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
ClusterState newState = ClusterState.builder(currentState).nodes(discoveryNodes).build();
nodesFD.updateNodesAndPing(newState);
return newState;
} else {
nodesFD.stop();
if (electedMaster != null) {
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
return ClusterState.builder(currentState)
.nodes(discoveryNodes)
.build();
} else {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
}
}
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}
@Override
@ -857,7 +820,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (node.getVersion().before(minimumNodeJoinVersion)) {
callback.onFailure(
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
);
return;
}
@ -1109,10 +1072,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
boolean isRejoinOnMasterGone() {
return rejoinOnMasterGone;
}
public static class RejoinClusterRequest extends TransportRequest {
private String fromNodeId;

View File

@ -330,31 +330,4 @@ public class Environment {
public static FileStore getFileStore(Path path) throws IOException {
return ESFileStore.getMatchingFileStore(path, fileStores);
}
/**
* Returns true if the path is writable.
* Acts just like {@link Files#isWritable(Path)}, except won't
* falsely return false for paths on SUBST'd drive letters
* See https://bugs.openjdk.java.net/browse/JDK-8034057
* Note this will set the file modification time (to its already-set value)
* to test access.
*/
@SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057")
public static boolean isWritable(Path path) throws IOException {
boolean v = Files.isWritable(path);
if (v || Constants.WINDOWS == false) {
return v;
}
// isWritable returned false on windows, the hack begins!!!!!!
// resetting the modification time is the least destructive/simplest
// way to check for both files and directories, and fails early just
// in getting the current value if file doesn't exist, etc
try {
Files.setLastModifiedTime(path, Files.getLastModifiedTime(path));
return true;
} catch (Throwable e) {
return false;
}
}
}

View File

@ -182,7 +182,7 @@ public final class IndexSettings {
this.index = indexMetaData.getIndex();
version = Version.indexCreated(settings);
logger = Loggers.getLogger(getClass(), settings, index);
nodeName = settings.get("name", "");
nodeName = settings.get("node.name", "");
this.indexMetaData = indexMetaData;
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
isShadowReplicaIndex = IndexMetaData.isIndexUsingShadowReplicas(settings);

View File

@ -67,9 +67,6 @@ public final class EngineConfig {
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
static {
}
/**
* Index setting to change the low level lucene codec used for writing new segments.
* This setting is <b>not</b> realtime updateable.

View File

@ -320,7 +320,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
if (fieldType().indexOptions() != IndexOptions.NONE) {
return getCachedStream().setDoubleValue(number);
}

View File

@ -332,7 +332,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
if (fieldType().indexOptions() != IndexOptions.NONE) {
return getCachedStream().setFloatValue(number);
}

View File

@ -340,7 +340,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
if (fieldType().indexOptions() != IndexOptions.NONE) {
return getCachedStream().setIntValue(number);
}

View File

@ -323,7 +323,7 @@ public class LongFieldMapper extends NumberFieldMapper {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
if (fieldType().indexOptions() != IndexOptions.NONE) {
return getCachedStream().setLongValue(number);
}

View File

@ -408,7 +408,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
return null;
}

View File

@ -332,7 +332,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
}
@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
if (fieldType().indexOptions() != IndexOptions.NONE) {
return getCachedStream().setIntValue(number);
}

View File

@ -42,17 +42,17 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
@ -189,9 +189,17 @@ public class IndexShard extends AbstractIndexShardComponent {
private final ShardPath path;
private final IndexShardOperationCounter indexShardOperationCounter;
private final SuspendableRefContainer suspendableRefContainer;
private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
private static final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
// for primaries, we only allow writes when actually started (so the cluster has decided we started)
// in case we have a relocation of a primary, we also allow writes after phase 2 has completed, where the shard may be
// in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target
// which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries).
public static final EnumSet<IndexShardState> writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
// replication is also allowed while recovering, since we also index during recovery to replicas and rely on version checks to make sure it's consistent
// a relocated shard can also be the target of a replication if the relocation target has not been marked as active yet and is syncing its changes back to the relocation source
private static final EnumSet<IndexShardState> writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
private final IndexSearcherWrapper searcherWrapper;
@ -250,7 +258,7 @@ public class IndexShard extends AbstractIndexShardComponent {
}
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.suspendableRefContainer = new SuspendableRefContainer();
this.provider = provider;
this.searcherWrapper = indexSearcherWrapper;
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
@ -321,6 +329,8 @@ public class IndexShard extends AbstractIndexShardComponent {
* Updates the shard's routing entry. This mutates the shard's internal state depending
* on the changes that get introduced by the new routing value. This method will persist shard level metadata
* unless explicitly disabled.
*
* @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
*/
public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) {
final ShardRouting currentRouting = this.shardRouting;
@ -368,6 +378,14 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
}
if (state == IndexShardState.RELOCATED &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
this.shardRouting = newRouting;
indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
} finally {
@ -404,12 +422,16 @@ public class IndexShard extends AbstractIndexShardComponent {
}
public IndexShard relocated(String reason) throws IndexShardNotStartedException {
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
try (Releasable block = suspendableRefContainer.blockAcquisition()) {
// no shard operation locks are being held here, move state from started to relocated
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
changeState(IndexShardState.RELOCATED, reason);
}
changeState(IndexShardState.RELOCATED, reason);
}
return this;
}
@ -796,7 +818,6 @@ public class IndexShard extends AbstractIndexShardComponent {
refreshScheduledFuture = null;
}
changeState(IndexShardState.CLOSED, reason);
indexShardOperationCounter.decRef();
} finally {
final Engine engine = this.currentEngineReference.getAndSet(null);
try {
@ -810,7 +831,6 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
refresh("percolator_load_queries");
@ -967,16 +987,17 @@ public class IndexShard extends AbstractIndexShardComponent {
IndexShardState state = this.state; // one time volatile read
if (origin == Engine.Operation.Origin.PRIMARY) {
// for primaries, we only allow writes when actually started (so the cluster has decided we started)
// otherwise we need to retry; we also still want to allow indexing if we are relocated, in case it fails
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
if (writeAllowedStatesForPrimary.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]");
}
} else if (origin == Engine.Operation.Origin.RECOVERY) {
if (state != IndexShardState.RECOVERING) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
}
} else {
// for replicas, we also allow writes while recovering, since we index during recovery to replicas
// and rely on version checks to make sure it's consistent
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
assert origin == Engine.Operation.Origin.REPLICA;
if (writeAllowedStatesForReplica.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]");
}
}
}
@ -995,7 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent {
private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state == IndexShardState.CLOSED) {
final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
if (suppressed != null) {
exc.addSuppressed(suppressed);
}
@ -1390,37 +1411,21 @@ public class IndexShard extends AbstractIndexShardComponent {
idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
}
private static class IndexShardOperationCounter extends AbstractRefCounted {
final private ESLogger logger;
private final ShardId shardId;
public IndexShardOperationCounter(ESLogger logger, ShardId shardId) {
super("index-shard-operations-counter");
this.logger = logger;
this.shardId = shardId;
}
@Override
protected void closeInternal() {
logger.debug("operations counter reached 0, will not accept any further writes");
}
@Override
protected void alreadyClosed() {
throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed.");
public Releasable acquirePrimaryOperationLock() {
verifyNotClosed();
if (shardRouting.primary() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
}
return suspendableRefContainer.acquireUninterruptibly();
}
public void incrementOperationCounter() {
indexShardOperationCounter.incRef();
public Releasable acquireReplicaOperationLock() {
verifyNotClosed();
return suspendableRefContainer.acquireUninterruptibly();
}
public void decrementOperationCounter() {
indexShardOperationCounter.decRef();
}
public int getOperationsCount() {
return Math.max(0, indexShardOperationCounter.refCount() - 1); // refCount is incremented on creation and decremented on close
public int getActiveOperationsCount() {
return suspendableRefContainer.activeRefs(); // refCount is incremented on creation and decremented on close
}
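The operation locks above replace manual ref counting: every indexing operation holds a lock, and relocated() uses blockAcquisition() to wait until none are held before flipping the state, so no write can straddle the handoff. A rough standalone sketch of such a container using a semaphore (an illustration only, not the org.elasticsearch.common.util.concurrent.SuspendableRefContainer implementation):

import java.io.Closeable;
import java.util.concurrent.Semaphore;

public class SuspendableRefsSketch {
    private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
    private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS, true);

    /** Acquire a single operation ref (the primary/replica operation lock). */
    public Closeable acquire() {
        semaphore.acquireUninterruptibly();
        return () -> semaphore.release();
    }

    /** Block all new acquisitions until the returned handle is closed;
     *  only succeeds once no operation ref is held. */
    public Closeable blockAcquisition() {
        semaphore.acquireUninterruptibly(TOTAL_PERMITS);
        return () -> semaphore.release(TOTAL_PERMITS);
    }

    /** Approximate: permits currently held (a full block counts as all of them). */
    public int activeRefs() {
        return TOTAL_PERMITS - semaphore.availablePermits();
    }

    public static void main(String[] args) throws Exception {
        SuspendableRefsSketch refs = new SuspendableRefsSketch();
        try (Closeable op = refs.acquire()) {
            System.out.println(refs.activeRefs()); // 1
        }
        try (Closeable block = refs.blockAcquisition()) {
            // safe to flip state to RELOCATED here: no operation lock is held
        }
    }
}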
/**

View File

@ -29,10 +29,14 @@ import java.io.IOException;
public class IndexShardRelocatedException extends IllegalIndexShardStateException {
public IndexShardRelocatedException(ShardId shardId) {
super(shardId, IndexShardState.RELOCATED, "Already relocated");
this(shardId, "Already relocated");
}
public IndexShardRelocatedException(ShardId shardId, String reason) {
super(shardId, IndexShardState.RELOCATED, reason);
}
public IndexShardRelocatedException(StreamInput in) throws IOException{
super(in);
}
}
}

View File

@ -76,7 +76,7 @@ public class ShardId implements Streamable, Comparable<ShardId> {
if (this == o) return true;
if (o == null) return false;
ShardId shardId1 = (ShardId) o;
return shardId == shardId1.shardId && index.getName().equals(shardId1.index.getName());
return shardId == shardId1.shardId && index.equals(shardId1.index);
}
@Override
@ -112,8 +112,12 @@ public class ShardId implements Streamable, Comparable<ShardId> {
@Override
public int compareTo(ShardId o) {
if (o.getId() == shardId) {
return index.getName().compareTo(o.getIndex().getName());
int compare = index.getName().compareTo(o.getIndex().getName());
if (compare != 0) {
return compare;
}
return index.getUUID().compareTo(o.getIndex().getUUID());
}
return Integer.compare(shardId, o.getId());
}
}
}
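The fix above makes the ordering consistent with equals(): two indices can share a name after a delete-and-recreate, so name ties must be broken by UUID or compare() would report 0 for unequal shards. A standalone sketch with a hypothetical key type:

import java.util.Comparator;

public class IndexKey {
    final String name;
    final String uuid;

    IndexKey(String name, String uuid) {
        this.name = name;
        this.uuid = uuid;
    }

    // compare by name first, then break ties by UUID, mirroring the fix above
    static final Comparator<IndexKey> ORDER =
            Comparator.comparing((IndexKey k) -> k.name)
                      .thenComparing(k -> k.uuid);

    public static void main(String[] args) {
        IndexKey a = new IndexKey("logs", "uuid-1");
        IndexKey b = new IndexKey("logs", "uuid-2");
        System.out.println(ORDER.compare(a, b)); // negative, not 0
    }
}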

View File

@ -20,30 +20,58 @@
package org.elasticsearch.index.similarity;
import org.apache.lucene.search.similarities.DFISimilarity;
import org.apache.lucene.search.similarities.Independence;
import org.apache.lucene.search.similarities.IndependenceChiSquared;
import org.apache.lucene.search.similarities.IndependenceSaturated;
import org.apache.lucene.search.similarities.IndependenceStandardized;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Settings;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* {@link SimilarityProvider} for the {@link DFISimilarity}.
* <p>
* Configuration options available:
* <ul>
* <li>independence_measure</li>
* <li>discount_overlaps</li>
* </ul>
* @see DFISimilarity For more information about configuration
*/
public class DFISimilarityProvider extends AbstractSimilarityProvider {
// the "basic models" of divergence from independence
private static final Map<String, Independence> INDEPENDENCE_MEASURES;
static {
Map<String, Independence> measures = new HashMap<>();
measures.put("standardized", new IndependenceStandardized());
measures.put("saturated", new IndependenceSaturated());
measures.put("chisquared", new IndependenceChiSquared());
INDEPENDENCE_MEASURES = unmodifiableMap(measures);
}
private final DFISimilarity similarity;
public DFISimilarityProvider(String name, Settings settings) {
super(name);
boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true);
this.similarity = new DFISimilarity();
Independence measure = parseIndependence(settings);
this.similarity = new DFISimilarity(measure);
this.similarity.setDiscountOverlaps(discountOverlaps);
}
private Independence parseIndependence(Settings settings) {
String name = settings.get("independence_measure");
Independence measure = INDEPENDENCE_MEASURES.get(name);
if (measure == null) {
throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]");
}
return measure;
}
@Override
public Similarity get() {
return similarity;

View File

@ -52,8 +52,8 @@ import static java.util.Collections.unmodifiableMap;
* @see DFRSimilarity For more information about configuration
*/
public class DFRSimilarityProvider extends AbstractSimilarityProvider {
private static final Map<String, BasicModel> MODEL_CACHE;
private static final Map<String, AfterEffect> EFFECT_CACHE;
private static final Map<String, BasicModel> BASIC_MODELS;
private static final Map<String, AfterEffect> AFTER_EFFECTS;
static {
Map<String, BasicModel> models = new HashMap<>();
@ -64,13 +64,13 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
models.put("in", new BasicModelIn());
models.put("ine", new BasicModelIne());
models.put("p", new BasicModelP());
MODEL_CACHE = unmodifiableMap(models);
BASIC_MODELS = unmodifiableMap(models);
Map<String, AfterEffect> effects = new HashMap<>();
effects.put("no", new AfterEffect.NoAfterEffect());
effects.put("b", new AfterEffectB());
effects.put("l", new AfterEffectL());
EFFECT_CACHE = unmodifiableMap(effects);
AFTER_EFFECTS = unmodifiableMap(effects);
}
private final DFRSimilarity similarity;
@ -91,7 +91,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
*/
protected BasicModel parseBasicModel(Settings settings) {
String basicModel = settings.get("basic_model");
BasicModel model = MODEL_CACHE.get(basicModel);
BasicModel model = BASIC_MODELS.get(basicModel);
if (model == null) {
throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]");
}
@ -106,7 +106,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
*/
protected AfterEffect parseAfterEffect(Settings settings) {
String afterEffect = settings.get("after_effect");
AfterEffect effect = EFFECT_CACHE.get(afterEffect);
AfterEffect effect = AFTER_EFFECTS.get(afterEffect);
if (effect == null) {
throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]");
}

View File

@ -39,13 +39,11 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.IndexService;
@ -93,26 +91,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {};
// a map of mapping types we have seen per index due to cluster state
// we need this so we won't remove types automatically created as part of the indexing process
private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();
// a list of shards that failed during recovery
// we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap();
private final ConcurrentMap<ShardId, ShardRouting> failedShards = ConcurrentCollections.newConcurrentMap();
private final RestoreService restoreService;
private final RepositoriesService repositoriesService;
static class FailedShard {
public final long version;
public final long timestamp;
FailedShard(long version) {
this.version = version;
this.timestamp = System.currentTimeMillis();
}
}
private final Object mutex = new Object();
private final FailedShardHandler failedShardHandler = new FailedShardHandler();
@ -322,7 +306,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
try {
indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
} catch (Throwable e) {
sendFailShard(shard, indexMetaData.getIndexUUID(), "failed to create index", e);
sendFailShard(shard, "failed to create index", e);
}
}
}
@ -387,7 +371,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// so this failure typically means wrong node level configuration or something similar
for (IndexShard indexShard : indexService) {
ShardRouting shardRouting = indexShard.routingEntry();
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to update mappings", t);
failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
}
}
}
@ -436,6 +420,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
for (final ShardRouting shardRouting : routingNode) {
@ -455,12 +440,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!indexService.hasShard(shardId) && shardRouting.started()) {
if (failedShards.containsKey(shardRouting.shardId())) {
if (nodes.masterNode() != null) {
shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
"master " + nodes.masterNode() + " marked shard as started, but shard has previous failed. resending shard failure.", null, SHARD_STATE_ACTION_LISTENER);
String message = "master " + nodes.masterNode() + " marked shard as started, but shard has previous failed. resending shard failure";
logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
}
} else {
// the master thinks we are started, but we don't have this shard at all, mark it as failed
sendFailShard(shardRouting, indexMetaData.getIndexUUID(), "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
sendFailShard(shardRouting, "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
}
continue;
}
@ -492,7 +478,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there.
assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() :
"shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
try {
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
} catch (Throwable e) {
failAndRemoveShard(shardRouting, indexService, true, "failed updating shard routing entry", e);
}
}
}
@ -503,40 +493,29 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
private void cleanFailedShards(final ClusterChangedEvent event) {
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
long now = System.currentTimeMillis();
String localNodeId = nodes.localNodeId();
Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
shards:
while (iterator.hasNext()) {
Map.Entry<ShardId, FailedShard> entry = iterator.next();
FailedShard failedShard = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
if (indexRoutingTable != null) {
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
if (shardRoutingTable != null) {
for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
if (localNodeId.equals(shardRouting.currentNodeId())) {
// we have a timeout here just to make sure we don't have dangling failed shards for some reason
// it's just another safety layer
if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
// It's the same failed shard - keep it if it hasn't timed out
continue shards;
} else {
// Different version or expired, remove it
break;
}
}
}
}
RoutingTable routingTable = event.state().routingTable();
for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
Map.Entry<ShardId, ShardRouting> entry = iterator.next();
ShardId failedShardId = entry.getKey();
ShardRouting failedShardRouting = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
if (indexRoutingTable == null) {
iterator.remove();
continue;
}
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
if (shardRoutingTable == null) {
iterator.remove();
continue;
}
if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
iterator.remove();
}
iterator.remove();
}
}
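A standalone sketch of the new cleanup rule (plain strings instead of ShardRouting): a failure entry survives only while the routing table still assigns the same allocation here; otherwise it is dropped so the shard can be recovered again:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

public class FailedShardCleanupDemo {
    public static void main(String[] args) {
        Map<String, String> failedShards = new HashMap<>(); // shardId -> failed allocation id
        failedShards.put("[logs][0]", "alloc-1");
        failedShards.put("[logs][1]", "alloc-2");

        // allocations the routing table currently assigns to this node
        Set<String> assignedAllocations = new HashSet<>(Arrays.asList("alloc-1"));

        for (Iterator<Map.Entry<String, String>> it = failedShards.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<String, String> entry = it.next();
            if (assignedAllocations.stream().noneMatch(a -> a.equals(entry.getValue()))) {
                it.remove(); // allocation no longer assigned here: forget the failure
            }
        }
        System.out.println(failedShards); // {[logs][0]=alloc-1}
    }
}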
@ -561,7 +540,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
indexShard.shardId(), indexShard.state(), nodes.masterNode());
}
if (nodes.masterNode() != null) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(),
shardStateAction.shardStarted(shardRouting,
"master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started",
SHARD_STATE_ACTION_LISTENER);
}
@ -588,8 +567,9 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (!indexService.hasShard(shardId)) {
if (failedShards.containsKey(shardRouting.shardId())) {
if (nodes.masterNode() != null) {
shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
"master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resend shard failure", null, SHARD_STATE_ACTION_LISTENER);
String message = "master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resend shard failure";
logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
}
return;
}
@ -602,7 +582,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} catch (IndexShardAlreadyExistsException e) {
// ignore this, the method call can happen several times
} catch (Throwable e) {
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to create shard", e);
failAndRemoveShard(shardRouting, indexService, true, "failed to create shard", e);
return;
}
}
@ -626,7 +606,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// For primaries: requests in any case are routed to both when it's relocating and that way we handle
// the edge case where it's marked as relocated, and we might need to roll it back...
// For replicas: we are recovering a backup from a primary
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.RELOCATION : RecoveryState.Type.REPLICA;
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode());
indexShard.markAsRecovering("from " + sourceNode, recoveryState);
recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
@ -644,7 +624,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
threadPool.generic().execute(() -> {
try {
if (indexShard.recoverFromStore(nodes.localNode())) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from store", SHARD_STATE_ACTION_LISTENER);
shardStateAction.shardStarted(shardRouting, "after recovery from store", SHARD_STATE_ACTION_LISTENER);
}
} catch (Throwable t) {
handleRecoveryFailure(indexService, shardRouting, true, t);
@ -662,7 +642,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
if (indexShard.restoreFromRepository(indexShardRepository, nodes.localNode())) {
restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), sId);
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from repository", SHARD_STATE_ACTION_LISTENER);
shardStateAction.shardStarted(shardRouting, "after recovery from repository", SHARD_STATE_ACTION_LISTENER);
}
} catch (Throwable first) {
try {
@ -732,7 +712,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
@Override
public void onRecoveryDone(RecoveryState state) {
shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER);
shardStateAction.shardStarted(shardRouting, "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER);
}
@Override
@ -743,7 +723,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private void handleRecoveryFailure(IndexService indexService, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) {
synchronized (mutex) {
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, sendShardFailure, "failed recovery", failure);
failAndRemoveShard(shardRouting, indexService, sendShardFailure, "failed recovery", failure);
}
}
@ -764,7 +744,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
private void failAndRemoveShard(ShardRouting shardRouting, String indexUUID, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) {
private void failAndRemoveShard(ShardRouting shardRouting, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) {
if (indexService != null && indexService.hasShard(shardRouting.getId())) {
// if the indexService is null we can't remove the shard, that's fine since we might have a failure
// when the index is remove and then we already removed the index service for that shard...
@ -777,15 +757,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
if (sendShardFailure) {
sendFailShard(shardRouting, indexUUID, message, failure);
sendFailShard(shardRouting, message, failure);
}
}
private void sendFailShard(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Throwable failure) {
try {
logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
shardStateAction.shardFailed(shardRouting, indexUUID, message, failure, SHARD_STATE_ACTION_LISTENER);
failedShards.put(shardRouting.shardId(), shardRouting);
shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to mark shard as failed (because of [{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), message);
}
@ -798,7 +778,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final ShardRouting shardRouting = shardFailure.routing;
threadPool.generic().execute(() -> {
synchronized (mutex) {
failAndRemoveShard(shardRouting, shardFailure.indexUUID, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
}
});
}

View File

@ -435,7 +435,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
if (indexShard.routingEntry().primary() == false) {
throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
}
int opCount = indexShard.getOperationsCount();
int opCount = indexShard.getActiveOperationsCount();
logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
return new InFlightOpsResponse(opCount);
}

View File

@ -61,8 +61,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
private final ClusterService clusterService;
private final OngoingRecoveres ongoingRecoveries = new OngoingRecoveres();
private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries();
@Inject
public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
@ -107,11 +106,11 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
if (!targetShardRouting.initializing()) {
logger.debug("delaying recovery of {} as it is not listed as initializing on the target node {}. known shards state is [{}]",
request.shardId(), request.targetNode(), targetShardRouting.state());
request.shardId(), request.targetNode(), targetShardRouting.state());
throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
}
logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
final RecoverySourceHandler handler;
if (shard.indexSettings().isOnSharedFilesystem()) {
handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
@ -134,8 +133,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
}
private static final class OngoingRecoveres {
private static final class OngoingRecoveries {
private final Map<IndexShard, Set<RecoverySourceHandler>> ongoingRecoveries = new HashMap<>();
synchronized void add(IndexShard shard, RecoverySourceHandler handler) {

View File

@ -393,9 +393,11 @@ public class RecoverySourceHandler {
}
});
if (request.markAsRelocated()) {
// TODO what happens if the recovery process fails afterwards, we need to mark this back to started
if (isPrimaryRelocation()) {
/**
* if the recovery process fails after setting the shard state to RELOCATED, both relocation source and
* target are failed (see {@link IndexShard#updateRoutingEntry}).
*/
try {
shard.relocated("to " + request.targetNode());
} catch (IllegalIndexShardStateException e) {
@ -406,7 +408,11 @@ public class RecoverySourceHandler {
}
stopWatch.stop();
logger.trace("[{}][{}] finalizing recovery to {}: took [{}]",
indexName, shardId, request.targetNode(), stopWatch.totalTime());
indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
protected boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION;
}
/**

View File

@ -101,7 +101,7 @@ public class RecoveryState implements ToXContent, Streamable {
STORE((byte) 0),
SNAPSHOT((byte) 1),
REPLICA((byte) 2),
RELOCATION((byte) 3);
PRIMARY_RELOCATION((byte) 3);
private static final Type[] TYPES = new Type[Type.values().length];

View File

@ -138,7 +138,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
// create a new recovery status, and process...
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
threadPool.generic().execute(new RecoveryRunner(recoveryId));
}
protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
@ -178,7 +177,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
return;
}
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(),
false, metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
try {
@ -267,7 +266,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
return;
}
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true);
}
}

View File

@ -84,8 +84,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
return 0;
}
private boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary();
}
}

View File

@ -41,8 +41,6 @@ public class StartRecoveryRequest extends TransportRequest {
private DiscoveryNode targetNode;
private boolean markAsRelocated;
private Store.MetadataSnapshot metadataSnapshot;
private RecoveryState.Type recoveryType;
@ -56,12 +54,11 @@ public class StartRecoveryRequest extends TransportRequest {
* @param sourceNode The node to recover from
* @param targetNode The node to recover to
*/
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.sourceNode = sourceNode;
this.targetNode = targetNode;
this.markAsRelocated = markAsRelocated;
this.recoveryType = recoveryType;
this.metadataSnapshot = metadataSnapshot;
}
@ -82,10 +79,6 @@ public class StartRecoveryRequest extends TransportRequest {
return targetNode;
}
public boolean markAsRelocated() {
return markAsRelocated;
}
public RecoveryState.Type recoveryType() {
return recoveryType;
}
@ -101,7 +94,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId = ShardId.readShardId(in);
sourceNode = DiscoveryNode.readNode(in);
targetNode = DiscoveryNode.readNode(in);
markAsRelocated = in.readBoolean();
metadataSnapshot = new Store.MetadataSnapshot(in);
recoveryType = RecoveryState.Type.fromId(in.readByte());
@ -114,7 +106,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId.writeTo(out);
sourceNode.writeTo(out);
targetNode.writeTo(out);
out.writeBoolean(markAsRelocated);
metadataSnapshot.writeTo(out);
out.writeByte(recoveryType.id());
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.ingest;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ingest.DeletePipelineRequest;
@ -36,10 +37,8 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.TemplateService;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.script.ScriptService;
import java.io.Closeable;
@ -104,8 +103,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
try {
pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry));
} catch (ElasticsearchParseException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
throw new ElasticsearchParseException("Error updating pipeline with id [" + pipeline.getId() + "]", e);
}
}
this.pipelines = Collections.unmodifiableMap(pipelines);
@ -154,9 +155,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
// validates the pipeline and processor configuration before submitting a cluster update task:
Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
WritePipelineResponse response = validatePipelineResponse(request.getId(), pipelineConfig);
if (response != null) {
listener.onResponse(response);
try {
factory.create(request.getId(), pipelineConfig, processorFactoryRegistry);
} catch(Exception e) {
listener.onFailure(e);
return;
}
clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask<WritePipelineResponse>(request, listener) {
@ -234,16 +236,4 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
}
return result;
}
WritePipelineResponse validatePipelineResponse(String id, Map<String, Object> config) {
try {
factory.create(id, config, processorFactoryRegistry);
return null;
} catch (ConfigurationPropertyException e) {
return new WritePipelineResponse(new PipelineFactoryError(e));
} catch (Exception e) {
return new WritePipelineResponse(new PipelineFactoryError(e.getMessage()));
}
}
}
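A standalone sketch (hypothetical types) of the validate-before-submit shape above: build the pipeline once up front and fail the listener with the real exception, and only submit the cluster state update for configs that build cleanly:

public class ValidateFirstDemo {
    interface Listener {
        void onResponse(String ack);
        void onFailure(Exception e);
    }

    static void put(String pipelineConfig, Listener listener, Runnable submitStateUpdate) {
        try {
            validate(pipelineConfig); // same code path used when applying the cluster state
        } catch (Exception e) {
            listener.onFailure(e);    // surfaces the actual parse exception to the caller
            return;
        }
        submitStateUpdate.run();      // only reached for configs that build cleanly
    }

    static void validate(String pipelineConfig) {
        if (pipelineConfig.isEmpty()) {
            throw new IllegalArgumentException("pipeline must define processors");
        }
    }

    public static void main(String[] args) {
        Listener listener = new Listener() {
            public void onResponse(String ack) { System.out.println(ack); }
            public void onFailure(Exception e) { System.out.println("rejected: " + e.getMessage()); }
        };
        put("", listener, () -> System.out.println("submitted"));      // rejected: ...
        put("{...}", listener, () -> System.out.println("submitted")); // submitted
    }
}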

View File

@ -32,7 +32,8 @@ import java.util.Objects;
*/
public class CompoundProcessor implements Processor {
static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
static final String ON_FAILURE_PROCESSOR_FIELD = "on_failure_processor";
static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";
private final List<Processor> processors;
private final List<Processor> onFailureProcessors;
@ -74,24 +75,26 @@ public class CompoundProcessor implements Processor {
if (onFailureProcessors.isEmpty()) {
throw e;
} else {
executeOnFailure(ingestDocument, e, processor.getType());
executeOnFailure(ingestDocument, e, processor.getType(), processor.getTag());
}
break;
}
}
}
void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType) throws Exception {
void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception {
Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
try {
ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
ingestMetadata.put(ON_FAILURE_PROCESSOR_FIELD, failedProcessorType);
ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);
ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);
for (Processor processor : onFailureProcessors) {
processor.execute(ingestDocument);
}
} finally {
ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);
ingestMetadata.remove(ON_FAILURE_PROCESSOR_FIELD);
ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
}
}
}
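A standalone sketch of the set-then-always-remove pattern in executeOnFailure(): the failure context is visible to the on_failure processors but never leaks into the final document, even if one of them throws:

import java.util.HashMap;
import java.util.Map;

public class OnFailureMetadataDemo {
    public static void main(String[] args) {
        Map<String, String> ingestMetadata = new HashMap<>();
        try {
            ingestMetadata.put("on_failure_message", "boom");
            ingestMetadata.put("on_failure_processor_type", "set");
            ingestMetadata.put("on_failure_processor_tag", "my-tag");
            // ... run the on_failure processors here; they can read the three keys ...
            System.out.println(ingestMetadata);
        } finally {
            // always cleared, so the keys never appear in the indexed document
            ingestMetadata.remove("on_failure_message");
            ingestMetadata.remove("on_failure_processor_type");
            ingestMetadata.remove("on_failure_processor_tag");
        }
        System.out.println(ingestMetadata); // {}
    }
}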

View File

@ -19,7 +19,8 @@
package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import java.util.List;
import java.util.Map;
@ -32,7 +33,7 @@ public final class ConfigurationUtils {
/**
* Returns and removes the specified optional property from the specified configuration map.
*
* If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
* If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
*/
public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
@ -42,8 +43,8 @@ public final class ConfigurationUtils {
/**
* Returns and removes the specified property from the specified configuration map.
*
* If the property value isn't of type string an {@link ConfigurationPropertyException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown
* If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
* If the property is missing an {@link ElasticsearchParseException} is thrown
*/
public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
return readStringProperty(processorType, processorTag, configuration, propertyName, null);
@ -52,15 +53,15 @@ public final class ConfigurationUtils {
/**
* Returns and removes the specified property from the specified configuration map.
*
* If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
* If the property is missing and no default value has been specified a {@link ConfigurationPropertyException} is thrown
* If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
* If the property is missing and no default value has been specified an {@link ElasticsearchParseException} is thrown
*/
public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) {
Object value = configuration.remove(propertyName);
if (value == null && defaultValue != null) {
return defaultValue;
} else if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
}
return readString(processorType, processorTag, propertyName, value);
}
@ -72,13 +73,13 @@ public final class ConfigurationUtils {
if (value instanceof String) {
return (String) value;
}
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
}
/**
* Returns and removes the specified property of type list from the specified configuration map.
*
* If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
* If the property value isn't of type list, an {@link ElasticsearchParseException} is thrown.
*/
public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
@ -91,13 +92,13 @@ public final class ConfigurationUtils {
/**
* Returns and removes the specified property of type list from the specified configuration map.
*
* If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown
* If the property value isn't of type list, an {@link ElasticsearchParseException} is thrown.
* If the property is missing, an {@link ElasticsearchParseException} is thrown.
*/
public static <T> List<T> readList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
}
return readList(processorType, processorTag, propertyName, value);
@ -109,20 +110,20 @@ public final class ConfigurationUtils {
List<T> stringList = (List<T>) value;
return stringList;
} else {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
}
}
/**
* Returns and removes the specified property of type map from the specified configuration map.
*
* If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
* If the property is missing an {@link ConfigurationPropertyException} is thrown
* If the property value isn't of type map, an {@link ElasticsearchParseException} is thrown.
* If the property is missing, an {@link ElasticsearchParseException} is thrown.
*/
public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
}
return readMap(processorType, processorTag, propertyName, value);
@ -131,7 +132,7 @@ public final class ConfigurationUtils {
/**
* Returns and removes the specified property of type map from the specified configuration map.
*
* If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
* If the property value isn't of type map, an {@link ElasticsearchParseException} is thrown.
*/
public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
@ -148,7 +149,7 @@ public final class ConfigurationUtils {
Map<String, T> map = (Map<String, T>) value;
return map;
} else {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
}
}
@ -158,8 +159,23 @@ public final class ConfigurationUtils {
public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
Object value = configuration.remove(propertyName);
if (value == null) {
throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
}
return value;
}
public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName, String reason) {
ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);
if (processorType != null) {
exception.addHeader("processor_type", processorType);
}
if (processorTag != null) {
exception.addHeader("processor_tag", processorTag);
}
if (propertyName != null) {
exception.addHeader("property_name", propertyName);
}
return exception;
}
}
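A quick sketch of the new failure mode, with a hypothetical processor type and tag; it assumes ElasticsearchException#getHeader returns the header values added above:

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.core.ConfigurationUtils;

import java.util.HashMap;
import java.util.Map;

public class ConfigurationUtilsExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("separator", "-");
        try {
            // "field" is absent, so this throws the new parse exception
            ConfigurationUtils.readStringProperty("my_processor", "tag1", config, "field");
        } catch (ElasticsearchParseException e) {
            // the processor coordinates now travel as exception headers
            System.out.println(e.getMessage());                 // [field] required property is missing
            System.out.println(e.getHeader("processor_type"));  // [my_processor]
            System.out.println(e.getHeader("processor_tag"));   // [tag1]
            System.out.println(e.getHeader("property_name"));   // [field]
        }
    }
}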

View File

@ -19,7 +19,7 @@
package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.ElasticsearchParseException;
import java.util.ArrayList;
import java.util.Arrays;
@ -27,6 +27,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* A pipeline is a list of {@link Processor} instances grouped under a unique id.
*/
@ -84,20 +85,20 @@ public final class Pipeline {
public final static class Factory {
public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry);
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
if (config.isEmpty() == false) {
throw new ConfigurationPropertyException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
}
CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors));
return new Pipeline(id, description, compoundProcessor);
}
private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws Exception {
List<Processor> processors = new ArrayList<>();
if (processorConfigs != null) {
for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
@ -110,28 +111,22 @@ public final class Pipeline {
return processors;
}
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws ConfigurationPropertyException {
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws Exception {
Processor.Factory factory = processorRegistry.get(type);
if (factory != null) {
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
Processor processor;
try {
processor = factory.create(config);
} catch (ConfigurationPropertyException e) {
throw e;
} catch (Exception e) {
throw new ConfigurationPropertyException(e.getMessage());
}
processor = factory.create(config);
if (!config.isEmpty()) {
throw new ConfigurationPropertyException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
}
if (onFailureProcessors.isEmpty()) {
return processor;
}
return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
}
throw new ConfigurationPropertyException("No processor type exists with name [" + type + "]");
throw new ElasticsearchParseException("No processor type exists with name [" + type + "]");
}
}
}
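For illustration, a sketch of driving the factory with a raw config map using the "description"/"processors" keys read above. The registry is deliberately left empty here, so create() exercises the new ElasticsearchParseException path for an unknown processor type; normally the ingest module registers factories such as SetProcessor.Factory.

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.Processor;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class PipelineFactoryExample {
    public static void main(String[] args) throws Exception {
        // a processor definition as it would arrive from a PUT pipeline request
        Map<String, Object> setConfig = new HashMap<>();
        setConfig.put("field", "status");
        setConfig.put("value", "ingested");

        Map<String, Object> pipelineConfig = new HashMap<>();
        pipelineConfig.put("description", "example pipeline");
        pipelineConfig.put("processors",
                Collections.singletonList(Collections.singletonMap("set", setConfig)));

        // normally populated by the ingest module; empty on purpose here
        Map<String, Processor.Factory> processorRegistry = new HashMap<>();

        try {
            new Pipeline.Factory().create("my-pipeline", pipelineConfig, processorRegistry);
        } catch (ElasticsearchParseException e) {
            // "No processor type exists with name [set]"
            System.out.println(e.getMessage());
        }
    }
}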

View File

@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import java.io.IOException;
public class PipelineFactoryError implements Streamable, ToXContent {
private String reason;
private String processorType;
private String processorTag;
private String processorPropertyName;
public PipelineFactoryError() {
}
public PipelineFactoryError(ConfigurationPropertyException e) {
this.reason = e.getMessage();
this.processorType = e.getProcessorType();
this.processorTag = e.getProcessorTag();
this.processorPropertyName = e.getPropertyName();
}
public PipelineFactoryError(String reason) {
this.reason = "Constructing Pipeline failed:" + reason;
}
public String getReason() {
return reason;
}
public String getProcessorTag() {
return processorTag;
}
public String getProcessorPropertyName() {
return processorPropertyName;
}
public String getProcessorType() {
return processorType;
}
@Override
public void readFrom(StreamInput in) throws IOException {
reason = in.readString();
processorType = in.readOptionalString();
processorTag = in.readOptionalString();
processorPropertyName = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(reason);
out.writeOptionalString(processorType);
out.writeOptionalString(processorTag);
out.writeOptionalString(processorPropertyName);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("error");
builder.field("type", processorType);
builder.field("tag", processorTag);
builder.field("reason", reason);
builder.field("property_name", processorPropertyName);
builder.endObject();
return builder;
}
}

View File

@ -1,43 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.core;
public class PipelineFactoryResult {
private final Pipeline pipeline;
private final PipelineFactoryError error;
public PipelineFactoryResult(Pipeline pipeline) {
this.pipeline = pipeline;
this.error = null;
}
public PipelineFactoryResult(PipelineFactoryError error) {
this.error = error;
this.pipeline = null;
}
public Pipeline getPipeline() {
return pipeline;
}
public PipelineFactoryError getError() {
return error;
}
}

View File

@ -17,11 +17,8 @@
* under the License.
*/
package org.elasticsearch.ingest.core;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import java.util.Map;
/**

View File

@ -23,7 +23,6 @@ import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Processor;
import java.util.Map;
@ -31,7 +30,7 @@ import java.util.Map;
* Base class for processors that manipulate strings and require a single "fields" array config value, which
* holds a list of field names in string format.
*/
public abstract class AbstractStringProcessor extends AbstractProcessor {
abstract class AbstractStringProcessor extends AbstractProcessor {
private final String field;
protected AbstractStringProcessor(String tag, String field) {
@ -54,7 +53,7 @@ public abstract class AbstractStringProcessor extends AbstractProcessor {
protected abstract String process(String value);
public static abstract class Factory<T extends AbstractStringProcessor> extends AbstractProcessorFactory<T> {
static abstract class Factory<T extends AbstractStringProcessor> extends AbstractProcessorFactory<T> {
protected final String processorType;
protected Factory(String processorType) {

View File

@ -33,7 +33,7 @@ import java.util.Map;
* provided values will be added. If the field is a scalar, it will be converted to a single-item list and the provided
* values will be added to the newly created list.
*/
public class AppendProcessor extends AbstractProcessor {
public final class AppendProcessor extends AbstractProcessor {
public static final String TYPE = "append";

View File

@ -1,53 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
/**
* Exception class thrown by processor factories.
*/
public class ConfigurationPropertyException extends RuntimeException {
private String processorType;
private String processorTag;
private String propertyName;
public ConfigurationPropertyException(String processorType, String processorTag, String propertyName, String message) {
super("[" + propertyName + "] " + message);
this.processorTag = processorTag;
this.processorType = processorType;
this.propertyName = propertyName;
}
public ConfigurationPropertyException(String errorMessage) {
super(errorMessage);
}
public String getPropertyName() {
return propertyName;
}
public String getProcessorType() {
return processorType;
}
public String getProcessorTag() {
return processorTag;
}
}

View File

@ -29,11 +29,13 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;
/**
* Processor that converts a field's content to a different type. Supported types are integer, float, boolean and string.
* Throws an exception if the field is missing or the conversion fails.
*/
public class ConvertProcessor extends AbstractProcessor {
public final class ConvertProcessor extends AbstractProcessor {
enum Type {
INTEGER {
@ -80,11 +82,11 @@ public class ConvertProcessor extends AbstractProcessor {
public abstract Object convert(Object value);
public static Type fromString(String type) {
public static Type fromString(String processorTag, String propertyName, String type) {
try {
return Type.valueOf(type.toUpperCase(Locale.ROOT));
} catch(IllegalArgumentException e) {
throw new IllegalArgumentException("type [" + type + "] not supported, cannot convert field.", e);
throw newConfigurationException(TYPE, processorTag, propertyName, "type [" + type + "] not supported, cannot convert field.");
}
}
}
@ -134,11 +136,12 @@ public class ConvertProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<ConvertProcessor> {
public static final class Factory extends AbstractProcessorFactory<ConvertProcessor> {
@Override
public ConvertProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
Type convertType = Type.fromString(ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type"));
String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type");
Type convertType = Type.fromString(processorTag, "type", typeProperty);
return new ConvertProcessor(processorTag, field, convertType);
}
}
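A sketch of the new error path, with a hypothetical tag and config; "long" is outside the supported integer/float/boolean/string set, so Type.fromString now raises a configuration exception carrying the processor coordinates:

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.processor.ConvertProcessor;

import java.util.HashMap;
import java.util.Map;

public class ConvertFactoryExample {
    public static void main(String[] args) throws Exception {
        Map<String, Object> config = new HashMap<>();
        config.put("field", "port");
        config.put("type", "long"); // not a supported conversion type
        try {
            new ConvertProcessor.Factory().doCreate("tag1", config);
        } catch (ElasticsearchParseException e) {
            // "[type] type [long] not supported, cannot convert field."
            // with processor_type/processor_tag/property_name headers attached
            System.out.println(e.getMessage());
        }
    }
}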

View File

@ -108,7 +108,7 @@ public final class DateProcessor extends AbstractProcessor {
return matchFormats;
}
public static class Factory extends AbstractProcessorFactory<DateProcessor> {
public static final class Factory extends AbstractProcessorFactory<DateProcessor> {
@SuppressWarnings("unchecked")
public DateProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {

View File

@ -1,106 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest.processor;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* Processor that replaces dots in document field names with a
* specified separator.
*/
public class DeDotProcessor extends AbstractProcessor {
public static final String TYPE = "dedot";
static final String DEFAULT_SEPARATOR = "_";
private final String separator;
DeDotProcessor(String tag, String separator) {
super(tag);
this.separator = separator;
}
public String getSeparator() {
return separator;
}
@Override
public void execute(IngestDocument document) {
deDot(document.getSourceAndMetadata());
}
@Override
public String getType() {
return TYPE;
}
/**
* Recursively iterates through Maps and Lists in search of map entries with
* keys containing dots. The dots in these fields are replaced with {@link #separator}.
*
* @param obj The current object in context to be checked for dots in its fields.
*/
private void deDot(Object obj) {
if (obj instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> doc = (Map) obj;
Iterator<Map.Entry<String, Object>> it = doc.entrySet().iterator();
Map<String, Object> deDottedFields = new HashMap<>();
while (it.hasNext()) {
Map.Entry<String, Object> entry = it.next();
deDot(entry.getValue());
String fieldName = entry.getKey();
if (fieldName.contains(".")) {
String deDottedFieldName = fieldName.replaceAll("\\.", separator);
deDottedFields.put(deDottedFieldName, entry.getValue());
it.remove();
}
}
doc.putAll(deDottedFields);
} else if (obj instanceof List) {
@SuppressWarnings("unchecked")
List<Object> list = (List) obj;
for (Object value : list) {
deDot(value);
}
}
}
public static class Factory extends AbstractProcessorFactory<DeDotProcessor> {
@Override
public DeDotProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String separator = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "separator");
if (separator == null) {
separator = DEFAULT_SEPARATOR;
}
return new DeDotProcessor(processorTag, separator);
}
}
}
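The recursion this deletion removes is easy to demonstrate standalone; the following self-contained sketch reproduces the removed deDot() behavior on plain maps and lists, with no Elasticsearch types involved:

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class DeDotExample {
    // standalone re-implementation of the removed deDot() recursion
    static void deDot(Object obj, String separator) {
        if (obj instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, Object> doc = (Map<String, Object>) obj;
            Map<String, Object> deDotted = new HashMap<>();
            Iterator<Map.Entry<String, Object>> it = doc.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Object> entry = it.next();
                deDot(entry.getValue(), separator);
                if (entry.getKey().contains(".")) {
                    deDotted.put(entry.getKey().replaceAll("\\.", separator), entry.getValue());
                    it.remove();
                }
            }
            doc.putAll(deDotted);
        } else if (obj instanceof List) {
            for (Object value : (List<?>) obj) {
                deDot(value, separator);
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("a.b.c", 1);
        deDot(source, "_");
        System.out.println(source); // {a_b_c=1}
    }
}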

View File

@ -31,7 +31,7 @@ import java.util.Map;
* Processor that raises a runtime exception with a provided
* error message.
*/
public class FailProcessor extends AbstractProcessor {
public final class FailProcessor extends AbstractProcessor {
public static final String TYPE = "fail";
@ -56,7 +56,7 @@ public class FailProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<FailProcessor> {
public static final class Factory extends AbstractProcessorFactory<FailProcessor> {
private final TemplateService templateService;

View File

@ -32,7 +32,7 @@ import java.util.regex.Pattern;
* Processor that searches for patterns in field content and replaces them with the corresponding replacement string.
* Supports fields of string type only; throws an exception if a field is of a different type.
*/
public class GsubProcessor extends AbstractProcessor {
public final class GsubProcessor extends AbstractProcessor {
public static final String TYPE = "gsub";
@ -76,7 +76,7 @@ public class GsubProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<GsubProcessor> {
public static final class Factory extends AbstractProcessorFactory<GsubProcessor> {
@Override
public GsubProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");

View File

@ -32,7 +32,7 @@ import java.util.stream.Collectors;
* Processor that joins the different items of an array into a single string value using a separator between each item.
* Throws an exception if the specified field is not an array.
*/
public class JoinProcessor extends AbstractProcessor {
public final class JoinProcessor extends AbstractProcessor {
public static final String TYPE = "join";
@ -70,7 +70,7 @@ public class JoinProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<JoinProcessor> {
public final static class Factory extends AbstractProcessorFactory<JoinProcessor> {
@Override
public JoinProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");

View File

@ -26,7 +26,7 @@ import java.util.Locale;
* Throws an exception if the field is not of type string.
*/
public class LowercaseProcessor extends AbstractStringProcessor {
public final class LowercaseProcessor extends AbstractStringProcessor {
public static final String TYPE = "lowercase";
@ -44,7 +44,7 @@ public class LowercaseProcessor extends AbstractStringProcessor {
return TYPE;
}
public static class Factory extends AbstractStringProcessor.Factory<LowercaseProcessor> {
public final static class Factory extends AbstractStringProcessor.Factory<LowercaseProcessor> {
public Factory() {
super(TYPE);

View File

@ -30,7 +30,7 @@ import java.util.Map;
/**
* Processor that removes existing fields. Nothing happens if the field is not present.
*/
public class RemoveProcessor extends AbstractProcessor {
public final class RemoveProcessor extends AbstractProcessor {
public static final String TYPE = "remove";
@ -55,7 +55,7 @@ public class RemoveProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<RemoveProcessor> {
public static final class Factory extends AbstractProcessorFactory<RemoveProcessor> {
private final TemplateService templateService;

View File

@ -29,7 +29,7 @@ import java.util.Map;
/**
* Processor that renames existing fields. Throws an exception if the field is not present.
*/
public class RenameProcessor extends AbstractProcessor {
public final class RenameProcessor extends AbstractProcessor {
public static final String TYPE = "rename";
@ -75,7 +75,7 @@ public class RenameProcessor extends AbstractProcessor {
return TYPE;
}
public static class Factory extends AbstractProcessorFactory<RenameProcessor> {
public static final class Factory extends AbstractProcessorFactory<RenameProcessor> {
@Override
public RenameProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");

View File

@ -32,7 +32,7 @@ import java.util.Map;
* Processor that adds new fields with their corresponding values. If the field is already present, its value
* will be replaced with the provided one.
*/
public class SetProcessor extends AbstractProcessor {
public final class SetProcessor extends AbstractProcessor {
public static final String TYPE = "set";

View File

@ -23,7 +23,7 @@ package org.elasticsearch.ingest.processor;
* Processor that trims the content of string fields.
* Throws an exception if the field is not of type string.
*/
public class TrimProcessor extends AbstractStringProcessor {
public final class TrimProcessor extends AbstractStringProcessor {
public static final String TYPE = "trim";
@ -41,7 +41,7 @@ public class TrimProcessor extends AbstractStringProcessor {
return TYPE;
}
public static class Factory extends AbstractStringProcessor.Factory<TrimProcessor> {
public static final class Factory extends AbstractStringProcessor.Factory<TrimProcessor> {
public Factory() {
super(TYPE);

View File

@ -25,7 +25,7 @@ import java.util.Locale;
* Processor that converts the content of string fields to uppercase.
* Throws an exception if the field is not of type string.
*/
public class UppercaseProcessor extends AbstractStringProcessor {
public final class UppercaseProcessor extends AbstractStringProcessor {
public static final String TYPE = "uppercase";
@ -43,7 +43,7 @@ public class UppercaseProcessor extends AbstractStringProcessor {
return TYPE;
}
public static class Factory extends AbstractStringProcessor.Factory<UppercaseProcessor> {
public static final class Factory extends AbstractStringProcessor.Factory<UppercaseProcessor> {
public Factory() {
super(TYPE);

View File

@ -133,6 +133,11 @@ public class Node implements Closeable {
public static final Setting<Boolean> NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, false, Setting.Scope.CLUSTER);
public static final Setting<String> NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER);
public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", false, Setting.Scope.CLUSTER);
// it is unfortunate that folks can mistype client etc. and get away with it.
// TODO: we should move this to node.attribute.${name} = ${value} instead.
public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting("node.", false, Setting.Scope.CLUSTER);
private static final String CLIENT_TYPE = "node";
private final Lifecycle lifecycle = new Lifecycle();
@ -156,7 +161,7 @@ public class Node implements Closeable {
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
tmpSettings = TribeService.processSettings(tmpSettings);
ESLogger logger = Loggers.getLogger(Node.class, tmpSettings.get("name"));
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(tmpSettings));
logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.shortHash(), Build.CURRENT.date());
logger.info("initializing ...");
@ -197,7 +202,8 @@ public class Node implements Closeable {
modules.add(new EnvironmentModule(environment));
modules.add(new NodeModule(this, monitorService));
modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry));
modules.add(new ScriptModule(settingsModule));
ScriptModule scriptModule = new ScriptModule();
modules.add(scriptModule);
modules.add(new NodeEnvironmentModule(nodeEnvironment));
modules.add(new ClusterNameModule(this.settings));
modules.add(new ThreadPoolModule(threadPool));
@ -215,7 +221,7 @@ public class Node implements Closeable {
modules.add(new AnalysisModule(environment));
pluginsService.processModules(modules);
scriptModule.prepareSettings(settingsModule);
injector = modules.createInjector();
client = injector.getInstance(Client.class);
@ -262,7 +268,7 @@ public class Node implements Closeable {
return this;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("starting ...");
// hack around dependency injection problem (for now...)
injector.getInstance(Discovery.class).setRoutingService(injector.getInstance(RoutingService.class));
@ -316,7 +322,7 @@ public class Node implements Closeable {
if (!lifecycle.moveToStopped()) {
return this;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("stopping ...");
injector.getInstance(TribeService.class).stop();
@ -363,7 +369,7 @@ public class Node implements Closeable {
return;
}
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
logger.info("closing ...");
List<Closeable> toClose = new ArrayList<>();
StopWatch stopWatch = new StopWatch("node_close");
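A sketch of the typed lookup that replaces the raw settings.get("name") calls above. It assumes the Settings.settingsBuilder() API in use at this point in the codebase, and that simpleString settings fall back to an empty string when unset:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;

public class NodeNameSettingExample {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
                .put("node.name", "node-1")
                .build();
        // typed, validated access instead of settings.get("name")
        String name = Node.NODE_NAME_SETTING.get(settings);
        System.out.println(name); // node-1
    }
}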

Some files were not shown because too many files have changed in this diff.