Merge branch 'master' into feature/reindex

commit bb4d8b79fe

@@ -78,15 +78,17 @@ class BuildPlugin implements Plugin<Project> {
         if (project.rootProject.ext.has('buildChecksDone') == false) {
             String javaHome = findJavaHome()
             File gradleJavaHome = Jvm.current().javaHome
-            String gradleJavaVersionDetails = "${System.getProperty('java.vendor')} ${System.getProperty('java.version')}" +
+            String javaVendor = System.getProperty('java.vendor')
+            String javaVersion = System.getProperty('java.version')
+            String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" +
                 " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]"

             String javaVersionDetails = gradleJavaVersionDetails
-            String javaVersion = System.getProperty('java.version')
             JavaVersion javaVersionEnum = JavaVersion.current()
             if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) {
                 javaVersionDetails = findJavaVersionDetails(project, javaHome)
                 javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
+                javaVendor = findJavaVendor(project, javaHome)
+                javaVersion = findJavaVersion(project, javaHome)
             }

@@ -114,6 +116,25 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch")
             }

+            // this block of code detecting buggy JDK 8 compiler versions can be removed when the minimum Java version is incremented
+            assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8"
+            if (javaVersionEnum == JavaVersion.VERSION_1_8) {
+                if (Objects.equals("Oracle Corporation", javaVendor)) {
+                    def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/
+                    if (matcher.matches()) {
+                        int update
+                        if (matcher.group(1) == null) {
+                            update = 0
+                        } else {
+                            update = matcher.group(1).toInteger()
+                        }
+                        if (update < 40) {
+                            throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40")
+                        }
+                    }
+                }
+            }
+
             project.rootProject.ext.javaHome = javaHome
+            project.rootProject.ext.javaVersion = javaVersion
             project.rootProject.ext.buildChecksDone = true

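For reference, a standalone sketch (hypothetical class, not part of the commit) of the update-number parsing the block above performs: an Oracle version string such as 1.8.0_31 yields update 31, a bare 1.8.0 yields update 0, and anything below update 40 trips the JDK-8052388 guard.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JavaUpdateParsing {
    // Same pattern as the Groovy check above: the "_NN" update suffix is optional.
    private static final Pattern VERSION = Pattern.compile("1\\.8\\.0(?:_(\\d+))?");

    static int updateOf(String javaVersion) {
        Matcher m = VERSION.matcher(javaVersion);
        if (m.matches() == false) {
            throw new IllegalArgumentException("not a 1.8.0 version string: " + javaVersion);
        }
        return m.group(1) == null ? 0 : Integer.parseInt(m.group(1));
    }

    public static void main(String[] args) {
        System.out.println(updateOf("1.8.0_31")); // 31 -> rejected by the < 40 check
        System.out.println(updateOf("1.8.0_45")); // 45 -> accepted
        System.out.println(updateOf("1.8.0"));    // 0  -> rejected
    }
}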
@@ -153,6 +174,11 @@ class BuildPlugin implements Plugin<Project> {
         return runJavascript(project, javaHome, versionScript)
     }

+    private static String findJavaVendor(Project project, String javaHome) {
+        String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));'
+        return runJavascript(project, javaHome, vendorScript)
+    }
+
     /** Finds the parsable java specification version */
     private static String findJavaVersion(Project project, String javaHome) {
         String versionScript = 'print(java.lang.System.getProperty("java.version"));'

@@ -289,7 +315,7 @@ class BuildPlugin implements Plugin<Project> {
         String luceneVersion = VersionProperties.lucene
         if (luceneVersion.contains('-snapshot')) {
             // extract the revision number from the version with a regex matcher
-            String revision = (luceneVersion =~ /\w+-snapshot-(\d+)/)[0][1]
+            String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1]
             repos.maven {
                 name 'lucene-snapshots'
                 url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"

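The regex change above tracks the switch, visible in the version.properties hunk further down, from numeric Subversion revisions to abbreviated git hashes in Lucene snapshot versions. A small sketch (hypothetical class, not part of the commit) of the difference:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LuceneSnapshotRevision {
    // Returns the first capture group, mimicking the Groovy find-operator lookup.
    static String revision(Pattern pattern, String version) {
        Matcher m = pattern.matcher(version);
        return m.find() ? m.group(1) : "no match";
    }

    public static void main(String[] args) {
        Pattern oldPattern = Pattern.compile("\\w+-snapshot-(\\d+)");
        Pattern newPattern = Pattern.compile("\\w+-snapshot-([a-z0-9]+)");
        System.out.println(revision(oldPattern, "5.5.0-snapshot-1725675")); // 1725675
        System.out.println(revision(oldPattern, "5.5.0-snapshot-4de5f1d")); // 4 (hash truncated at the first letter)
        System.out.println(revision(newPattern, "5.5.0-snapshot-4de5f1d")); // 4de5f1d
    }
}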
@@ -94,6 +94,9 @@ class PrecommitTasks {
         project.checkstyle {
             config = project.resources.text.fromFile(
                 PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
+            configProperties = [
+                suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
+            ]
         }
         for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
             Task task = project.tasks.findByName(taskName)

@@ -6,6 +6,10 @@
 <module name="Checker">
     <property name="charset" value="UTF-8" />

+    <module name="SuppressionFilter">
+        <property name="file" value="${suppressions}" />
+    </module>
+
     <module name="TreeWalker">
         <!-- ~3500 violations
         <module name="LineLength">

@@ -0,0 +1,10 @@
+<?xml version="1.0"?>
+<!DOCTYPE suppressions PUBLIC
+    "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+    "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
+
+<suppressions>
+  <!-- These files are generated by ANTLR so it's silly to hold them to our rules. -->
+  <suppress files="org/elasticsearch/painless/PainlessLexer\.java" checks="." />
+  <suppress files="org/elasticsearch/painless/PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
+</suppressions>
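As an aside, the suppress patterns are regular expressions matched against file paths; a quick sketch (hypothetical class, not part of the commit) of what the alternation in the second pattern covers:

import java.util.regex.Pattern;

public class SuppressionPatternDemo {
    public static void main(String[] args) {
        Pattern parser = Pattern.compile("org/elasticsearch/painless/PainlessParser(|BaseVisitor|Visitor)\\.java");
        String[] files = {
            "org/elasticsearch/painless/PainlessParser.java",            // true
            "org/elasticsearch/painless/PainlessParserBaseVisitor.java", // true
            "org/elasticsearch/painless/PainlessParserVisitor.java",     // true
            "org/elasticsearch/painless/PainlessLexer.java",             // false (covered by the first pattern)
        };
        for (String file : files) {
            System.out.println(file + " -> " + parser.matcher(file).matches());
        }
    }
}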
@@ -1,5 +1,5 @@
 elasticsearch = 3.0.0-SNAPSHOT
-lucene = 5.5.0-snapshot-1725675
+lucene = 5.5.0-snapshot-4de5f1d

 # optional dependencies
 spatial4j = 0.5

@@ -19,6 +19,7 @@

 package org.elasticsearch;

+import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -39,6 +40,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;

+import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;

 /**

@@ -205,7 +207,7 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
      * @param exType the exception type to look for
      * @return whether there is a nested exception of the specified type
      */
-    public boolean contains(Class exType) {
+    public boolean contains(Class<? extends Throwable> exType) {
         if (exType == null) {
             return false;
         }

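The tightened signature moves a class of caller mistakes from runtime to compile time; a minimal sketch (hypothetical class, not part of the commit):

public class ContainsSignatureDemo {
    // With the old raw Class parameter, contains(String.class) compiled and could
    // only fail logically at runtime; with the bounded wildcard the compiler
    // rejects any argument that is not a Throwable subtype.
    static boolean contains(Class<? extends Throwable> exType) {
        return exType != null; // body elided; only the signature matters here
    }

    public static void main(String[] args) {
        System.out.println(contains(java.io.IOException.class)); // ok
        // contains(String.class); // does not compile: String is not a Throwable
    }
}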
@@ -469,157 +471,288 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
         return throwable;
     }

+    /**
+     * This is the list of Exceptions Elasticsearch can throw over the wire or save into a corruption marker. Each value in the enum is a
+     * single exception tying the Class to an id for use on the encode side and the id back to a constructor for use on the decode side. As
+     * such it's ok if the exceptions change names so long as their constructor can still read the exception. Each exception is listed
+     * in id order below. If you want to remove an exception leave a tombstone comment and mark the id as null in
+     * ExceptionSerializationTests.testIds.ids.
+     */
     enum ElasticsearchExceptionHandle {
-        // each exception gets an assigned id that must never change. While the exception name can
-        // change due to refactorings etc. like renaming we have to keep the ordinal <--> class mapping
-        // to deserialize the exception coming from another node or from an corruption marker on
-        // a corrupted index.
-        // these exceptions can be ordered and removed, but (repeating) the ids must never change
-        // to remove an exception, remove the enum value below, and mark the id as null in ExceptionSerializationTests.testIds.ids
-        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
-        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
-        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
-        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class, org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
-        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class, org.elasticsearch.ElasticsearchSecurityException::new, 4),
-        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
-        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class, org.elasticsearch.indices.IndexClosedException::new, 6),
-        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class, org.elasticsearch.http.BindHttpException::new, 7),
-        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class, org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
-        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class, org.elasticsearch.node.NodeClosedException::new, 9),
-        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
-        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class, org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
-        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class, org.elasticsearch.transport.ConnectTransportException::new, 12),
-        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class, org.elasticsearch.transport.NotSerializableTransportException::new, 13),
-        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
-        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class, org.elasticsearch.indices.IndexCreationException::new, 15),
-        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class, org.elasticsearch.index.IndexNotFoundException::new, 16),
-        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
-        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
-        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
-        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
-        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
+        INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
+                org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
+        DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,
+                org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
+        EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,
+                org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
+        MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,
+                org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
+        ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,
+                org.elasticsearch.ElasticsearchSecurityException::new, 4),
+        INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,
+                org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
+        INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,
+                org.elasticsearch.indices.IndexClosedException::new, 6),
+        BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,
+                org.elasticsearch.http.BindHttpException::new, 7),
+        REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,
+                org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
+        NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,
+                org.elasticsearch.node.NodeClosedException::new, 9),
+        SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,
+                org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
+        SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,
+                org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
+        CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,
+                org.elasticsearch.transport.ConnectTransportException::new, 12),
+        NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,
+                org.elasticsearch.transport.NotSerializableTransportException::new, 13),
+        RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,
+                org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
+        INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,
+                org.elasticsearch.indices.IndexCreationException::new, 15),
+        INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,
+                org.elasticsearch.index.IndexNotFoundException::new, 16),
+        ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,
+                org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
+        BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,
+                org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
+        RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,
+                org.elasticsearch.ResourceNotFoundException::new, 19),
+        ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,
+                org.elasticsearch.transport.ActionTransportException::new, 20),
+        ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,
+                org.elasticsearch.ElasticsearchGenerationException::new, 21),
         // 22 was CreateFailedEngineException
-        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
-        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
+        INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,
+                org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
+        SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
+                org.elasticsearch.search.SearchContextMissingException::new, 24),
         SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
-        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
-        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
-        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
-        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29),
-        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, org.elasticsearch.snapshots.SnapshotException::new, 30),
-        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class, org.elasticsearch.indices.InvalidAliasNameException::new, 31),
-        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class, org.elasticsearch.indices.InvalidIndexNameException::new, 32),
-        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
-        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class, org.elasticsearch.transport.TransportException::new, 34),
-        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class, org.elasticsearch.ElasticsearchParseException::new, 35),
-        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class, org.elasticsearch.search.SearchException::new, 36),
-        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class, org.elasticsearch.index.mapper.MapperException::new, 37),
-        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class, org.elasticsearch.indices.InvalidTypeNameException::new, 38),
-        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class, org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
+        BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
+                org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
+        SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
+                org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
+        DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
+                org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),
+        DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
+                org.elasticsearch.index.engine.DocumentMissingException::new, 29),
+        SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,
+                org.elasticsearch.snapshots.SnapshotException::new, 30),
+        INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,
+                org.elasticsearch.indices.InvalidAliasNameException::new, 31),
+        INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,
+                org.elasticsearch.indices.InvalidIndexNameException::new, 32),
+        INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,
+                org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
+        TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,
+                org.elasticsearch.transport.TransportException::new, 34),
+        ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,
+                org.elasticsearch.ElasticsearchParseException::new, 35),
+        SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,
+                org.elasticsearch.search.SearchException::new, 36),
+        MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,
+                org.elasticsearch.index.mapper.MapperException::new, 37),
+        INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,
+                org.elasticsearch.indices.InvalidTypeNameException::new, 38),
+        SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,
+                org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
         PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),
-        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class, org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
-        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
-        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class, org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
-        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class, org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
-        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class, org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
-        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class, org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
-        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
-        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class, org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
-        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class, org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
-        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
-        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
-        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
+        INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,
+                org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
+        RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,
+                org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
+        TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,
+                org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
+        RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,
+                org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
+        INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,
+                org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
+        NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
+                org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
+        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
+                org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
+        TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
+                org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
+        CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
+                org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
+        FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
+                org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
+        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
+                org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
+        VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
+                org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
         // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
         NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
-        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
-        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
-        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class, org.elasticsearch.transport.SendRequestTransportException::new, 58),
-        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
-        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
-        ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class, org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
-        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
-        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class, org.elasticsearch.indices.AliasFilterParsingException::new, 63),
+        SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,
+                org.elasticsearch.common.settings.SettingsException::new, 56),
+        INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,
+                org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
+        SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,
+                org.elasticsearch.transport.SendRequestTransportException::new, 58),
+        ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
+                org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
+        EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
+                org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
+        ROUTING_VALIDATION_EXCEPTION(org.elasticsearch.cluster.routing.RoutingValidationException.class,
+                org.elasticsearch.cluster.routing.RoutingValidationException::new, 61),
+        NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
+                org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
+        ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
+                org.elasticsearch.indices.AliasFilterParsingException::new, 63),
         // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
         GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
-        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
+        INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
+                org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
         HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
-        ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class, org.elasticsearch.ElasticsearchException::new, 68),
-        SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class, org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
-        PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class, org.elasticsearch.action.PrimaryMissingActionException::new, 70),
+        ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,
+                org.elasticsearch.ElasticsearchException::new, 68),
+        SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,
+                org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
+        PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,
+                org.elasticsearch.action.PrimaryMissingActionException::new, 70),
         FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),
         SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),
-        CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
-        BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class, org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
-        INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
-        RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class, org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
-        UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
-        TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class, org.elasticsearch.action.TimestampParsingException::new, 78),
-        ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79),
-        INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
-        INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
-        REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, org.elasticsearch.repositories.RepositoryException::new, 82),
-        RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
-        NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
-        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
-        AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
+        CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,
+                org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
+        BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,
+                org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
+        INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,
+                org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
+        RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,
+                org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
+        UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,
+                org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
+        TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,
+                org.elasticsearch.action.TimestampParsingException::new, 78),
+        ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
+                org.elasticsearch.action.RoutingMissingException::new, 79),
+        INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
+                org.elasticsearch.index.engine.IndexFailedEngineException::new, 80),
+        INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
+                org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
+        REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,
+                org.elasticsearch.repositories.RepositoryException::new, 82),
+        RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,
+                org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
+        NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
+                org.elasticsearch.transport.NodeDisconnectedException::new, 84),
+        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
+                org.elasticsearch.index.AlreadyExpiredException::new, 85),
+        AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
+                org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
         // 87 used to be for MergeMappingException
-        INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
-        PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
-        REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
-        AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
-        DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class, org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
+        INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
+                org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
+        PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class,
+                org.elasticsearch.percolator.PercolateException::new, 89),
+        REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
+                org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
+        AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,
+                org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
+        DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,
+                org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
         // 93 used to be for IndexWarmerMissingException
-        NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class, org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
-        INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
-        ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
-        INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
-        INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class, org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
-        SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class, org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
-        ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class, org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
-        TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class, org.elasticsearch.transport.TransportSerializationException::new, 102),
-        REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class, org.elasticsearch.transport.RemoteTransportException::new, 103),
-        ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class, org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
-        ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class, org.elasticsearch.cluster.routing.RoutingException::new, 105),
-        INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class, org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
-        REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107),
-        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, org.elasticsearch.index.percolator.PercolatorException::new, 108),
-        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
-        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
-        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class, org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
-        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class, org.elasticsearch.transport.BindTransportException::new, 112),
-        ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
-        INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class, org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
-        TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class, org.elasticsearch.index.translog.TranslogException::new, 115),
-        PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
-        RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
-        ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class, org.elasticsearch.ElasticsearchTimeoutException::new, 118),
-        QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class, org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
-        REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class, org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
-        INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
-        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class, org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
-        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124),
-        HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
-        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126),
-        SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, org.elasticsearch.search.SearchContextException::new, 127),
-        SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class, org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
-        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class, org.elasticsearch.index.engine.EngineClosedException::new, 129),
-        NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class, org.elasticsearch.action.NoShardAvailableActionException::new, 130),
-        UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class, org.elasticsearch.action.UnavailableShardsException::new, 131),
-        FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class, org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
-        CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class, org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
-        NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class, org.elasticsearch.transport.NodeNotConnectedException::new, 134),
-        STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
-        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
-        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class, org.elasticsearch.indices.TypeMissingException::new, 137),
-        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
-        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141);
+        NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,
+                org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
+        INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,
+                org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
+        ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,
+                org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
+        INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,
+                org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
+        INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,
+                org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
+        SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,
+                org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
+        ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,
+                org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
+        TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,
+                org.elasticsearch.transport.TransportSerializationException::new, 102),
+        REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,
+                org.elasticsearch.transport.RemoteTransportException::new, 103),
+        ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,
+                org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
+        ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,
+                org.elasticsearch.cluster.routing.RoutingException::new, 105),
+        INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,
+                org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
+        REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
+                org.elasticsearch.repositories.RepositoryMissingException::new, 107),
+        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
+                org.elasticsearch.index.percolator.PercolatorException::new, 108),
+        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
+                org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
+        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
+                org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
+        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
+                org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
+        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
+                org.elasticsearch.transport.BindTransportException::new, 112),
+        ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class,
+                org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException::new, 113),
+        INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,
+                org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
+        TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,
+                org.elasticsearch.index.translog.TranslogException::new, 115),
+        PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,
+                org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
+        RETRY_ON_PRIMARY_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class,
+                org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException::new, 117),
+        ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,
+                org.elasticsearch.ElasticsearchTimeoutException::new, 118),
+        QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,
+                org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
+        REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,
+                org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
+        INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,
+                org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
+        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
+                org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
+        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
+                org.elasticsearch.script.Script.ScriptParseException::new, 124),
+        HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
+                org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
+        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
+                org.elasticsearch.index.mapper.MapperParsingException::new, 126),
+        SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
+                org.elasticsearch.search.SearchContextException::new, 127),
+        SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
+                org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
+        ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
+                org.elasticsearch.index.engine.EngineClosedException::new, 129),
+        NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
+                org.elasticsearch.action.NoShardAvailableActionException::new, 130),
+        UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
+                org.elasticsearch.action.UnavailableShardsException::new, 131),
+        FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,
+                org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
+        CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,
+                org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
+        NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,
+                org.elasticsearch.transport.NodeNotConnectedException::new, 134),
+        STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,
+                org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
+        RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,
+                org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
+        TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,
+                org.elasticsearch.indices.TypeMissingException::new, 137),
+        FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,
+                org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
+        QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
+                org.elasticsearch.index.query.QueryShardException::new, 141),
+        NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
+                ShardStateAction.NoLongerPrimaryShardException::new, 142);


         final Class<? extends ElasticsearchException> exceptionClass;
         final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
         final int id;

-        ElasticsearchExceptionHandle(Class<? extends ElasticsearchException> exceptionClass, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor, int id) {
+        <E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
+                FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {
+            // We need the exceptionClass because you can't dig it out of the constructor reliably.
             this.exceptionClass = exceptionClass;
             this.constructor = constructor;
             this.id = id;

@@ -627,11 +760,10 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
     }

     static {
-        final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> exceptions = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e));
-        final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> idToSupplier = Arrays.stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor));
-
-        ID_TO_SUPPLIER = Collections.unmodifiableMap(idToSupplier);
-        CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = Collections.unmodifiableMap(exceptions);
+        ID_TO_SUPPLIER = unmodifiableMap(Arrays
+            .stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.id, e -> e.constructor)));
+        CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE = unmodifiableMap(Arrays
+            .stream(ElasticsearchExceptionHandle.values()).collect(Collectors.toMap(e -> e.exceptionClass, e -> e)));
     }

     public Index getIndex() {

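Taken together, the enum rework and the rebuilt lookup maps implement a wire-format registry: each exception class is tied to a stable numeric id plus a constructor reference used for deserialization, and the lookup maps are built once from the enum values. A compressed, self-contained sketch of the pattern (hypothetical names, not Elasticsearch code):

import java.io.DataInput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.stream.Collectors;

public class WireExceptionRegistry {
    // Constructor reference that may throw while reading from the stream.
    interface Reader<E> { E read(DataInput in) throws IOException; }

    static class DemoException extends RuntimeException {
        DemoException(DataInput in) throws IOException { super(in.readUTF()); }
    }

    enum Handle {
        DEMO_EXCEPTION(DemoException.class, DemoException::new, 0); // the id must never change

        final Class<? extends RuntimeException> exceptionClass;
        final Reader<? extends RuntimeException> constructor;
        final int id;

        // Same shape as the generic enum constructor introduced in the hunk above.
        <E extends RuntimeException> Handle(Class<E> exceptionClass, Reader<E> constructor, int id) {
            this.exceptionClass = exceptionClass;
            this.constructor = constructor;
            this.id = id;
        }
    }

    // Built once, as in the rewritten static block: id -> deserializing constructor.
    static final Map<Integer, Reader<? extends RuntimeException>> ID_TO_READER = Collections.unmodifiableMap(
            Arrays.stream(Handle.values()).collect(Collectors.toMap(h -> h.id, h -> h.constructor)));
}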
@@ -703,7 +835,8 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
         builder.startArray();
         for (ElasticsearchException rootCause : rootCauses) {
             builder.startObject();
-            rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
+            rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(
+                Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
             builder.endObject();
         }
         builder.endArray();

@@ -254,7 +254,11 @@ public class Version {
     public static final int V_1_7_3_ID = 1070399;
     public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
     public static final int V_1_7_4_ID = 1070499;
-    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_5_ID = 1070599;
+    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final int V_1_7_6_ID = 1070699;
+    public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);

     public static final int V_2_0_0_beta1_ID = 2000001;
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);

@@ -275,11 +279,15 @@ public class Version {
     public static final int V_2_1_1_ID = 2010199;
     public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_2_ID = 2010299;
-    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_3_ID = 2010399;
+    public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
-    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_1);
+    public static final int V_2_2_1_ID = 2020199;
+    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_1);
     public static final int V_2_3_0_ID = 2030099;
-    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
+    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_3_0_0_ID = 3000099;
     public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final Version CURRENT = V_3_0_0;

@ -299,8 +307,12 @@ public class Version {
|
|||
return V_3_0_0;
|
||||
case V_2_3_0_ID:
|
||||
return V_2_3_0;
|
||||
case V_2_2_1_ID:
|
||||
return V_2_2_1;
|
||||
case V_2_2_0_ID:
|
||||
return V_2_2_0;
|
||||
case V_2_1_3_ID:
|
||||
return V_2_1_3;
|
||||
case V_2_1_2_ID:
|
||||
return V_2_1_2;
|
||||
case V_2_1_1_ID:
|
||||
|
@ -321,6 +333,10 @@ public class Version {
|
|||
return V_2_0_0_beta2;
|
||||
case V_2_0_0_beta1_ID:
|
||||
return V_2_0_0_beta1;
|
||||
case V_1_7_6_ID:
|
||||
return V_1_7_6;
|
||||
case V_1_7_5_ID:
|
||||
return V_1_7_5;
|
||||
case V_1_7_4_ID:
|
||||
return V_1_7_4;
|
||||
case V_1_7_3_ID:
|
||||
|
|
|
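The numeric IDs above look positionally encoded: two digits each for major, minor and revision, plus a two-digit build suffix where 99 marks a release and low values mark betas (2000001 for 2.0.0-beta1). A small decoder under that assumption; this is inferred from the constants themselves, not an API of the Version class:

// Illustrative decoder for the apparent XXYYZZBB version-id scheme.
public class VersionIdDemo {
    static String describe(int id) {
        int build = id % 100;
        int revision = (id / 100) % 100;
        int minor = (id / 10000) % 100;
        int major = id / 1000000;
        return major + "." + minor + "." + revision + (build == 99 ? "" : " (build " + build + ")");
    }

    public static void main(String[] args) {
        System.out.println(describe(2020199)); // 2.2.1            <- V_2_2_1_ID
        System.out.println(describe(2000001)); // 2.0.0 (build 1)  <- V_2_0_0_beta1_ID
    }
}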
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
@@ -111,7 +112,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {

if (getNodeFailures() != null && getNodeFailures().size() > 0) {
builder.startArray("node_failures");
for (FailedNodeException ex : getNodeFailures()){
for (FailedNodeException ex : getNodeFailures()) {
builder.value(ex);
}
builder.endArray();
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

@@ -48,20 +49,23 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

private final String description;

private final Task.Status status;

private final String parentNode;

private final long parentId;

public TaskInfo(DiscoveryNode node, long id, String type, String action, String description) {
this(node, id, type, action, description, null, -1L);
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
this(node, id, type, action, description, status, null, -1L);
}

public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, String parentNode, long parentId) {
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
this.node = node;
this.id = id;
this.type = type;
this.action = action;
this.description = description;
this.status = status;
this.parentNode = parentNode;
this.parentId = parentId;
}

@@ -72,6 +76,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
type = in.readString();
action = in.readString();
description = in.readOptionalString();
if (in.readBoolean()) {
status = in.readTaskStatus();
} else {
status = null;
}
parentNode = in.readOptionalString();
parentId = in.readLong();
}
@@ -96,6 +105,14 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
return description;
}

/**
* The status of the running task. Only available if TaskInfos were built
* with the detailed flag.
*/
public Task.Status getStatus() {
return status;
}

public String getParentNode() {
return parentNode;
}
@@ -116,6 +133,12 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
out.writeString(type);
out.writeString(action);
out.writeOptionalString(description);
if (status != null) {
out.writeBoolean(true);
out.writeTaskStatus(status);
} else {
out.writeBoolean(false);
}
out.writeOptionalString(parentNode);
out.writeLong(parentId);
}
@@ -127,6 +150,9 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
builder.field("id", id);
builder.field("type", type);
builder.field("action", action);
if (status != null) {
builder.field("status", status, params);
}
if (description != null) {
builder.field("description", description);
}
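The new status field rides the common boolean-prefix pattern for optional values on the wire (write a presence flag, then the value; mirror it on read), the same shape as the writeTo/readFrom hunks above. A generic, self-contained sketch with plain java.io streams rather than the Elasticsearch Stream types:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Boolean-prefixed optional value: flag first, value only when present.
final class OptionalWire {
    static void writeOptionalUTF(DataOutput out, String value) throws IOException {
        out.writeBoolean(value != null); // presence flag
        if (value != null) {
            out.writeUTF(value);         // value follows only when flag was true
        }
    }

    static String readOptionalUTF(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null; // flag decides whether to read on
    }
}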
@@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
}

@Override
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.flush(shardRequest.getRequest());
logger.trace("{} flush request executed on primary", indexShard.shardId());

@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
}

@Override
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed on primary", indexShard.shardId());

@@ -80,7 +80,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
continue;
}

Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.getSettings());
Settings settings = settingsFilter.filter(indexMetaData.getSettings());
if (request.humanReadable()) {
settings = IndexMetaData.addHumanReadableSettings(settings);
}
@@ -140,7 +140,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
}

@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {

// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
@@ -200,7 +200,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
* Execute the given {@link IndexRequest} on a primary shard, throwing a
* {@link RetryOnPrimaryException} if the operation needs to be re-tried.
*/
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
@@ -22,31 +22,24 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SimulatePipelineResponse extends ActionResponse implements StatusToXContent {
public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
private String pipelineId;
private boolean verbose;
private List<SimulateDocumentResult> results;
private PipelineFactoryError error;

public SimulatePipelineResponse() {

}

public SimulatePipelineResponse(PipelineFactoryError error) {
this.error = error;
}

public SimulatePipelineResponse(String pipelineId, boolean verbose, List<SimulateDocumentResult> responses) {
this.pipelineId = pipelineId;
this.verbose = verbose;
@@ -65,69 +58,42 @@ public class SimulatePipelineResponse extends ActionResponse implements StatusTo
return verbose;
}

public boolean isError() {
return error != null;
}

@Override
public RestStatus status() {
if (isError()) {
return RestStatus.BAD_REQUEST;
}
return RestStatus.OK;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(isError());
if (isError()) {
error.writeTo(out);
} else {
out.writeString(pipelineId);
out.writeBoolean(verbose);
out.writeVInt(results.size());
for (SimulateDocumentResult response : results) {
response.writeTo(out);
}
out.writeString(pipelineId);
out.writeBoolean(verbose);
out.writeVInt(results.size());
for (SimulateDocumentResult response : results) {
response.writeTo(out);
}
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
boolean isError = in.readBoolean();
if (isError) {
error = new PipelineFactoryError();
error.readFrom(in);
} else {
this.pipelineId = in.readString();
boolean verbose = in.readBoolean();
int responsesLength = in.readVInt();
results = new ArrayList<>();
for (int i = 0; i < responsesLength; i++) {
SimulateDocumentResult<?> simulateDocumentResult;
if (verbose) {
simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
} else {
simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
}
results.add(simulateDocumentResult);
this.pipelineId = in.readString();
boolean verbose = in.readBoolean();
int responsesLength = in.readVInt();
results = new ArrayList<>();
for (int i = 0; i < responsesLength; i++) {
SimulateDocumentResult<?> simulateDocumentResult;
if (verbose) {
simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
} else {
simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
}
results.add(simulateDocumentResult);
}
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (isError()) {
error.toXContent(builder, params);
} else {
builder.startArray(Fields.DOCUMENTS);
for (SimulateDocumentResult response : results) {
response.toXContent(builder, params);
}
builder.endArray();
builder.startArray(Fields.DOCUMENTS);
for (SimulateDocumentResult response : results) {
response.toXContent(builder, params);
}
builder.endArray();
return builder;
}

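Note how readFrom keys the element type off a single verbose flag read up front, rather than a per-element type id: every element in one response is read by the same factory. A self-contained toy showing that dispatch shape (strings stand in for the result objects):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class FlagDrivenReadDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeBoolean(true);   // the "verbose" flag governs every element below
            out.writeInt(2);
            out.writeUTF("doc-1");
            out.writeUTF("doc-2");
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            boolean verbose = in.readBoolean(); // read once, decides the reader for all elements
            int count = in.readInt();
            List<String> results = new ArrayList<>(count);
            for (int i = 0; i < count; i++) {
                results.add((verbose ? "verbose:" : "base:") + in.readUTF());
            }
            System.out.println(results); // [verbose:doc-1, verbose:doc-2]
        }
    }
}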
@@ -19,6 +19,7 @@

package org.elasticsearch.action.ingest;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@@ -27,8 +28,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -58,9 +57,6 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
} else {
simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), pipelineStore);
}
} catch (ConfigurationPropertyException e) {
listener.onResponse(new SimulatePipelineResponse(new PipelineFactoryError(e)));
return;
} catch (Exception e) {
listener.onFailure(e);
return;

@@ -22,12 +22,10 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.ingest.core.PipelineFactoryError;

import java.io.IOException;

public class WritePipelineResponse extends AcknowledgedResponse {
private PipelineFactoryError error;

WritePipelineResponse() {

@@ -35,36 +33,17 @@ public class WritePipelineResponse extends AcknowledgedResponse {

public WritePipelineResponse(boolean acknowledged) {
super(acknowledged);
if (!isAcknowledged()) {
error = new PipelineFactoryError("pipeline write is not acknowledged");
}
}

public WritePipelineResponse(PipelineFactoryError error) {
super(false);
this.error = error;
}

public PipelineFactoryError getError() {
return error;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
if (!isAcknowledged()) {
error = new PipelineFactoryError();
error.readFrom(in);
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
if (!isAcknowledged()) {
error.writeTo(out);
}
}
}

@@ -1,41 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.ingest;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;

import java.io.IOException;

public class WritePipelineResponseRestListener extends AcknowledgedRestListener<WritePipelineResponse> {

public WritePipelineResponseRestListener(RestChannel channel) {
super(channel);
}

@Override
protected void addCustomFields(XContentBuilder builder, WritePipelineResponse response) throws IOException {
if (!response.isAcknowledged()) {
response.getError().toXContent(builder, null);
}
}
}

@@ -44,6 +44,20 @@ public abstract class ChildTaskActionRequest<Request extends ActionRequest<Reque
this.parentTaskId = parentTaskId;
}

/**
* The node that owns the parent task.
*/
public String getParentTaskNode() {
return parentTaskNode;
}

/**
* The task id of the parent task on the parent node.
*/
public long getParentTaskId() {
return parentTaskId;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@@ -19,7 +19,6 @@

package org.elasticsearch.action.support.replication;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.WriteConsistencyLevel;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.tasks.Task;

import java.io.IOException;
import java.util.concurrent.TimeUnit;
@@ -55,6 +55,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ

private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;

private long routedBasedOnClusterVersion = 0;

public ReplicationRequest() {

}
@@ -141,6 +143,20 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return (Request) this;
}

/**
* Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.
* Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}
*/
@SuppressWarnings("unchecked")
Request routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {
this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;
return (Request) this;
}

long routedBasedOnClusterVersion() {
return routedBasedOnClusterVersion;
}

@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
@@ -161,6 +177,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
}

@Override
@@ -175,6 +192,12 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
out.writeByte(consistencyLevel.id());
timeout.writeTo(out);
out.writeString(index);
out.writeVLong(routedBasedOnClusterVersion);
}

@Override
public Task createTask(long id, String type, String action) {
return new ReplicationTask(id, type, action, this::getDescription, getParentTaskNode(), getParentTaskId());
}

/**
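The routedBasedOnClusterVersion handshake prevents two nodes from bouncing a request back and forth: a hop only re-routes once its own cluster state has caught up with the version the sender routed on. A self-contained toy model of that guard (the class shapes here are hypothetical; the names mirror the hunk above):

public class RerouteGuardDemo {
    static final class Request {
        private long routedBasedOnClusterVersion = 0;
        long routedBasedOnClusterVersion() { return routedBasedOnClusterVersion; }
        void routedBasedOnClusterVersion(long v) { routedBasedOnClusterVersion = v; }
    }

    static String handle(Request request, long localClusterStateVersion) {
        if (localClusterStateVersion < request.routedBasedOnClusterVersion()) {
            return "retry: local cluster state is stale"; // wait for a newer state instead of bouncing back
        }
        request.routedBasedOnClusterVersion(localClusterStateVersion); // stamp before the next hop
        return "reroute to current primary";
    }

    public static void main(String[] args) {
        Request r = new Request();
        System.out.println(handle(r, 10)); // first hop: reroutes, stamped with version 10
        System.out.println(handle(r, 8));  // stale second hop: retries instead of looping
    }
}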
@@ -0,0 +1,97 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.support.replication;

import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

import static java.util.Objects.requireNonNull;

/**
* Task that tracks replication actions.
*/
public class ReplicationTask extends Task {
private volatile String phase = "starting";

public ReplicationTask(long id, String type, String action, Provider<String> description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
}

/**
* Set the current phase of the task.
*/
public void setPhase(String phase) {
this.phase = phase;
}

/**
* Get the current phase of the task.
*/
public String getPhase() {
return phase;
}

@Override
public Status getStatus() {
return new Status(phase);
}

public static class Status implements Task.Status {
public static final Status PROTOTYPE = new Status("prototype");

private final String phase;

public Status(String phase) {
this.phase = requireNonNull(phase, "Phase cannot be null");
}

public Status(StreamInput in) throws IOException {
phase = in.readString();
}

@Override
public String getWriteableName() {
return "replication";
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("phase", phase);
builder.endObject();
return builder;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
}

@Override
public Status readFrom(StreamInput in) throws IOException {
return new Status(in);
}
}
}

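A short usage sketch of the class above, assuming the surrounding Elasticsearch types are on the classpath; the argument values are made up:

// The phase string written by setPhase is what getStatus() snapshots and what
// ultimately surfaces as the "status" field of a TaskInfo built with the detailed flag.
ReplicationTask task = new ReplicationTask(42L, "transport", "indices:data/write/index",
        () -> "demo description", null, -1L);
task.setPhase("primary");                          // advanced as the request moves through the action
ReplicationTask.Status status = task.getStatus();  // snapshot; renders as {"phase":"primary"}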
@@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
@@ -141,7 +142,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

@Override
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
new ReroutePhase(task, request, listener).run();
new ReroutePhase((ReplicationTask) task, request, listener).run();
}

protected abstract Response newResponseInstance();
@@ -156,10 +157,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

/**
* Primary operation on node with primary copy, the provided metadata should be used for request validation if needed
*
* @return A tuple containing not null values, as first value the result of the primary operation and as second value
* the request to be executed on the replica shards.
*/
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable;
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception;

/**
* Replica operation on nodes with replica copies
@@ -281,14 +283,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
class PrimaryOperationTransportHandler implements TransportRequestHandler<Request> {
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
new PrimaryPhase(request, channel).run();
throw new UnsupportedOperationException("the task parameter is required for this operation");
}

@Override
public void messageReceived(Request request, TransportChannel channel, Task task) throws Exception {
new PrimaryPhase((ReplicationTask) task, request, channel).run();
}
}

class ReplicaOperationTransportHandler implements TransportRequestHandler<ReplicaRequest> {
@Override
public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception {
new AsyncReplicaAction(request, channel).run();
throw new UnsupportedOperationException("the task parameter is required for this operation");
}

@Override
public void messageReceived(ReplicaRequest request, TransportChannel channel, Task task) throws Exception {
new AsyncReplicaAction(request, channel, (ReplicationTask) task).run();
}
}

@@ -299,7 +311,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}

public RetryOnReplicaException(StreamInput in) throws IOException{
public RetryOnReplicaException(StreamInput in) throws IOException {
super(in);
}
}
@@ -307,13 +319,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final class AsyncReplicaAction extends AbstractRunnable {
private final ReplicaRequest request;
private final TransportChannel channel;
/**
* The task on the node with the replica shard.
*/
private final ReplicationTask task;
// important: we pass null as a timeout as failing a replica is
// something we want to avoid at all costs
private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());

AsyncReplicaAction(ReplicaRequest request, TransportChannel channel) {
AsyncReplicaAction(ReplicaRequest request, TransportChannel channel, ReplicationTask task) {
this.request = request;
this.channel = channel;
this.task = task;
}

@Override
@@ -326,8 +343,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public void onNewClusterState(ClusterState state) {
context.close();
// Forking a thread on local node via transport service so that custom transport service have an
// opportunity to execute custom logic before the replica operation begins
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
// opportunity to execute custom logic before the replica operation begins
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
}
@@ -352,6 +369,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
}

private void failReplicaIfNeeded(Throwable t) {
String index = request.shardId().getIndex().getName();
int shardId = request.shardId().id();
@@ -382,13 +400,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

@Override
protected void doRun() throws Exception {
setPhase(task, "replica");
assert request.shardId() != null : "request shardId must be set";
try (Releasable ignored = getIndexShardOperationsCounter(request.shardId())) {
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
shardOperationOnReplica(request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
}
}
setPhase(task, "finished");
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
@@ -399,7 +419,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}

public RetryOnPrimaryException(StreamInput in) throws IOException{
public RetryOnPrimaryException(StreamInput in) throws IOException {
super(in);
}
}
@@ -414,15 +434,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
final class ReroutePhase extends AbstractRunnable {
private final ActionListener<Response> listener;
private final Request request;
private final ReplicationTask task;
private final ClusterStateObserver observer;
private final AtomicBoolean finished = new AtomicBoolean();

ReroutePhase(Task task, Request request, ActionListener<Response> listener) {
ReroutePhase(ReplicationTask task, Request request, ActionListener<Response> listener) {
this.request = request;
if (task != null) {
this.request.setParentTask(clusterService.localNode().getId(), task.getId());
}
this.listener = listener;
this.task = task;
this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger, threadPool.getThreadContext());
}

@@ -433,6 +455,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

@Override
protected void doRun() {
setPhase(task, "routing");
final ClusterState state = observer.observedState();
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
if (blockException != null) {
@@ -445,6 +468,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
handleBlockException(blockException);
return;
}

// request does not have a shardId yet, we need to pass the concrete index to resolve shardId
resolveRequest(state.metaData(), concreteIndex, request);
assert request.shardId() != null : "request shardId must be set in resolveRequest";
@@ -463,14 +487,25 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
performAction(node, transportPrimaryAction, true);
} else {
if (state.version() < request.routedBasedOnClusterVersion()) {
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
return;
} else {
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
request.routedBasedOnClusterVersion(state.version());
}
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
performAction(node, actionName, false);
}
}
@@ -527,6 +562,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
finishAsFailed(failure);
return;
}
setPhase(task, "waiting_for_retry");
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@@ -551,6 +587,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

void finishAsFailed(Throwable failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
logger.trace("operation failed. action [{}], request [{}]", failure, actionName, request);
listener.onFailure(failure);
} else {
@@ -561,6 +598,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
void finishWithUnexpectedFailure(Throwable failure) {
logger.warn("unexpected error during the primary phase for action [{}], request [{}]", failure, actionName, request);
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
listener.onFailure(failure);
} else {
assert false : "finishWithUnexpectedFailure called but operation is already finished";
@@ -569,6 +607,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

void finishOnSuccess(Response response) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "finished");
if (logger.isTraceEnabled()) {
logger.trace("operation succeeded. action [{}], request [{}]", actionName, request);
}
@@ -584,60 +623,74 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}

/**
* Responsible for performing primary operation locally and delegating to replication action once successful
* Responsible for performing primary operation locally or delegating primary operation to relocation target in case where shard has
* been marked as RELOCATED. Delegates to replication action once successful.
* <p>
* Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
*/
final class PrimaryPhase extends AbstractRunnable {
class PrimaryPhase extends AbstractRunnable {
private final ReplicationTask task;
private final Request request;
private final ShardId shardId;
private final TransportChannel channel;
private final ClusterState state;
private final AtomicBoolean finished = new AtomicBoolean();
private Releasable indexShardReference;
private IndexShardReference indexShardReference;

PrimaryPhase(Request request, TransportChannel channel) {
PrimaryPhase(ReplicationTask task, Request request, TransportChannel channel) {
this.state = clusterService.state();
this.task = task;
this.request = request;
assert request.shardId() != null : "request shardId must be set prior to primary phase";
this.shardId = request.shardId();
this.channel = channel;
}

@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
}

@Override
protected void doRun() throws Exception {
setPhase(task, "primary");
// request shardID was set in ReroutePhase
assert request.shardId() != null : "request shardID must be set prior to primary phase";
final ShardId shardId = request.shardId();
final String writeConsistencyFailure = checkWriteConsistency(shardId);
if (writeConsistencyFailure != null) {
finishBecauseUnavailable(shardId, writeConsistencyFailure);
return;
}
final ReplicationPhase replicationPhase;
try {
indexShardReference = getIndexShardOperationsCounter(shardId);
// closed in finishAsFailed(e) in the case of error
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
if (indexShardReference.isRelocated() == false) {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
} catch (Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
return;
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
} else {
// delegate primary phase to relocation target
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
final ShardRouting primary = indexShardReference.routingEntry();
indexShardReference.close();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
"rerouting indexing to target primary " + primary));
}
finishAndMoveToReplication(replicationPhase);
}

/**
@@ -704,6 +757,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
*/
void finishAsFailed(Throwable failure) {
if (finished.compareAndSet(false, true)) {
setPhase(task, "failed");
Releasables.close(indexShardReference);
logger.trace("operation failed", failure);
try {
@@ -721,10 +775,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}

protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
/**
* returns a new reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationPhase}).
*/
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReference(indexShard);
return new IndexShardReferenceImpl(indexShard, true);
}

/**
* returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
* replication is completed on the node.
*/
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReferenceImpl(indexShard, false);
}

/**
@@ -732,7 +800,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
* relocating copies
*/
final class ReplicationPhase extends AbstractRunnable {

private final ReplicationTask task;
private final ReplicaRequest replicaRequest;
private final Response finalResponse;
private final TransportChannel channel;
@@ -740,16 +808,16 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final List<ShardRouting> shards;
private final DiscoveryNodes nodes;
private final boolean executeOnReplica;
private final String indexUUID;
private final AtomicBoolean finished = new AtomicBoolean();
private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard
private final ConcurrentMap<String, Throwable> shardReplicaFailures = ConcurrentCollections.newConcurrentMap();
private final AtomicInteger pending;
private final int totalShards;
private final Releasable indexShardReference;
private final IndexShardReference indexShardReference;

public ReplicationPhase(ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
TransportChannel channel, Releasable indexShardReference) {
public ReplicationPhase(ReplicationTask task, ReplicaRequest replicaRequest, Response finalResponse, ShardId shardId,
TransportChannel channel, IndexShardReference indexShardReference) {
this.task = task;
this.replicaRequest = replicaRequest;
this.channel = channel;
this.finalResponse = finalResponse;
@@ -766,7 +834,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
this.shards = (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
this.executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
this.indexUUID = (indexMetaData != null) ? indexMetaData.getIndexUUID() : null;
this.nodes = state.getNodes();

if (shards.isEmpty()) {
@@ -777,17 +844,20 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
int numberOfIgnoredShardInstances = 0;
int numberOfPendingShardInstances = 0;
for (ShardRouting shard : shards) {
// the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
if (shard.primary() == false && executeOnReplica == false) {
numberOfIgnoredShardInstances++;
} else if (shard.unassigned()) {
continue;
}
if (shard.unassigned()) {
numberOfIgnoredShardInstances++;
} else {
if (shard.currentNodeId().equals(nodes.localNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating()) {
numberOfPendingShardInstances++;
}
continue;
}
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
numberOfPendingShardInstances++;
}
}
// one for the local primary copy
@@ -795,7 +865,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.pending = new AtomicInteger(numberOfPendingShardInstances);
if (logger.isTraceEnabled()) {
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
transportReplicaAction, replicaRequest, state.version());
transportReplicaAction, replicaRequest, state.version());
}
}

@@ -831,6 +901,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
*/
@Override
protected void doRun() {
setPhase(task, "replicating");
if (pending.get() == 0) {
doFinish();
return;
@@ -860,7 +931,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
performOnReplica(shard);
}
// send operation to relocating shard
if (shard.relocating()) {
// local shard can be a relocation target of a primary that is in relocated state
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
performOnReplica(shard.buildTargetRelocatingShard());
}
}
@@ -899,7 +971,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
logger.warn("[{}] {}", exp, shardId, message);
shardStateAction.shardFailed(
shard,
indexUUID,
indexShardReference.routingEntry(),
message,
exp,
new ShardStateAction.Listener() {
@@ -941,6 +1013,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}

private void forceFinishAsFailed(Throwable t) {
setPhase(task, "failed");
if (finished.compareAndSet(false, true)) {
Releasables.close(indexShardReference);
try {
@@ -954,6 +1027,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

private void doFinish() {
if (finished.compareAndSet(false, true)) {
setPhase(task, "finished");
Releasables.close(indexShardReference);
final ReplicationResponse.ShardInfo.Failure[] failuresArray;
if (!shardReplicaFailures.isEmpty()) {
@@ -993,21 +1067,39 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
}

static class IndexShardReference implements Releasable {
interface IndexShardReference extends Releasable {
boolean isRelocated();

final private IndexShard counter;
private final AtomicBoolean closed = new AtomicBoolean();
ShardRouting routingEntry();
}

IndexShardReference(IndexShard counter) {
counter.incrementOperationCounter();
this.counter = counter;
static final class IndexShardReferenceImpl implements IndexShardReference {

private final IndexShard indexShard;
private final Releasable operationLock;

IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
this.indexShard = indexShard;
if (primaryAction) {
operationLock = indexShard.acquirePrimaryOperationLock();
} else {
operationLock = indexShard.acquireReplicaOperationLock();
}
}

@Override
public void close() {
if (closed.compareAndSet(false, true)) {
counter.decrementOperationCounter();
}
operationLock.close();
}

@Override
public boolean isRelocated() {
return indexShard.state() == IndexShardState.RELOCATED;
}

@Override
public ShardRouting routingEntry() {
return indexShard.routingEntry();
}
}

@@ -1024,4 +1116,14 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
indexShard.maybeFlush();
}

/**
* Sets the current phase on the task if it isn't null. Pulled into its own
* method because it's more convenient that way.
*/
static void setPhase(ReplicationTask task, String phase) {
if (task != null) {
task.setPhase(phase);
}
}
}

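Collected from the setPhase calls threaded through this file, the phase strings a replicated write moves through, roughly in order (an illustrative summary only; no such enum exists in the source):

class ReplicationPhasesSummary {
    static final String[] HAPPY_PATH = {
        "starting", "routing", "waiting_on_primary", "primary", "replicating", "finished"
    };
    static final String[] DETOURS = {
        "rerouted",          // primary lives on another node; the second hop takes over
        "waiting_for_retry", // local cluster state too stale; re-enters "routing" later
        "replica",           // reported on the node running the replica operation
        "failed"
    };
}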
@@ -314,7 +314,7 @@ final class Bootstrap {
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("name"));
logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("node.name"));
}
// HACK, it sucks to do this, but we will run users out of disk space otherwise
if (e instanceof CreationException) {

@@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
@@ -82,7 +83,9 @@ final class BootstrapCLIParser extends CliTool {

@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("Version: %s, Build: %s/%s, JVM: %s", org.elasticsearch.Version.CURRENT, Build.CURRENT.shortHash(), Build.CURRENT.date(), JvmInfo.jvmInfo().version());
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+ ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ ", JVM: " + JvmInfo.jvmInfo().version());
return ExitStatus.OK_AND_EXIT;
}
}
@@ -103,7 +106,7 @@ final class BootstrapCLIParser extends CliTool {

// TODO: don't use system properties as a way to do this, its horrible...
@SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
public static Command parse(Terminal terminal, CommandLine cli) {
public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
if (cli.hasOption("V")) {
return Version.parse(terminal, cli);
}
@@ -132,11 +135,11 @@ final class BootstrapCLIParser extends CliTool {
String arg = iterator.next();
if (!arg.startsWith("--")) {
if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
throw new IllegalArgumentException(
throw new UserError(ExitStatus.USAGE,
"Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
}
}
// if there is no = sign, we have to get the next argument
@@ -150,11 +153,11 @@ final class BootstrapCLIParser extends CliTool {
if (iterator.hasNext()) {
String value = iterator.next();
if (value.startsWith("--")) {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
System.setProperty("es." + arg, value);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
}
}

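The IllegalArgumentException-to-UserError swap is the familiar pattern of a CLI-level exception that carries its exit status with it, so argument errors map to a proper usage exit code instead of a generic failure. A self-contained sketch of that pattern (hypothetical minimal form, not the actual UserError class):

public class CliDemo {
    static final int USAGE = 64; // BSD sysexits-style usage-error code

    static class UserError extends Exception {
        final int exitStatus;
        UserError(int exitStatus, String message) {
            super(message);
            this.exitStatus = exitStatus;
        }
    }

    static void parse(String arg) throws UserError {
        if (!arg.startsWith("--")) {
            throw new UserError(USAGE, "Parameter [" + arg + "] does not start with --");
        }
    }

    public static void main(String[] args) {
        try {
            parse("-Des.foo=bar");
        } catch (UserError e) {
            System.err.println(e.getMessage());
            System.exit(e.exitStatus); // the exception decides the process exit code
        }
    }
}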
@@ -129,7 +129,6 @@ public class TransportClient extends AbstractClient {

final ThreadPool threadPool = new ThreadPool(settings);
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
@@ -140,7 +139,7 @@ public class TransportClient extends AbstractClient {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(settings, settingsFilter));
modules.add(new SettingsModule(settings));
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(new ClusterNameModule(settings));
modules.add(new ThreadPoolModule(threadPool));

@ -103,6 +103,7 @@ public class TransportClientNodesService extends AbstractComponent {
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER);

@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
@ -121,7 +122,7 @@ public class TransportClientNodesService extends AbstractComponent {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
}

if (this.settings.getAsBoolean("client.transport.sniff", false)) {
if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
this.nodesSampler = new SniffNodesSampler();
} else {
this.nodesSampler = new SimpleNodeSampler();

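The sniff change above swaps a raw settings.getAsBoolean lookup for a typed Setting constant. A rough, hypothetical sketch of that idea in plain Java (not the Elasticsearch Setting API): the constant owns its key, default, and parser, so call sites no longer re-type the key string.

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Function;

    public class TypedSettingSketch {
        static final class Setting<T> {
            final String key;
            final T defaultValue;
            final Function<String, T> parser;
            Setting(String key, T defaultValue, Function<String, T> parser) {
                this.key = key;
                this.defaultValue = defaultValue;
                this.parser = parser;
            }
            // Parse the configured value, or fall back to the default.
            T get(Map<String, String> settings) {
                String raw = settings.get(key);
                return raw == null ? defaultValue : parser.apply(raw);
            }
        }

        static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
            new Setting<>("client.transport.sniff", false, Boolean::parseBoolean);

        public static void main(String[] args) {
            Map<String, String> configured = Collections.singletonMap("client.transport.sniff", "true");
            System.out.println(CLIENT_TRANSPORT_SNIFF.get(configured));             // true
            System.out.println(CLIENT_TRANSPORT_SNIFF.get(Collections.emptyMap())); // false, the default
        }
    }
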
@ -123,6 +123,11 @@ public interface ClusterStateTaskExecutor<T> {
return this == SUCCESS;
}

public Throwable getFailure() {
assert !isSuccess();
return failure;
}

/**
* Handle the execution result with the provided consumers
* @param onSuccess handler to invoke on success

@ -94,7 +94,7 @@ public class MappingUpdatedAction extends AbstractComponent {
}
}

public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
}
@ -102,7 +102,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
@ -111,7 +111,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.action.shard;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
@ -28,8 +29,9 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -46,6 +48,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
@ -60,6 +63,7 @@ import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -125,17 +129,22 @@ public class ShardStateAction extends AbstractComponent {
return ExceptionsHelper.unwrap(exp, MASTER_CHANNEL_EXCEPTIONS) != null;
}

public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
/**
* Send a shard failed request to the master node to update the
* cluster state.
*
* @param shardRouting the shard to fail
* @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard)
* @param message the reason for the failure
* @param failure the underlying cause of the failure
* @param listener callback upon completion of the request
*/
public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Throwable failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
}

public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
shardFailed(shardRouting, indexUUID, message, failure, listener);
}

// visible for testing
protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@ -231,15 +240,15 @@ public class ShardStateAction extends AbstractComponent {

// partition tasks into those that correspond to shards
// that exist versus do not exist
Map<Boolean, List<ShardRoutingEntry>> partition =
tasks.stream().collect(Collectors.partitioningBy(task -> shardExists(currentState, task)));
Map<ValidationResult, List<ShardRoutingEntry>> partition =
tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task)));

// tasks that correspond to non-existent shards are marked
// as successful
batchResultBuilder.successes(partition.get(false));
batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList()));

ClusterState maybeUpdatedState = currentState;
List<ShardRoutingEntry> tasksToFail = partition.get(true);
List<ShardRoutingEntry> tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
try {
List<FailedRerouteAllocation.FailedShard> failedShards =
tasksToFail
@ -257,6 +266,15 @@ public class ShardStateAction extends AbstractComponent {
batchResultBuilder.failures(tasksToFail, t);
}

partition
.getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList())
.forEach(task -> batchResultBuilder.failure(
task,
new NoLongerPrimaryShardException(
task.getShardRouting().shardId(),
"source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")
));

return batchResultBuilder.build(maybeUpdatedState);
}
@ -265,17 +283,36 @@ public class ShardStateAction extends AbstractComponent {
return allocationService.applyFailedShards(currentState, failedShards);
}

private boolean shardExists(ClusterState currentState, ShardRoutingEntry task) {
private enum ValidationResult {
VALID,
SOURCE_INVALID,
SHARD_MISSING
}

private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) {

// non-local requests
if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) {
IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId());
if (indexShard == null) {
return ValidationResult.SOURCE_INVALID;
}
ShardRouting primaryShard = indexShard.primaryShard();
if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) {
return ValidationResult.SOURCE_INVALID;
}
}

RoutingNodes.RoutingNodeIterator routingNodeIterator =
currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId());
if (routingNodeIterator != null) {
for (ShardRouting maybe : routingNodeIterator) {
if (task.getShardRouting().isSameAllocation(maybe)) {
return true;
return ValidationResult.VALID;
}
}
}
return false;
return ValidationResult.SHARD_MISSING;
}

@Override
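The executor above moves from Collectors.partitioningBy (two boolean buckets) to Collectors.groupingBy over a three-valued enum. A self-contained sketch with hypothetical tasks and a stand-in classifier in place of validateTask(currentState, task):

    import java.util.*;
    import java.util.stream.Collectors;

    // One bucket per validation outcome instead of a boolean pair.
    enum Result { VALID, SOURCE_INVALID, SHARD_MISSING }

    public class GroupingSketch {
        public static void main(String[] args) {
            List<Integer> tasks = Arrays.asList(1, 2, 3, 4, 5, 6);
            // Hypothetical classifier standing in for validateTask.
            Map<Result, List<Integer>> partition = tasks.stream()
                .collect(Collectors.groupingBy(t -> t % 3 == 0 ? Result.SHARD_MISSING
                                                  : t % 2 == 0 ? Result.SOURCE_INVALID
                                                  : Result.VALID));
            // getOrDefault guards against buckets that received no tasks at all,
            // which groupingBy (unlike partitioningBy) leaves absent from the map.
            List<Integer> valid = partition.getOrDefault(Result.VALID, Collections.emptyList());
            System.out.println(valid); // [1, 5]
        }
    }
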
@ -291,9 +328,9 @@ public class ShardStateAction extends AbstractComponent {
}
}

public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String message, Listener listener) {
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, null);
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null);
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
}
@ -360,16 +397,16 @@ public class ShardStateAction extends AbstractComponent {

public static class ShardRoutingEntry extends TransportRequest {
ShardRouting shardRouting;
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
ShardRouting sourceShardRouting;
String message;
Throwable failure;

public ShardRoutingEntry() {
}

ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Throwable failure) {
this.shardRouting = shardRouting;
this.indexUUID = indexUUID;
this.sourceShardRouting = sourceShardRouting;
this.message = message;
this.failure = failure;
}
@ -382,7 +419,7 @@ public class ShardStateAction extends AbstractComponent {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
indexUUID = in.readString();
sourceShardRouting = readShardRoutingEntry(in);
message = in.readString();
failure = in.readThrowable();
}
@ -391,18 +428,26 @@ public class ShardStateAction extends AbstractComponent {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardRouting.writeTo(out);
out.writeString(indexUUID);
sourceShardRouting.writeTo(out);
out.writeString(message);
out.writeThrowable(failure);
}

@Override
public String toString() {
return "" + shardRouting + ", indexUUID [" + indexUUID + "], message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]";
List<String> components = new ArrayList<>(4);
components.add("target shard [" + shardRouting + "]");
components.add("source shard [" + sourceShardRouting + "]");
components.add("message [" + message + "]");
if (failure != null) {
components.add("failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
}
return String.join(", ", components);
}
}

public interface Listener {

default void onSuccess() {
}

@ -423,6 +468,20 @@ public class ShardStateAction extends AbstractComponent {
*/
default void onFailure(final Throwable t) {
}

}

public static class NoLongerPrimaryShardException extends ElasticsearchException {

public NoLongerPrimaryShardException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
}

public NoLongerPrimaryShardException(StreamInput in) throws IOException {
super(in);
}

}

}

@ -43,6 +43,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;

/**
@ -137,6 +138,13 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return shard;
}

public IndexShardRoutingTable shardRoutingTableOrNull(ShardId shardId) {
return Optional
.ofNullable(index(shardId.getIndexName()))
.flatMap(irt -> Optional.ofNullable(irt.shard(shardId.getId())))
.orElse(null);
}

public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
RoutingTableValidation validation = validate(metaData);
if (!validation.valid()) {

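shardRoutingTableOrNull chains two lookups that can each miss (unknown index, unknown shard id) into one null-safe expression. A runnable sketch of the same Optional pattern over hypothetical nested maps standing in for the routing table:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    public class OptionalLookupSketch {
        // Hypothetical stand-in for the index -> shard routing structure.
        static final Map<String, Map<Integer, String>> TABLE = new HashMap<>();
        static {
            Map<Integer, String> shards = new HashMap<>();
            shards.put(0, "shard-a0");
            TABLE.put("index-a", shards);
        }

        // Either lookup step may miss; the chain collapses both misses into one null.
        static String shardOrNull(String index, int shard) {
            return Optional.ofNullable(TABLE.get(index))
                .flatMap(shards -> Optional.ofNullable(shards.get(shard)))
                .orElse(null);
        }

        public static void main(String[] args) {
            System.out.println(shardOrNull("index-a", 0)); // shard-a0
            System.out.println(shardOrNull("index-a", 1)); // null (unknown shard)
            System.out.println(shardOrNull("index-b", 0)); // null (unknown index)
        }
    }
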
@ -196,7 +196,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
// note, we rely on the fact that it's a new id each time we start, see FD and "kill -9" handling
final String nodeId = DiscoveryService.generateNodeId(settings);
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
this.transportService.setLocalNode(localNode);

@ -1140,4 +1140,5 @@ public class Strings {
return sb.toString();
}
}

}

@ -100,9 +100,10 @@ public abstract class CheckFileCommand extends CliTool.Command {
Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
terminal.printWarn("The file permissions of [%s] have changed from [%s] to [%s]",
entry.getKey(), PosixFilePermissions.toString(permissionsBeforeWrite), PosixFilePermissions.toString(permissionsAfterWrite));
terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed "
+ "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+ "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!");
}
}
@ -115,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String ownerBeforeWrite = entry.getValue();
String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
terminal.printWarn("WARN: Owner of file [%s] used to be [%s], but now is [%s]", entry.getKey(), ownerBeforeWrite, ownerAfterWrite);
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
}
}
@ -128,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String groupBeforeWrite = entry.getValue();
String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
if (!groupAfterWrite.equals(groupBeforeWrite)) {
terminal.printWarn("WARN: Group of file [%s] used to be [%s], but now is [%s]", entry.getKey(), groupBeforeWrite, groupAfterWrite);
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
}
}

@ -19,14 +19,17 @@
package org.elasticsearch.common.cli;

import org.apache.commons.cli.AlreadySelectedException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;

import java.io.IOException;
import java.util.Locale;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@ -50,7 +53,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
public abstract class CliTool {

// based on sysexits.h
public static enum ExitStatus {
public enum ExitStatus {
OK(0),
OK_AND_EXIT(0),
USAGE(64), /* command line usage error */
@ -69,23 +72,13 @@ public abstract class CliTool {

final int status;

private ExitStatus(int status) {
ExitStatus(int status) {
this.status = status;
}

public int status() {
return status;
}

public static ExitStatus fromStatus(int status) {
for (ExitStatus exitStatus : values()) {
if (exitStatus.status() == status) {
return exitStatus;
}
}

return null;
}
}

protected final Terminal terminal;
@ -108,7 +101,7 @@ public abstract class CliTool {
settings = env.settings();
}

public final ExitStatus execute(String... args) {
public final ExitStatus execute(String... args) throws Exception {

// first let's see if the user requests tool help. We're doing it only if
// this is a multi-command tool. If it's a single command tool, the -h/--help
@ -124,7 +117,7 @@ public abstract class CliTool {
} else {

if (args.length == 0) {
terminal.printError("command not specified");
terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified");
config.printUsage(terminal);
return ExitStatus.USAGE;
}
@ -132,7 +125,7 @@ public abstract class CliTool {
String cmdName = args[0];
cmd = config.cmd(cmdName);
if (cmd == null) {
terminal.printError("unknown command [%s]. Use [-h] option to list available commands", cmdName);
terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. Use [-h] option to list available commands");
return ExitStatus.USAGE;
}
@ -146,23 +139,11 @@ public abstract class CliTool {
}
}

Command command = null;
try {

command = parse(cmd, args);
return command.execute(settings, env);
} catch (IOException ioe) {
terminal.printError(ioe);
return ExitStatus.IO_ERROR;
} catch (IllegalArgumentException ilae) {
terminal.printError(ilae);
return ExitStatus.USAGE;
} catch (Throwable t) {
terminal.printError(t);
if (command == null) {
return ExitStatus.USAGE;
}
return ExitStatus.CODE_ERROR;
return parse(cmd, args).execute(settings, env);
} catch (UserError error) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage());
return error.exitStatus;
}
}
@ -177,9 +158,21 @@ public abstract class CliTool {
if (cli.hasOption("h")) {
return helpCmd(cmd);
}
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
terminal.verbosity(verbosity);
try {
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
} catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
// intentionally drop the stack trace here as these are really user errors,
// the stack trace into the cli parsing lib is not important
throw new UserError(ExitStatus.USAGE, e.toString());
}

if (cli.hasOption("v")) {
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
} else if (cli.hasOption("s")) {
terminal.setVerbosity(Terminal.Verbosity.SILENT);
} else {
terminal.setVerbosity(Terminal.Verbosity.NORMAL);
}
return parse(cmd.name(), cli);
}
@ -237,7 +230,7 @@ public abstract class CliTool {
public ExitStatus execute(Settings settings, Environment env) throws Exception {
if (msg != null) {
if (status != ExitStatus.OK) {
terminal.printError(msg);
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg);
} else {
terminal.println(msg);
}

@ -41,7 +41,7 @@ public class HelpPrinter {
}

private static void print(Class clazz, String name, final Terminal terminal) {
terminal.println(Terminal.Verbosity.SILENT);
terminal.println(Terminal.Verbosity.SILENT, "");
try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) {
Streams.readAllLines(input, new Callback<String>() {
@Override
@ -52,6 +52,6 @@ public class HelpPrinter {
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
terminal.println();
terminal.println(Terminal.Verbosity.SILENT, "");
}
}

@ -19,163 +19,103 @@
package org.elasticsearch.common.cli;

import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.SuppressForbidden;

import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.Locale;
import java.nio.charset.Charset;

import org.elasticsearch.common.SuppressForbidden;

/**
*
* A Terminal wraps access to reading input and writing output for a {@link CliTool}.
*
* The available methods are similar to those of {@link Console}, with the ability
* to read either normal text or a password, and the ability to print a line
* of text. Printing is also gated by the {@link Verbosity} of the terminal,
* which allows {@link #println(Verbosity,String)} calls which act like a logger,
* only actually printing if the verbosity level of the terminal is above
* the verbosity of the message.
*/
@SuppressForbidden(reason = "System#out")
public abstract class Terminal {

public static final String DEBUG_SYSTEM_PROPERTY = "es.cli.debug";
/** The default terminal implementation, which will be a console if available, or stdout/stderr if not. */
public static final Terminal DEFAULT = ConsoleTerminal.isSupported() ? new ConsoleTerminal() : new SystemTerminal();

public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();

public static enum Verbosity {
SILENT(0), NORMAL(1), VERBOSE(2);

private final int level;

private Verbosity(int level) {
this.level = level;
}

public boolean enabled(Verbosity verbosity) {
return level >= verbosity.level;
}

public static Verbosity resolve(CommandLine cli) {
if (cli.hasOption("s")) {
return SILENT;
}
if (cli.hasOption("v")) {
return VERBOSE;
}
return NORMAL;
}
/** Defines the available verbosity levels of messages to be printed. */
public enum Verbosity {
SILENT, /* always printed */
NORMAL, /* printed when no options are given to cli */
VERBOSE /* printed only when cli is passed verbose option */
}

/** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
private Verbosity verbosity = Verbosity.NORMAL;
private final boolean isDebugEnabled;

public Terminal() {
this(Verbosity.NORMAL);
}

public Terminal(Verbosity verbosity) {
this.verbosity = verbosity;
this.isDebugEnabled = "true".equals(System.getProperty(DEBUG_SYSTEM_PROPERTY, "false"));
}

public void verbosity(Verbosity verbosity) {
/** Sets the verbosity of the terminal. */
void setVerbosity(Verbosity verbosity) {
this.verbosity = verbosity;
}

public Verbosity verbosity() {
return verbosity;
/** Reads clear text from the terminal input. See {@link Console#readLine()}. */
public abstract String readText(String prompt);

/** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
public abstract char[] readSecret(String prompt);

/** Print a message directly to the terminal. */
protected abstract void doPrint(String msg);

/** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
public final void println(String msg) {
println(Verbosity.NORMAL, msg);
}

public abstract String readText(String text, Object... args);

public abstract char[] readSecret(String text, Object... args);

protected abstract void printStackTrace(Throwable t);

public void println() {
println(Verbosity.NORMAL);
}

public void println(String msg, Object... args) {
println(Verbosity.NORMAL, msg, args);
}

public void print(String msg, Object... args) {
print(Verbosity.NORMAL, msg, args);
}

public void println(Verbosity verbosity) {
println(verbosity, "");
}

public void println(Verbosity verbosity, String msg, Object... args) {
print(verbosity, msg + System.lineSeparator(), args);
}

public void print(Verbosity verbosity, String msg, Object... args) {
if (this.verbosity.enabled(verbosity)) {
doPrint(msg, args);
/** Prints a line to the terminal at {@code verbosity} level. */
public final void println(Verbosity verbosity, String msg) {
if (this.verbosity.ordinal() >= verbosity.ordinal()) {
doPrint(msg + System.lineSeparator());
}
}

public void printError(String msg, Object... args) {
println(Verbosity.SILENT, "ERROR: " + msg, args);
}

public void printError(Throwable t) {
printError("%s", t.toString());
if (isDebugEnabled) {
printStackTrace(t);
}
}

public void printWarn(String msg, Object... args) {
println(Verbosity.SILENT, "WARN: " + msg, args);
}

protected abstract void doPrint(String msg, Object... args);

private static class ConsoleTerminal extends Terminal {

final Console console = System.console();
private static final Console console = System.console();

static boolean supported() {
return System.console() != null;
static boolean isSupported() {
return console != null;
}

@Override
public void doPrint(String msg, Object... args) {
console.printf(msg, args);
public void doPrint(String msg) {
console.printf("%s", msg);
console.flush();
}

@Override
public String readText(String text, Object... args) {
return console.readLine(text, args);
public String readText(String prompt) {
return console.readLine("%s", prompt);
}

@Override
public char[] readSecret(String text, Object... args) {
return console.readPassword(text, args);
}

@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(console.writer());
public char[] readSecret(String prompt) {
return console.readPassword("%s", prompt);
}
}

@SuppressForbidden(reason = "System#out")
private static class SystemTerminal extends Terminal {

private final PrintWriter printWriter = new PrintWriter(System.out);

@Override
public void doPrint(String msg, Object... args) {
System.out.print(String.format(Locale.ROOT, msg, args));
@SuppressForbidden(reason = "System#out")
public void doPrint(String msg) {
System.out.print(msg);
System.out.flush();
}

@Override
public String readText(String text, Object... args) {
print(text, args);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
public String readText(String text) {
doPrint(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
try {
return reader.readLine();
} catch (IOException ioe) {
@ -184,13 +124,8 @@ public abstract class Terminal {
}

@Override
public char[] readSecret(String text, Object... args) {
return readText(text, args).toCharArray();
}

@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(printWriter);
public char[] readSecret(String text) {
return readText(text).toCharArray();
}
}
}

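The rewritten Terminal replaces the explicit per-level integer with enum declaration order: SILENT < NORMAL < VERBOSE, compared via ordinal(). A self-contained sketch of that gate (the class and messages here are hypothetical):

    public class VerbositySketch {
        enum Verbosity { SILENT, NORMAL, VERBOSE }

        private Verbosity verbosity = Verbosity.NORMAL;

        void println(Verbosity messageVerbosity, String msg) {
            // A message prints only if the terminal is at least as verbose as the message.
            if (this.verbosity.ordinal() >= messageVerbosity.ordinal()) {
                System.out.println(msg);
            }
        }

        public static void main(String[] args) {
            VerbositySketch terminal = new VerbositySketch();
            terminal.println(Verbosity.SILENT, "always shown");
            terminal.println(Verbosity.VERBOSE, "dropped at NORMAL verbosity");
        }
    }
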
@ -17,27 +17,19 @@
* under the License.
*/

package org.elasticsearch.ingest.core;
package org.elasticsearch.common.cli;

public class PipelineFactoryResult {
private final Pipeline pipeline;
private final PipelineFactoryError error;
/**
* An exception representing a user-fixable problem in {@link CliTool} usage.
*/
public class UserError extends Exception {

public PipelineFactoryResult(Pipeline pipeline) {
this.pipeline = pipeline;
this.error = null;
}
/** The exit status the cli should use when catching this user error. */
public final CliTool.ExitStatus exitStatus;

public PipelineFactoryResult(PipelineFactoryError error) {
this.error = error;
this.pipeline = null;
}

public Pipeline getPipeline() {
return pipeline;
}

public PipelineFactoryError getError() {
return error;
/** Constructs a UserError with an exit status and message to show the user. */
public UserError(CliTool.ExitStatus exitStatus, String msg) {
super(msg);
this.exitStatus = exitStatus;
}
}

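UserError is the piece that lets CliTool.execute collapse its catch blocks: the exception carries the exit status chosen at the throw site. A hypothetical, minimal re-creation of the whole flow in plain Java (64 for USAGE follows sysexits.h, as noted in the ExitStatus enum above):

    public class UserErrorSketch {
        static class UserError extends Exception {
            final int exitStatus;
            UserError(int exitStatus, String msg) {
                super(msg);
                this.exitStatus = exitStatus;
            }
        }

        static void run(String[] args) throws UserError {
            if (args.length == 0) {
                throw new UserError(64, "command not specified"); // 64 = USAGE, per sysexits.h
            }
        }

        public static void main(String[] args) {
            try {
                run(args);
            } catch (UserError e) {
                // Print the message without a stack trace and map it to the exit code.
                System.err.println("ERROR: " + e.getMessage());
                System.exit(e.exitStatus);
            }
        }
    }
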
@ -50,7 +50,7 @@ public abstract class AbstractComponent {
* Returns the node's name from the settings or the empty string if not set.
*/
public final String nodeName() {
return settings.get("name", "");
return settings.get("node.name", "");
}

/**

@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.matcher.Matcher;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.inject.spi.TypeConverter;
import org.elasticsearch.common.inject.spi.TypeListener;
import org.elasticsearch.common.settings.SettingsModule;

import java.lang.annotation.Annotation;
import java.util.Objects;

@ -38,6 +38,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -690,6 +691,13 @@ public abstract class StreamInput extends InputStream {
return readNamedWriteable(ScoreFunctionBuilder.class);
}

/**
* Reads a {@link Task.Status} from the current stream.
*/
public Task.Status readTaskStatus() throws IOException {
return readNamedWriteable(Task.Status.class);
}

/**
* Reads a list of objects
*/

@ -37,6 +37,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.joda.time.ReadableInstant;

import java.io.EOFException;
@ -660,6 +661,13 @@ public abstract class StreamOutput extends OutputStream {
writeNamedWriteable(scoreFunctionBuilder);
}

/**
* Writes a {@link Task.Status} to the current stream.
*/
public void writeTaskStatus(Task.Status status) throws IOException {
writeNamedWriteable(status);
}

/**
* Writes the given {@link GeoPoint} to the stream
*/

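readTaskStatus and writeTaskStatus above follow the StreamInput/StreamOutput convention that every write has a mirror-image read. A generic sketch of that round-trip symmetry using plain java.io data streams (not the Elasticsearch stream classes):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class StreamPairSketch {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF("task-status"); // write side
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                System.out.println(in.readUTF()); // matching read side: task-status
            }
        }
    }
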
@ -99,7 +99,7 @@ public class Loggers {
prefixesList.add(addr.getHostName());
}
}
String name = settings.get("name");
String name = settings.get("node.name");
if (name != null) {
prefixesList.add(name);
}

@ -63,7 +63,7 @@ public class AllField extends Field {
}

@Override
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
try {
allEntries.reset(); // reset the all entries, just in case it was read already
if (allEntries.customBoost() && fieldType().indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {

@ -150,16 +150,19 @@ public class NetworkModule extends AbstractModule {

public static final String TRANSPORT_TYPE_KEY = "transport.type";
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";

public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String NETTY_TRANSPORT = "netty";

public static final String HTTP_TYPE_KEY = "http.type";
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER);

private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,

RestNodesInfoAction.class,
RestNodesStatsAction.class,
RestNodesHotThreadsAction.class,
@ -380,7 +383,7 @@ public class NetworkModule extends AbstractModule {
} else {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_SETTING.getKey(), NETTY_TRANSPORT);
}
bind(RestController.class).asEagerSingleton();
catHandlers.bind(binder());

@ -58,7 +58,6 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
@ -76,6 +75,7 @@ import org.elasticsearch.monitor.os.OsService;
import org.elasticsearch.monitor.process.ProcessService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.rest.BaseRestHandler;
@ -114,9 +114,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false;
}
}

@Override
@Override
public Settings getValue(Settings current, Settings previous) {
Settings.Builder builder = Settings.builder();
builder.put(current.filter(loggerPredicate).getAsMap());
@ -132,7 +132,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
return builder.build();
}

@Override
@Override
public void apply(Settings value, Settings current, Settings previous) {
for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key);
@ -143,231 +143,240 @@ public final class ClusterSettings extends AbstractScopedSettings {
} else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key));
}
}
}
}
}
};

public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
TransportSettings.TRANSPORT_PROFILES_SETTING,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
NettyTransport.CONNECTIONS_PER_NODE_BULK,
NettyTransport.CONNECTIONS_PER_NODE_REG,
NettyTransport.CONNECTIONS_PER_NODE_STATE,
NettyTransport.CONNECTIONS_PER_NODE_PING,
NettyTransport.PING_SCHEDULE,
NettyTransport.TCP_BLOCKING_CLIENT,
NettyTransport.TCP_CONNECT_TIMEOUT,
NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
NettyTransport.TCP_NO_DELAY,
NettyTransport.TCP_KEEP_ALIVE,
NettyTransport.TCP_REUSE_ADDRESS,
NettyTransport.TCP_SEND_BUFFER_SIZE,
NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
NettyTransport.TCP_BLOCKING_SERVER,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
Node.NODE_CLIENT_SETTING,
Node.NODE_DATA_SETTING,
Node.NODE_MASTER_SETTING,
Node.NODE_LOCAL_SETTING,
Node.NODE_MODE_SETTING,
Node.NODE_INGEST_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
ClusterName.CLUSTER_NAME_SETTING,
Client.CLIENT_TYPE_SETTING_S,
InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
EsExecutors.PROCESSORS_SETTING,
ThreadContext.DEFAULT_HEADERS_SETTING,
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
ESLoggerFactory.LOG_LEVEL_SETTING,
TribeService.BLOCKS_METADATA_SETTING,
TribeService.BLOCKS_WRITE_SETTING,
TribeService.BLOCKS_WRITE_INDICES_SETTING,
TribeService.BLOCKS_READ_INDICES_SETTING,
TribeService.BLOCKS_METADATA_INDICES_SETTING,
TribeService.ON_CONFLICT_SETTING,
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
OsService.REFRESH_INTERVAL_SETTING,
ProcessService.REFRESH_INTERVAL_SETTING,
JvmService.REFRESH_INTERVAL_SETTING,
FsService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.ENABLED_SETTING,
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.GC_SETTING,
PageCacheRecycler.LIMIT_HEAP_SETTING,
PageCacheRecycler.WEIGHT_BYTES_SETTING,
PageCacheRecycler.WEIGHT_INT_SETTING,
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING
)));
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
TransportClientNodesService.CLIENT_TRANSPORT_SNIFF,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NetworkModule.HTTP_TYPE_SETTING,
NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
TransportSettings.TRANSPORT_PROFILES_SETTING,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
NettyTransport.CONNECTIONS_PER_NODE_BULK,
NettyTransport.CONNECTIONS_PER_NODE_REG,
NettyTransport.CONNECTIONS_PER_NODE_STATE,
NettyTransport.CONNECTIONS_PER_NODE_PING,
NettyTransport.PING_SCHEDULE,
NettyTransport.TCP_BLOCKING_CLIENT,
NettyTransport.TCP_CONNECT_TIMEOUT,
NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
NettyTransport.TCP_NO_DELAY,
NettyTransport.TCP_KEEP_ALIVE,
NettyTransport.TCP_REUSE_ADDRESS,
|
||||
NettyTransport.TCP_SEND_BUFFER_SIZE,
|
||||
NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
|
||||
NettyTransport.TCP_BLOCKING_SERVER,
|
||||
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
|
||||
NetworkService.TcpSettings.TCP_NO_DELAY,
|
||||
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
|
||||
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
|
||||
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
|
||||
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
|
||||
NetworkService.TcpSettings.TCP_BLOCKING,
|
||||
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
|
||||
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
|
||||
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
|
||||
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
|
||||
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
|
||||
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
|
||||
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
|
||||
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
|
||||
ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
|
||||
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
|
||||
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
|
||||
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
|
||||
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
|
||||
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
|
||||
HunspellService.HUNSPELL_LAZY_LOAD,
|
||||
HunspellService.HUNSPELL_IGNORE_CASE,
|
||||
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
|
||||
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
|
||||
Environment.PATH_CONF_SETTING,
|
||||
Environment.PATH_DATA_SETTING,
|
||||
Environment.PATH_HOME_SETTING,
|
||||
Environment.PATH_LOGS_SETTING,
|
||||
Environment.PATH_PLUGINS_SETTING,
|
||||
Environment.PATH_REPO_SETTING,
|
||||
Environment.PATH_SCRIPTS_SETTING,
|
||||
Environment.PATH_SHARED_DATA_SETTING,
|
||||
Environment.PIDFILE_SETTING,
|
||||
DiscoveryService.DISCOVERY_SEED_SETTING,
|
||||
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
|
||||
DiscoveryModule.DISCOVERY_TYPE_SETTING,
|
||||
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
|
||||
FaultDetection.PING_RETRIES_SETTING,
|
||||
FaultDetection.PING_TIMEOUT_SETTING,
|
||||
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
|
||||
FaultDetection.PING_INTERVAL_SETTING,
|
||||
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
|
||||
ZenDiscovery.PING_TIMEOUT_SETTING,
|
||||
ZenDiscovery.JOIN_TIMEOUT_SETTING,
|
||||
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
|
||||
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
|
||||
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
|
||||
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
|
||||
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
|
||||
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
|
||||
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
|
||||
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
|
||||
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
|
||||
SearchService.DEFAULT_KEEPALIVE_SETTING,
|
||||
SearchService.KEEPALIVE_INTERVAL_SETTING,
|
||||
Node.WRITE_PORTS_FIELD_SETTING,
|
||||
Node.NODE_NAME_SETTING,
|
||||
Node.NODE_CLIENT_SETTING,
|
||||
Node.NODE_DATA_SETTING,
|
||||
Node.NODE_MASTER_SETTING,
|
||||
Node.NODE_LOCAL_SETTING,
|
||||
Node.NODE_MODE_SETTING,
|
||||
Node.NODE_INGEST_SETTING,
|
||||
Node.NODE_ATTRIBUTES,
|
||||
URLRepository.ALLOWED_URLS_SETTING,
|
||||
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
|
||||
URLRepository.REPOSITORIES_URL_SETTING,
|
||||
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
|
||||
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
|
||||
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
|
||||
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
|
||||
ClusterName.CLUSTER_NAME_SETTING,
|
||||
Client.CLIENT_TYPE_SETTING_S,
|
||||
InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
|
||||
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
|
||||
EsExecutors.PROCESSORS_SETTING,
|
||||
ThreadContext.DEFAULT_HEADERS_SETTING,
|
||||
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
|
||||
ESLoggerFactory.LOG_LEVEL_SETTING,
|
||||
TribeService.BLOCKS_METADATA_SETTING,
|
||||
TribeService.BLOCKS_WRITE_SETTING,
|
||||
TribeService.BLOCKS_WRITE_INDICES_SETTING,
|
||||
TribeService.BLOCKS_READ_INDICES_SETTING,
|
||||
TribeService.BLOCKS_METADATA_INDICES_SETTING,
|
||||
TribeService.ON_CONFLICT_SETTING,
|
||||
TribeService.TRIBE_NAME_SETTING,
|
||||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
|
||||
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
|
||||
NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
|
||||
OsService.REFRESH_INTERVAL_SETTING,
|
||||
ProcessService.REFRESH_INTERVAL_SETTING,
|
||||
JvmService.REFRESH_INTERVAL_SETTING,
|
||||
FsService.REFRESH_INTERVAL_SETTING,
|
||||
JvmGcMonitorService.ENABLED_SETTING,
|
||||
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
|
||||
JvmGcMonitorService.GC_SETTING,
|
||||
PageCacheRecycler.LIMIT_HEAP_SETTING,
|
||||
PageCacheRecycler.WEIGHT_BYTES_SETTING,
|
||||
PageCacheRecycler.WEIGHT_INT_SETTING,
|
||||
PageCacheRecycler.WEIGHT_LONG_SETTING,
|
||||
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
|
||||
PageCacheRecycler.TYPE_SETTING,
|
||||
PluginsService.MANDATORY_SETTING
|
||||
)));
|
||||
}
|
||||
|
|
|
@ -46,6 +46,24 @@ import java.util.stream.Collectors;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (dynamic=true) can be modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward
* to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this:
* <pre>{@code
* public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
* </pre>
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method.
* <pre>
* final boolean myBooleanValue = MY_BOOLEAN.get(settings);
* </pre>
* It's recommended to use typed settings rather than string based settings. For example adding a setting for an enum type:
* <pre>{@code
* public enum Color {
* RED, GREEN, BLUE;
* }
* public static final Setting<Color> MY_COLOR = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
* }
* </pre>
*/
public class Setting<T> extends ToXContentToBytes {
private final String key;
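For orientation, a minimal usage sketch assembled purely from the Javadoc above (the setting key "my.bool.setting" and the readFlag helper are hypothetical; boolSetting and Setting.Scope.CLUSTER are as used throughout this commit):

public static final Setting<Boolean> MY_BOOLEAN =
        Setting.boolSetting("my.bool.setting", true, false, Setting.Scope.CLUSTER);

boolean readFlag(Settings settings) {
    // values are read through the Setting, not via settings.get("my.bool.setting")
    return MY_BOOLEAN.get(settings);
}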
@ -84,7 +102,9 @@ public class Setting<T> extends ToXContentToBytes {
}

/**
* Returns the settings key or a prefix if this setting is a group setting
* Returns the settings key or a prefix if this setting is a group setting.
* <b>Note: this method should not be used to retrieve a value from a {@link Settings} object.
* Use {@link #get(Settings)} instead</b>
*
* @see #isGroupSetting()
*/
@ -25,14 +25,18 @@ import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.rest.RestRequest;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.Set;

/**
*
* A class that allows to filter settings objects by simple regular expression patterns or full settings keys.
* It's used for response filtering on the rest layer to for instance filter out sensitive information like access keys.
*/
public final class SettingsFilter extends AbstractComponent {
/**
@ -40,50 +44,62 @@ public final class SettingsFilter extends AbstractComponent {
*/
public static String SETTINGS_FILTER_PARAM = "settings_filter";

private final CopyOnWriteArrayList<String> patterns = new CopyOnWriteArrayList<>();
private final Set<String> patterns;
private final String patternString;

public SettingsFilter(Settings settings) {
public SettingsFilter(Settings settings, Collection<String> patterns) {
super(settings);
HashSet<String> set = new HashSet<>();
for (String pattern : patterns) {
if (isValidPattern(pattern) == false) {
throw new IllegalArgumentException("invalid pattern: " + pattern);
}
}
this.patterns = Collections.unmodifiableSet(new HashSet<>(patterns));
patternString = Strings.collectionToDelimitedString(patterns, ",");
}

/**
* Adds a new simple pattern to the list of filters
* Returns a set of patterns
*/
public void addFilter(String pattern) {
patterns.add(pattern);
public Set<String> getPatterns() {
return patterns;
}

/**
* Removes a simple pattern from the list of filters
* Returns <code>true</code> iff the given string is either a valid settings key pattern or a simple regular expression
* @see Regex
* @see AbstractScopedSettings#isValidKey(String)
*/
public void removeFilter(String pattern) {
patterns.remove(pattern);
}

public String getPatterns() {
return Strings.collectionToDelimitedString(patterns, ",");
public static boolean isValidPattern(String pattern) {
return AbstractScopedSettings.isValidKey(pattern) || Regex.isSimpleMatchPattern(pattern);
}

public void addFilterSettingParams(RestRequest request) {
if (patterns.isEmpty() == false) {
request.params().put(SETTINGS_FILTER_PARAM, getPatterns());
request.params().put(SETTINGS_FILTER_PARAM, patternString);
}
}

public static Settings filterSettings(Params params, Settings settings) {
String patterns = params.param(SETTINGS_FILTER_PARAM);
Settings filteredSettings = settings;
final Settings filteredSettings;
if (patterns != null && patterns.isEmpty() == false) {
filteredSettings = SettingsFilter.filterSettings(patterns, filteredSettings);
filteredSettings = filterSettings(Strings.commaDelimitedListToSet(patterns), settings);
} else {
filteredSettings = settings;
}
return filteredSettings;
}

public static Settings filterSettings(String patterns, Settings settings) {
String[] patternArray = Strings.delimitedListToStringArray(patterns, ",");
public Settings filter(Settings settings) {
return filterSettings(patterns, settings);
}

private static Settings filterSettings(Iterable<String> patterns, Settings settings) {
Settings.Builder builder = Settings.settingsBuilder().put(settings);
List<String> simpleMatchPatternList = new ArrayList<>();
for (String pattern : patternArray) {
for (String pattern : patterns) {
if (Regex.isSimpleMatchPattern(pattern)) {
simpleMatchPatternList.add(pattern);
} else {
@ -102,4 +118,4 @@ public final class SettingsFilter extends AbstractComponent {
}
return builder.build();
}
}
}
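As a usage sketch of the reworked API above (the setting keys and values are made up): patterns are now validated and frozen at construction time, and applied through filter(Settings):

Settings settings = Settings.settingsBuilder()
        .put("node.name", "node_1")
        .put("cloud.api_key", "s3kr3t") // hypothetical sensitive setting
        .build();
SettingsFilter settingsFilter = new SettingsFilter(settings, Collections.singleton("cloud.api_key"));
Settings visible = settingsFilter.filter(settings);
assert visible.get("cloud.api_key") == null;      // filtered out
assert "node_1".equals(visible.get("node.name")); // untouched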
@ -20,27 +20,28 @@
package org.elasticsearch.common.settings;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.tribe.TribeService;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

/**
* A module that binds the provided settings to the {@link Settings} interface.
*
*
*/
public class SettingsModule extends AbstractModule {

private final Settings settings;
private final SettingsFilter settingsFilter;
private final Set<String> settingsFilterPattern = new HashSet<>();
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;

public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
public SettingsModule(Settings settings) {
this.settings = settings;
this.settingsFilter = settingsFilter;
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);
}
@ -55,20 +56,21 @@ public class SettingsModule extends AbstractModule {
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
// by now we are fully configured, let's check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
Predicate<String> noIndexSettingPredicate = IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate();
Predicate<String> noTribePredicate = (s) -> s.startsWith("tribe.") == false;
for (Map.Entry<String, String> entry : settings.filter(noTribePredicate.and(noIndexSettingPredicate)).getAsMap().entrySet()) {
validateClusterSetting(clusterSettings, entry.getKey(), settings);
}

final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate();
clusterSettings.validate(settings.filter(acceptOnlyClusterSettings));
validateTribeSettings(settings, clusterSettings);
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
bind(SettingsFilter.class).toInstance(new SettingsFilter(settings, settingsFilterPattern));

bind(ClusterSettings.class).toInstance(clusterSettings);
bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
}

/**
* Registers a new setting. This method should be used by plugins in order to expose any custom settings the plugin defines.
* Unless a setting is registered the setting is unusable. If a setting is nevertheless specified the node will reject
* the setting during startup.
*/
public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
@ -86,25 +88,38 @@ public class SettingsModule extends AbstractModule {
}
}

public void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.getGroups("tribe.", true);
/**
* Registers a settings filter pattern that allows to filter out certain settings that for instance contain sensitive information
* or if a setting is for internal purposes only. The given pattern must either be a valid settings key or a simple regexp pattern.
*/
public void registerSettingsFilter(String filter) {
if (SettingsFilter.isValidPattern(filter) == false) {
throw new IllegalArgumentException("filter [" + filter + "] is invalid must be either a key or a regex pattern");
}
if (settingsFilterPattern.contains(filter)) {
throw new IllegalArgumentException("filter [" + filter + "] has already been registered");
}
settingsFilterPattern.add(filter);
}

public void registerSettingsFilterIfMissing(String filter) {
if (settingsFilterPattern.contains(filter) == false) {
registerSettingsFilter(filter);
}
}


private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);
for (Map.Entry<String, Settings> tribeSettings : groups.entrySet()) {
for (Map.Entry<String, String> entry : tribeSettings.getValue().getAsMap().entrySet()) {
validateClusterSetting(clusterSettings, entry.getKey(), tribeSettings.getValue());
Settings thisTribesSettings = tribeSettings.getValue();
for (Map.Entry<String, String> entry : thisTribesSettings.getAsMap().entrySet()) {
try {
clusterSettings.validate(entry.getKey(), thisTribesSettings);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("tribe." + tribeSettings.getKey() + " validation failed: " + ex.getMessage(), ex);
}
}
}
}

private final void validateClusterSetting(ClusterSettings clusterSettings, String key, Settings settings) {
// we can't call this method yet since not all node level settings are registered.
// yet we can validate the ones we have registered to not have invalid values. this is better than nothing
// and progress over perfection and we fail as soon as possible.
// clusterSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE.negate()));
if (clusterSettings.get(key) != null) {
clusterSettings.validate(key, settings);
} else if (AbstractScopedSettings.isValidKey(key) == false) {
throw new IllegalArgumentException("illegal settings key: [" + key + "]");
}
}

}
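For context, a sketch of how a plugin would use these hooks; the onModule(SettingsModule) extension point and the overridden name()/description() follow the plugin conventions of this codebase, while the plugin class and its setting are hypothetical:

import java.util.function.Function;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.plugins.Plugin;

public class MyPlugin extends Plugin {
    public static final Setting<String> API_KEY_SETTING =
            new Setting<>("myplugin.api_key", "", Function.identity(), false, Setting.Scope.CLUSTER);

    @Override
    public String name() {
        return "my-plugin";
    }

    @Override
    public String description() {
        return "demonstrates setting and settings-filter registration";
    }

    public void onModule(SettingsModule module) {
        module.registerSetting(API_KEY_SETTING);            // unregistered settings are rejected at startup
        module.registerSettingsFilter("myplugin.api_key");  // keep the key out of REST responses
    }
}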
@ -90,7 +90,7 @@ public class EsExecutors {
}

public static String threadName(Settings settings, String namePrefix) {
String name = settings.get("name");
String name = settings.get("node.name");
if (name == null) {
name = "elasticsearch";
} else {
@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.lease.Releasable;

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
* Container that represents a resource with reference counting capabilities. Provides operations to suspend acquisition of new references.
* This is useful for resource management when resources are intermittently unavailable.
*
* Assumes less than Integer.MAX_VALUE references are concurrently being held at one point in time.
*/
public final class SuspendableRefContainer {
private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
private final Semaphore semaphore;

public SuspendableRefContainer() {
// fair semaphore to ensure that blockAcquisition() does not starve under thread contention
this.semaphore = new Semaphore(TOTAL_PERMITS, true);
}

/**
* Tries acquiring a reference. Returns reference holder if reference acquisition is not blocked at the time of invocation (see
* {@link #blockAcquisition()}). Returns null if reference acquisition is blocked at the time of invocation.
*
* @return reference holder if reference acquisition is not blocked, null otherwise
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable tryAcquire() throws InterruptedException {
if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting
return idempotentRelease(1);
} else {
return null;
}
}

/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable acquire() throws InterruptedException {
semaphore.acquire();
return idempotentRelease(1);
}

/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
*/
public Releasable acquireUninterruptibly() {
semaphore.acquireUninterruptibly();
return idempotentRelease(1);
}

/**
* Disables reference acquisition and waits until all existing references are released.
* When released, reference acquisition is enabled again.
* This guarantees that between successful acquisition and release, no one is holding a reference.
*
* @return references holder to all references
*/
public Releasable blockAcquisition() {
semaphore.acquireUninterruptibly(TOTAL_PERMITS);
return idempotentRelease(TOTAL_PERMITS);
}

/**
* Helper method that ensures permits are only released once
*
* @return reference holder
*/
private Releasable idempotentRelease(int permits) {
AtomicBoolean closed = new AtomicBoolean();
return () -> {
if (closed.compareAndSet(false, true)) {
semaphore.release(permits);
}
};
}

/**
* Returns the number of references currently being held.
*/
public int activeRefs() {
int availablePermits = semaphore.availablePermits();
if (availablePermits == 0) {
// when blockAcquisition is holding all permits
return 0;
} else {
return TOTAL_PERMITS - availablePermits;
}
}
}
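A small usage sketch of the new container (method names exactly as defined above; the scenario is illustrative): regular operations hold a reference for the duration of their work, while an exclusive phase blocks new acquisitions until it completes.

void demo() throws InterruptedException {
    SuspendableRefContainer refs = new SuspendableRefContainer();

    // normal operation: hold a reference while doing work
    try (Releasable ref = refs.acquire()) {
        assert refs.activeRefs() >= 1;
        // ... perform the operation ...
    }

    // exclusive phase: waits for all active references, then blocks new ones
    try (Releasable block = refs.blockAcquisition()) {
        assert refs.activeRefs() == 0;
        assert refs.tryAcquire() == null; // acquisition is suspended
    }
}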
@ -41,13 +41,28 @@ public interface XContent {
/**
* Creates a new generator using the provided output stream.
*/
XContentGenerator createGenerator(OutputStream os) throws IOException;
default XContentGenerator createGenerator(OutputStream os) throws IOException {
return createGenerator(os, null, true);
}

/**
* Creates a new generator using the provided output stream and some filters.
* Creates a new generator using the provided output stream and some
* inclusive filters. Same as createGenerator(os, filters, true).
*/
XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException;
default XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return createGenerator(os, filters, true);
}

/**
* Creates a new generator using the provided output stream and some
* filters.
*
* @param inclusive
* If true only paths matching a filter will be included in
* output. If false no path matching a filter will be included in
* output
*/
XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException;
/**
* Creates a parser over the provided string content.
*/
@ -77,4 +92,5 @@ public interface XContent {
* Creates a parser over the provided reader.
*/
XContentParser createParser(Reader reader) throws IOException;

}
@ -83,6 +83,10 @@ public final class XContentBuilder implements BytesStream, Releasable {
return new XContentBuilder(xContent, new BytesStreamOutput(), filters);
}

public static XContentBuilder builder(XContent xContent, String[] filters, boolean inclusive) throws IOException {
return new XContentBuilder(xContent, new BytesStreamOutput(), filters, inclusive);
}

private XContentGenerator generator;

private final OutputStream bos;
@ -102,13 +106,25 @@ public final class XContentBuilder implements BytesStream, Releasable {
}

/**
* Constructs a new builder using the provided xcontent, an OutputStream and some filters. The
* filters are used to filter fields that won't be written to the OutputStream. Make sure
* to call {@link #close()} when the builder is done with.
* Constructs a new builder using the provided xcontent, an OutputStream and
* some filters. If filters are specified, only those values matching a
* filter will be written to the output stream. Make sure to call
* {@link #close()} when the builder is done with.
*/
public XContentBuilder(XContent xContent, OutputStream bos, String[] filters) throws IOException {
this(xContent, bos, filters, true);
}

/**
* Constructs a new builder using the provided xcontent, an OutputStream and
* some filters. If {@code filters} are specified and {@code inclusive} is
* true, only those values matching a filter will be written to the output
* stream. If {@code inclusive} is false, those matching will be excluded.
* Make sure to call {@link #close()} when the builder is done with.
*/
public XContentBuilder(XContent xContent, OutputStream bos, String[] filters, boolean inclusive) throws IOException {
this.bos = bos;
this.generator = xContent.createGenerator(bos, filters);
this.generator = xContent.createGenerator(bos, filters, inclusive);
}

public XContentBuilder fieldCaseConversion(FieldCaseConversion fieldCaseConversion) {
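The effect of the new inclusive flag, as a sketch (JsonXContent.jsonXContent is the usual JSON XContent instance in this codebase; the field names and values are made up):

XContentBuilder include = XContentBuilder.builder(JsonXContent.jsonXContent, new String[] { "user" }, true);
include.startObject().field("user", "kimchy").field("secret", "s3kr3t").endObject();
include.close(); // output keeps only the matching path: {"user":"kimchy"}

XContentBuilder exclude = XContentBuilder.builder(JsonXContent.jsonXContent, new String[] { "secret" }, false);
exclude.startObject().field("user", "kimchy").field("secret", "s3kr3t").endObject();
exclude.close(); // output drops the matching path: {"user":"kimchy"}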
@ -121,5 +121,6 @@ public interface XContentGenerator extends Closeable {

void flush() throws IOException;

@Override
void close() throws IOException;
}
@ -67,13 +67,8 @@ public class CborXContent implements XContent {
}

@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os);
}

@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}

@Override
@ -108,4 +103,5 @@ public class CborXContent implements XContent {
public XContentParser createParser(Reader reader) throws IOException {
return new CborXContentParser(cborFactory.createParser(reader));
}

}
@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.cbor;

import com.fasterxml.jackson.core.JsonGenerator;

import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class CborXContentGenerator extends JsonXContentGenerator {

public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}

public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}

@Override
@ -71,13 +71,8 @@ public class JsonXContent implements XContent {
}

@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os);
}

@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}

@Override
@ -72,6 +72,10 @@ public class JsonXContentGenerator implements XContentGenerator {
private boolean prettyPrint = false;

public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
this(jsonGenerator, os, filters, true);
}

public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
if (jsonGenerator instanceof GeneratorBase) {
this.base = (GeneratorBase) jsonGenerator;
} else {
@ -82,7 +86,8 @@ public class JsonXContentGenerator implements XContentGenerator {
this.generator = jsonGenerator;
this.filter = null;
} else {
this.filter = new FilteringGeneratorDelegate(jsonGenerator, new FilterPathBasedFilter(filters), true, true);
this.filter = new FilteringGeneratorDelegate(jsonGenerator,
new FilterPathBasedFilter(filters, inclusive), true, true);
this.generator = this.filter;
}
@ -375,6 +380,7 @@ public class JsonXContentGenerator implements XContentGenerator {
}
}

@Override
public final void writeRawValue(BytesReference content) throws IOException {
XContentType contentType = XContentFactory.xContentType(content);
if (contentType == null) {
@ -450,4 +456,5 @@ public class JsonXContentGenerator implements XContentGenerator {
}
generator.close();
}

}
@ -68,13 +68,8 @@ public class SmileXContent implements XContent {
}

@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os);
}

@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}

@Override
@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.smile;

import com.fasterxml.jackson.core.JsonGenerator;

import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class SmileXContentGenerator extends JsonXContentGenerator {

public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}

public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}

@Override
@ -31,25 +31,30 @@ public class FilterPathBasedFilter extends TokenFilter {
* Marker value that should be used to indicate that a property name
* or value matches one of the filter paths.
*/
private static final TokenFilter MATCHING = new TokenFilter(){};
private static final TokenFilter MATCHING = new TokenFilter() {
};

/**
* Marker value that should be used to indicate that none of the
* property names/values matches one of the filter paths.
*/
private static final TokenFilter NO_MATCHING = new TokenFilter(){};
private static final TokenFilter NO_MATCHING = new TokenFilter() {
};

private final FilterPath[] filters;

public FilterPathBasedFilter(FilterPath[] filters) {
private final boolean inclusive;

public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) {
if (CollectionUtils.isEmpty(filters)) {
throw new IllegalArgumentException("filters cannot be null or empty");
}
this.inclusive = inclusive;
this.filters = filters;
}

public FilterPathBasedFilter(String[] filters) {
this(FilterPath.compile(filters));
public FilterPathBasedFilter(String[] filters, boolean inclusive) {
this(FilterPath.compile(filters), inclusive);
}

/**
@ -77,31 +82,32 @@ public class FilterPathBasedFilter extends TokenFilter {
}

if ((nextFilters != null) && (nextFilters.isEmpty() == false)) {
return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]));
return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]), inclusive);
}
}
return NO_MATCHING;
}


@Override
public TokenFilter includeProperty(String name) {
TokenFilter include = evaluate(name, filters);
if (include == MATCHING) {
return TokenFilter.INCLUDE_ALL;
TokenFilter filter = evaluate(name, filters);
if (filter == MATCHING) {
return inclusive ? TokenFilter.INCLUDE_ALL : null;
}
if (include == NO_MATCHING) {
return null;
if (filter == NO_MATCHING) {
return inclusive ? null : TokenFilter.INCLUDE_ALL;
}
return include;
return filter;
}

@Override
protected boolean _includeScalar() {
for (FilterPath filter : filters) {
if (filter.matches()) {
return true;
return inclusive;
}
}
return false;
return !inclusive;
}
}
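To see the exclusive mode end to end, a self-contained sketch that mirrors the FilteringGeneratorDelegate wiring used by JsonXContentGenerator above (Jackson classes as used in this codebase; the demo class and field names are made up):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import java.io.StringWriter;

public class ExclusiveFilterDemo {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        JsonGenerator delegate = new JsonFactory().createGenerator(out);
        // inclusive = false: paths matching "secret" are excluded, everything else is kept
        JsonGenerator filtered = new FilteringGeneratorDelegate(delegate,
                new FilterPathBasedFilter(new String[] { "secret" }, false), true, true);
        filtered.writeStartObject();
        filtered.writeStringField("user", "kimchy");
        filtered.writeStringField("secret", "s3kr3t");
        filtered.writeEndObject();
        filtered.close();
        System.out.println(out); // expected: {"user":"kimchy"}
    }
}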
@ -66,13 +66,8 @@ public class YamlXContent implements XContent {
}

@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os);
}

@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters);
public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
}

@Override
@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.yaml;

import com.fasterxml.jackson.core.JsonGenerator;

import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
@ -31,7 +32,11 @@ import java.io.OutputStream;
public class YamlXContentGenerator extends JsonXContentGenerator {

public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
super(jsonGenerator, os, filters);
this(jsonGenerator, os, filters, true);
}

public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
super(jsonGenerator, os, filters, inclusive);
}

@Override
@ -93,4 +93,10 @@ public interface Discovery extends LifecycleComponent<Discovery> {
*/
DiscoveryStats stats();


/**
* @return the current value of minimum master nodes, or -1 for not set
*/
int getMinimumMasterNodes();

}
@ -299,6 +299,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
return new DiscoveryStats(null);
}

@Override
public int getMinimumMasterNodes() {
return -1;
}

private LocalDiscovery[] members() {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
@ -89,17 +89,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {

public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);

public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@ -142,8 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

private final AtomicBoolean initialStateSent = new AtomicBoolean();

private volatile boolean rejoinOnMasterGone;

/** counts the times this node has joined the cluster or has elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
@ -177,7 +174,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);

logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
@ -188,7 +184,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
}
});
clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone);

this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener());
@ -323,10 +318,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterJoinsCounter.get() > 0;
}

private void setRejoingOnMasterGone(boolean rejoin) {
this.rejoinOnMasterGone = rejoin;
}

/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -363,6 +354,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return new DiscoveryStats(queueStats);
}

@Override
public int getMinimumMasterNodes() {
return electMaster.minimumMasterNodes();
}

/**
* returns true if zen discovery is started and there is currently a background thread active for (re)joining
* the cluster used for testing.
@ -670,35 +666,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// flush any pending cluster states from old master, so it will not be set as master again
publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));

if (rejoinOnMasterGone) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}

if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
}

final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
final DiscoveryNode localNode = currentState.nodes().localNode();
if (localNode.equals(electedMaster)) {
masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
ClusterState newState = ClusterState.builder(currentState).nodes(discoveryNodes).build();
nodesFD.updateNodesAndPing(newState);
return newState;

} else {
nodesFD.stop();
if (electedMaster != null) {
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
return ClusterState.builder(currentState)
.nodes(discoveryNodes)
.build();
} else {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
}
}
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}

@Override
@ -857,7 +825,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (node.getVersion().before(minimumNodeJoinVersion)) {
callback.onFailure(
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
);
return;
}
@ -1109,10 +1077,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}

boolean isRejoinOnMasterGone() {
return rejoinOnMasterGone;
}

public static class RejoinClusterRequest extends TransportRequest {

private String fromNodeId;
@ -330,31 +330,4 @@ public class Environment {
public static FileStore getFileStore(Path path) throws IOException {
return ESFileStore.getMatchingFileStore(path, fileStores);
}

/**
* Returns true if the path is writable.
* Acts just like {@link Files#isWritable(Path)}, except won't
* falsely return false for paths on SUBST'd drive letters
* See https://bugs.openjdk.java.net/browse/JDK-8034057
* Note this will set the file modification time (to its already-set value)
* to test access.
*/
@SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057")
public static boolean isWritable(Path path) throws IOException {
boolean v = Files.isWritable(path);
if (v || Constants.WINDOWS == false) {
return v;
}

// isWritable returned false on windows, the hack begins!!!!!!
// resetting the modification time is the least destructive/simplest
// way to check for both files and directories, and fails early just
// in getting the current value if file doesn't exist, etc
try {
Files.setLastModifiedTime(path, Files.getLastModifiedTime(path));
return true;
} catch (Throwable e) {
return false;
}
}
}
@ -34,9 +34,11 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;

import java.nio.file.Path;
import java.util.function.Supplier;

/**
*
@ -51,23 +53,21 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {

private final TransportNodesListGatewayMetaState listGatewayMetaState;

private final String initialMeta;
private final ClusterName clusterName;

private final Supplier<Integer> minimumMasterNodesProvider;

@Inject
public Gateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv, GatewayMetaState metaState,
TransportNodesListGatewayMetaState listGatewayMetaState, ClusterName clusterName) {
TransportNodesListGatewayMetaState listGatewayMetaState, ClusterName clusterName, Discovery discovery) {
super(settings);
this.clusterService = clusterService;
this.nodeEnv = nodeEnv;
this.metaState = metaState;
this.listGatewayMetaState = listGatewayMetaState;
this.clusterName = clusterName;

this.minimumMasterNodesProvider = discovery::getMinimumMasterNodes;
clusterService.addLast(this);

// we define what is our minimum "master" nodes, use that to allow for recovery
this.initialMeta = settings.get("gateway.initial_meta", settings.get("gateway.local.initial_meta", settings.get("discovery.zen.minimum_master_nodes", "1")));
}

public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
@ -76,7 +76,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds.toArray(String.class), null).actionGet();


int requiredAllocation = calcRequiredAllocations(this.initialMeta, nodesIds.size());
int requiredAllocation = Math.max(1, minimumMasterNodesProvider.get());


if (nodesState.failures().length > 0) {
@ -143,35 +143,6 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
builder.metaData(metaDataBuilder);
listener.onSuccess(builder.build());
}

protected int calcRequiredAllocations(final String setting, final int nodeCount) {
int requiredAllocation = 1;
try {
if ("quorum".equals(setting)) {
if (nodeCount > 2) {
requiredAllocation = (nodeCount / 2) + 1;
}
} else if ("quorum-1".equals(setting) || "half".equals(setting)) {
if (nodeCount > 2) {
requiredAllocation = ((1 + nodeCount) / 2);
}
} else if ("one".equals(setting)) {
requiredAllocation = 1;
} else if ("full".equals(setting) || "all".equals(setting)) {
requiredAllocation = nodeCount;
} else if ("full-1".equals(setting) || "all-1".equals(setting)) {
if (nodeCount > 1) {
requiredAllocation = nodeCount - 1;
}
} else {
requiredAllocation = Integer.parseInt(setting);
}
} catch (Exception e) {
logger.warn("failed to derive initial_meta from value {}", setting);
}
return requiredAllocation;
}

public void reset() throws Exception {
try {
Path[] dataPaths = nodeEnv.nodeDataPaths();
@ -20,9 +20,7 @@
package org.elasticsearch.http.netty;

import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelHandler;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ExceptionEvent;
@ -30,9 +28,6 @@ import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.HttpRequest;

import java.util.regex.Pattern;


/**
*
*/
@ -40,15 +35,12 @@ import java.util.regex.Pattern;
public class HttpRequestHandler extends SimpleChannelUpstreamHandler {

private final NettyHttpServerTransport serverTransport;
private final Pattern corsPattern;
private final boolean httpPipeliningEnabled;
private final boolean detailedErrorsEnabled;
private final ThreadContext threadContext;

public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) {
this.serverTransport = serverTransport;
this.corsPattern = RestUtils
.checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings()));
this.httpPipeliningEnabled = serverTransport.pipelining;
this.detailedErrorsEnabled = detailedErrorsEnabled;
this.threadContext = threadContext;
@ -70,9 +62,9 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
|
|||
// when reading, or using a cumalation buffer
|
||||
NettyHttpRequest httpRequest = new NettyHttpRequest(request, e.getChannel());
|
||||
if (oue != null) {
|
||||
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, corsPattern, oue, detailedErrorsEnabled));
|
||||
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, oue, detailedErrorsEnabled));
|
||||
} else {
|
||||
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, corsPattern, detailedErrorsEnabled));
|
||||
serverTransport.dispatchRequest(httpRequest, new NettyHttpChannel(serverTransport, httpRequest, detailedErrorsEnabled));
|
||||
}
|
||||
super.messageReceived(ctx, e);
|
||||
}
|
||||
|
|
|
@ -19,18 +19,17 @@

package org.elasticsearch.http.netty;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
import org.elasticsearch.http.HttpChannel;
import org.elasticsearch.http.netty.cors.CorsHandler;
import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
@ -40,28 +39,17 @@ import org.jboss.netty.handler.codec.http.CookieDecoder;
import org.jboss.netty.handler.codec.http.CookieEncoder;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.USER_AGENT;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;

/**
 *
@ -72,18 +60,18 @@ public class NettyHttpChannel extends HttpChannel {
    private final Channel channel;
    private final org.jboss.netty.handler.codec.http.HttpRequest nettyRequest;
    private OrderedUpstreamMessageEvent orderedUpstreamMessageEvent = null;
    private Pattern corsPattern;

    public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request, Pattern corsPattern, boolean detailedErrorsEnabled) {
    public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request,
                            boolean detailedErrorsEnabled) {
        super(request, detailedErrorsEnabled);
        this.transport = transport;
        this.channel = request.getChannel();
        this.nettyRequest = request.request();
        this.corsPattern = corsPattern;
    }

    public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request, Pattern corsPattern, OrderedUpstreamMessageEvent orderedUpstreamMessageEvent, boolean detailedErrorsEnabled) {
        this(transport, request, corsPattern, detailedErrorsEnabled);
    public NettyHttpChannel(NettyHttpServerTransport transport, NettyHttpRequest request,
                            OrderedUpstreamMessageEvent orderedUpstreamMessageEvent, boolean detailedErrorsEnabled) {
        this(transport, request, detailedErrorsEnabled);
        this.orderedUpstreamMessageEvent = orderedUpstreamMessageEvent;
    }

@ -95,48 +83,12 @@ public class NettyHttpChannel extends HttpChannel {

    @Override
    public void sendResponse(RestResponse response) {
        // Decide whether to close the connection or not.
        boolean http10 = nettyRequest.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
        boolean close =
            HttpHeaders.Values.CLOSE.equalsIgnoreCase(nettyRequest.headers().get(HttpHeaders.Names.CONNECTION)) ||
            (http10 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(nettyRequest.headers().get(HttpHeaders.Names.CONNECTION)));
        // if the response object was created upstream, then use it;
        // otherwise, create a new one
        HttpResponse resp = newResponse();
        resp.setStatus(getStatus(response.status()));

        // Build the response object.
        HttpResponseStatus status = getStatus(response.status());
        org.jboss.netty.handler.codec.http.HttpResponse resp;
        if (http10) {
            resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status);
            if (!close) {
                resp.headers().add(HttpHeaders.Names.CONNECTION, "Keep-Alive");
            }
        } else {
            resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
        }
        if (RestUtils.isBrowser(nettyRequest.headers().get(USER_AGENT))) {
            if (SETTING_CORS_ENABLED.get(transport.settings())) {
                String originHeader = request.header(ORIGIN);
                if (!Strings.isNullOrEmpty(originHeader)) {
                    if (corsPattern == null) {
                        String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings());
                        if (!Strings.isNullOrEmpty(allowedOrigins)) {
                            resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins);
                        }
                    } else {
                        resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, corsPattern.matcher(originHeader).matches() ? originHeader : "null");
                    }
                }
                if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
                    // Allow Ajax requests based on the CORS "preflight" request
                    resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
                    resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings()));
                    resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings()));
                }

                if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {
                    resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
                }
            }
        }
        CorsHandler.setCorsResponseHeaders(nettyRequest, resp, transport.getCorsConfig());

        String opaque = nettyRequest.headers().get("X-Opaque-Id");
        if (opaque != null) {
@ -201,7 +153,7 @@ public class NettyHttpChannel extends HttpChannel {
            addedReleaseListener = true;
        }

        if (close) {
        if (isCloseConnection()) {
            future.addListener(ChannelFutureListener.CLOSE);
        }
@ -212,6 +164,36 @@ public class NettyHttpChannel extends HttpChannel {
        }
    }

    // Determine if the request protocol version is HTTP 1.0
    private boolean isHttp10() {
        return nettyRequest.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
    }

    // Determine if the request connection should be closed on completion.
    private boolean isCloseConnection() {
        final boolean http10 = isHttp10();
        return CLOSE.equalsIgnoreCase(nettyRequest.headers().get(CONNECTION)) ||
            (http10 && !KEEP_ALIVE.equalsIgnoreCase(nettyRequest.headers().get(CONNECTION)));
    }

    // Create a new {@link HttpResponse} to transmit the response for the netty request.
    private HttpResponse newResponse() {
        final boolean http10 = isHttp10();
        final boolean close = isCloseConnection();
        // Build the response object.
        HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize
        org.jboss.netty.handler.codec.http.HttpResponse resp;
        if (http10) {
            resp = new DefaultHttpResponse(HttpVersion.HTTP_1_0, status);
            if (!close) {
                resp.headers().add(CONNECTION, "Keep-Alive");
            }
        } else {
            resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
        }
        return resp;
    }
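
The two helpers above centralize the keep-alive rules that sendResponse previously computed inline. A standalone sketch of the same decision (hypothetical helper and header values, shown for illustration only):

    static boolean closeAfterResponse(String protocolVersion, String connectionHeader) {
        boolean http10 = "HTTP/1.0".equals(protocolVersion);
        // HTTP/1.1 defaults to keep-alive; HTTP/1.0 defaults to close.
        return "close".equalsIgnoreCase(connectionHeader)
            || (http10 && !"keep-alive".equalsIgnoreCase(connectionHeader));
    }
    // closeAfterResponse("HTTP/1.1", null)         -> false (keep the connection open)
    // closeAfterResponse("HTTP/1.0", null)         -> true  (1.0 closes by default)
    // closeAfterResponse("HTTP/1.0", "keep-alive") -> false (and newResponse() echoes
    //                                                  "Connection: Keep-Alive" back)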

    private static final HttpResponseStatus TOO_MANY_REQUESTS = new HttpResponseStatus(429, "Too Many Requests");

    private HttpResponseStatus getStatus(RestStatus status) {

@ -19,6 +19,7 @@

package org.elasticsearch.http.netty;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -44,10 +45,13 @@ import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.cors.CorsConfig;
import org.elasticsearch.http.netty.cors.CorsConfigBuilder;
import org.elasticsearch.http.netty.cors.CorsHandler;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.rest.support.RestUtils;
import org.elasticsearch.transport.BindTransportException;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
@ -63,6 +67,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpContentCompressor;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.timeout.ReadTimeoutException;

@ -74,13 +79,34 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;

import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
import static org.elasticsearch.http.netty.cors.CorsHandler.ANY_ORIGIN;

/**
 *
@ -146,6 +172,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer

    protected volatile HttpServerAdapter httpServerAdapter;

    private final CorsConfig corsConfig;

    @Inject
    @SuppressForbidden(reason = "sets org.jboss.netty.epollBugWorkaround based on netty.epollBugWorkaround")
    // TODO: why be confusing like this? just let the user do it with the netty parameter instead!
@ -158,25 +186,25 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
        if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
            System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
        }
        ByteSizeValue maxContentLength = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
        this.maxChunkSize = HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
        this.maxHeaderSize = HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
        this.maxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
        this.resetCookies = HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.get(settings);
        ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
        this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
        this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
        this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
        this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings);
        this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
        this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
        this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
        this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
        this.port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
        this.port = SETTING_HTTP_PORT.get(settings);
        this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
        this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
        this.publishPort = HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.get(settings);
        this.publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
        this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
        this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
        this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
        this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
        this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
        this.detailedErrorsEnabled = HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
        this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

        long defaultReceiverPredictor = 512 * 1024;
        if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -194,10 +222,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
            receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
        }

        this.compression = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings);
        this.compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
        this.pipelining = HttpTransportSettings.SETTING_PIPELINING.get(settings);
        this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);
        this.compression = SETTING_HTTP_COMPRESSION.get(settings);
        this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
        this.pipelining = SETTING_PIPELINING.get(settings);
        this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
        this.corsConfig = buildCorsConfig(settings);

        // validate max content length
        if (maxContentLength.bytes() > Integer.MAX_VALUE) {
@ -290,6 +319,39 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
        this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
    }

    private CorsConfig buildCorsConfig(Settings settings) {
        if (SETTING_CORS_ENABLED.get(settings) == false) {
            return CorsConfigBuilder.forOrigins().disable().build();
        }
        String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings);
        final CorsConfigBuilder builder;
        if (Strings.isNullOrEmpty(origin)) {
            builder = CorsConfigBuilder.forOrigins();
        } else if (origin.equals(ANY_ORIGIN)) {
            builder = CorsConfigBuilder.forAnyOrigin();
        } else {
            Pattern p = RestUtils.checkCorsSettingForRegex(origin);
            if (p == null) {
                builder = CorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin));
            } else {
                builder = CorsConfigBuilder.forPattern(p);
            }
        }
        if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) {
            builder.allowCredentials();
        }
        String[] strMethods = settings.getAsArray(SETTING_CORS_ALLOW_METHODS.get(settings), new String[0]);
        HttpMethod[] methods = Arrays.asList(strMethods)
            .stream()
            .map(HttpMethod::valueOf)
            .toArray(size -> new HttpMethod[size]);
        return builder.allowedRequestMethods(methods)
            .maxAge(SETTING_CORS_MAX_AGE.get(settings))
            .allowedRequestHeaders(settings.getAsArray(SETTING_CORS_ALLOW_HEADERS.get(settings), new String[0]))
            .shortCircuit()
            .build();
    }
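
As a usage sketch of the builder chain this method assembles (the origin pattern and header values below are hypothetical, not defaults):

    CorsConfig config = CorsConfigBuilder
        .forPattern(Pattern.compile("https?://localhost(:[0-9]+)?"))
        .allowCredentials()
        .allowedRequestMethods(HttpMethod.GET, HttpMethod.POST, HttpMethod.OPTIONS)
        .allowedRequestHeaders("X-Requested-With", "Content-Type")
        .maxAge(1728000)          // cache preflight responses for 20 days
        .shortCircuit()           // reject invalid origins before dispatch
        .build();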

    private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
        final AtomicReference<Exception> lastException = new AtomicReference<>();
        final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
@ -365,6 +427,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
        return new HttpStats(channels == null ? 0 : channels.numberOfOpenChannels(), channels == null ? 0 : channels.totalChannels());
    }

    public CorsConfig getCorsConfig() {
        return corsConfig;
    }

    protected void dispatchRequest(HttpRequest request, HttpChannel channel) {
        httpServerAdapter.dispatchRequest(request, channel, threadPool.getThreadContext());
    }
@ -430,6 +496,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
                httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
            }
            pipeline.addLast("aggregator", httpChunkAggregator);
            if (SETTING_CORS_ENABLED.get(transport.settings())) {
                pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
            }
            pipeline.addLast("encoder", new ESHttpResponseEncoder());
            if (transport.compression) {
                pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));

@ -0,0 +1,233 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty.cors;

import org.jboss.netty.handler.codec.http.DefaultHttpHeaders;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.regex.Pattern;

/**
 * Configuration for Cross-Origin Resource Sharing (CORS).
 *
 * This class was lifted from the Netty project:
 * https://github.com/netty/netty
 */
public final class CorsConfig {

    private final Optional<Set<String>> origins;
    private final Optional<Pattern> pattern;
    private final boolean anyOrigin;
    private final boolean enabled;
    private final boolean allowCredentials;
    private final long maxAge;
    private final Set<HttpMethod> allowedRequestMethods;
    private final Set<String> allowedRequestHeaders;
    private final boolean allowNullOrigin;
    private final Map<CharSequence, Callable<?>> preflightHeaders;
    private final boolean shortCircuit;

    CorsConfig(final CorsConfigBuilder builder) {
        origins = builder.origins.map(s -> new LinkedHashSet<>(s));
        pattern = builder.pattern;
        anyOrigin = builder.anyOrigin;
        enabled = builder.enabled;
        allowCredentials = builder.allowCredentials;
        maxAge = builder.maxAge;
        allowedRequestMethods = builder.requestMethods;
        allowedRequestHeaders = builder.requestHeaders;
        allowNullOrigin = builder.allowNullOrigin;
        preflightHeaders = builder.preflightHeaders;
        shortCircuit = builder.shortCircuit;
    }

    /**
     * Determines if support for CORS is enabled.
     *
     * @return {@code true} if support for CORS is enabled, false otherwise.
     */
    public boolean isCorsSupportEnabled() {
        return enabled;
    }

    /**
     * Determines whether a wildcard origin, '*', is supported.
     *
     * @return {@code boolean} true if any origin is allowed.
     */
    public boolean isAnyOriginSupported() {
        return anyOrigin;
    }

    /**
     * Returns the set of allowed origins.
     *
     * @return {@code Set} the allowed origins.
     */
    public Optional<Set<String>> origins() {
        return origins;
    }

    /**
     * Returns whether the input origin is allowed by this configuration.
     *
     * @return {@code true} if the origin is allowed, otherwise {@code false}
     */
    public boolean isOriginAllowed(final String origin) {
        if (origins.isPresent()) {
            return origins.get().contains(origin);
        } else if (pattern.isPresent()) {
            return pattern.get().matcher(origin).matches();
        }
        return false;
    }
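
A worked example of the two match modes (hypothetical origins):

    CorsConfig explicit = CorsConfigBuilder.forOrigins("http://a.example", "http://b.example").build();
    explicit.isOriginAllowed("http://a.example");   // true: set membership
    explicit.isOriginAllowed("http://c.example");   // false

    CorsConfig byPattern = CorsConfigBuilder.forPattern(Pattern.compile("https?://.*\\.example")).build();
    byPattern.isOriginAllowed("http://c.example");  // true: full-string regex match

Note that for a forAnyOrigin() config both optionals are empty and this method returns false; callers such as CorsHandler check isAnyOriginSupported() before falling back to this lookup.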

    /**
     * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
     * from the local file system.
     *
     * If isNullOriginAllowed is true then the server will respond with the wildcard for the
     * CORS response header 'Access-Control-Allow-Origin'.
     *
     * @return {@code true} if a 'null' origin should be supported.
     */
    public boolean isNullOriginAllowed() {
        return allowNullOrigin;
    }

    /**
     * Determines if cookies are supported for CORS requests.
     *
     * By default cookies are not included in CORS requests, but if isCredentialsAllowed returns
     * true cookies will be added to CORS requests. Setting this value to true will set the
     * CORS 'Access-Control-Allow-Credentials' response header to true.
     *
     * Please note that cookie support needs to be enabled on the client side as well.
     * The client needs to opt-in to send cookies by calling:
     * <pre>
     * xhr.withCredentials = true;
     * </pre>
     * The default value for 'withCredentials' is false, in which case no cookies are sent.
     * Setting this to true will include cookies in cross-origin requests.
     *
     * @return {@code true} if cookies are supported.
     */
    public boolean isCredentialsAllowed() {
        return allowCredentials;
    }

    /**
     * Gets the maxAge setting.
     *
     * When making a preflight request the client has to perform two requests, which can be inefficient.
     * This setting sets the CORS 'Access-Control-Max-Age' response header and enables the
     * caching of the preflight response for the specified time. During this time no preflight
     * request will be made.
     *
     * @return {@code long} the time in seconds that a preflight request may be cached.
     */
    public long maxAge() {
        return maxAge;
    }

    /**
     * Returns the allowed set of Request Methods: the HTTP methods that should be returned in the
     * CORS 'Access-Control-Allow-Methods' response header.
     *
     * @return {@code Set} of {@link HttpMethod}s that represent the allowed Request Methods.
     */
    public Set<HttpMethod> allowedRequestMethods() {
        return Collections.unmodifiableSet(allowedRequestMethods);
    }

    /**
     * Returns the allowed set of Request Headers.
     *
     * The header names returned from this method will be used to set the CORS
     * 'Access-Control-Allow-Headers' response header.
     *
     * @return {@code Set<String>} of strings that represent the allowed Request Headers.
     */
    public Set<String> allowedRequestHeaders() {
        return Collections.unmodifiableSet(allowedRequestHeaders);
    }

    /**
     * Returns HTTP response headers that should be added to a CORS preflight response.
     *
     * @return {@link HttpHeaders} the HTTP response headers to be added.
     */
    public HttpHeaders preflightResponseHeaders() {
        if (preflightHeaders.isEmpty()) {
            return HttpHeaders.EMPTY_HEADERS;
        }
        final HttpHeaders preflightHeaders = new DefaultHttpHeaders();
        for (Map.Entry<CharSequence, Callable<?>> entry : this.preflightHeaders.entrySet()) {
            final Object value = getValue(entry.getValue());
            if (value instanceof Iterable) {
                preflightHeaders.add(entry.getKey().toString(), (Iterable<?>) value);
            } else {
                preflightHeaders.add(entry.getKey().toString(), value);
            }
        }
        return preflightHeaders;
    }

    /**
     * Determines whether an invalid CORS request should be rejected before any further
     * processing takes place.
     *
     * CORS headers are set after a request is processed. This may not always be desired,
     * and this setting will check that the Origin is valid; if it is not valid, no
     * further processing will take place and an error will be returned to the calling client.
     *
     * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
     */
    public boolean isShortCircuit() {
        return shortCircuit;
    }

    private static <T> T getValue(final Callable<T> callable) {
        try {
            return callable.call();
        } catch (final Exception e) {
            throw new IllegalStateException("Could not generate value for callable [" + callable + ']', e);
        }
    }

    @Override
    public String toString() {
        return "CorsConfig[enabled=" + enabled +
            ", origins=" + origins +
            ", anyOrigin=" + anyOrigin +
            ", isCredentialsAllowed=" + allowCredentials +
            ", maxAge=" + maxAge +
            ", allowedRequestMethods=" + allowedRequestMethods +
            ", allowedRequestHeaders=" + allowedRequestHeaders +
            ", preflightHeaders=" + preflightHeaders + ']';
    }
}

@ -0,0 +1,356 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty.cors;

import org.jboss.netty.handler.codec.http.HttpMethod;

import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.regex.Pattern;

/**
 * Builder used to configure and build a {@link CorsConfig} instance.
 *
 * This class was lifted from the Netty project:
 * https://github.com/netty/netty
 */
public final class CorsConfigBuilder {

    /**
     * Creates a Builder instance with its origin set to '*'.
     *
     * @return Builder to support method chaining.
     */
    public static CorsConfigBuilder forAnyOrigin() {
        return new CorsConfigBuilder();
    }

    /**
     * Creates a {@link CorsConfigBuilder} instance with the specified origin.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public static CorsConfigBuilder forOrigin(final String origin) {
        if ("*".equals(origin)) {
            return new CorsConfigBuilder();
        }
        return new CorsConfigBuilder(origin);
    }


    /**
     * Create a {@link CorsConfigBuilder} instance with the specified pattern origin.
     *
     * @param pattern the regular expression pattern to match incoming origins on.
     * @return {@link CorsConfigBuilder} with the configured origin pattern.
     */
    public static CorsConfigBuilder forPattern(final Pattern pattern) {
        if (pattern == null) {
            throw new IllegalArgumentException("CORS pattern cannot be null");
        }
        return new CorsConfigBuilder(pattern);
    }

    /**
     * Creates a {@link CorsConfigBuilder} instance with the specified origins.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public static CorsConfigBuilder forOrigins(final String... origins) {
        return new CorsConfigBuilder(origins);
    }

    Optional<Set<String>> origins;
    Optional<Pattern> pattern;
    final boolean anyOrigin;
    boolean allowNullOrigin;
    boolean enabled = true;
    boolean allowCredentials;
    long maxAge;
    final Set<HttpMethod> requestMethods = new HashSet<>();
    final Set<String> requestHeaders = new HashSet<>();
    final Map<CharSequence, Callable<?>> preflightHeaders = new HashMap<>();
    private boolean noPreflightHeaders;
    boolean shortCircuit;

    /**
     * Creates a new Builder instance with the origin passed in.
     *
     * @param origins the origin to be used for this builder.
     */
    CorsConfigBuilder(final String... origins) {
        this.origins = Optional.of(new LinkedHashSet<>(Arrays.asList(origins)));
        pattern = Optional.empty();
        anyOrigin = false;
    }

    /**
     * Creates a new Builder instance allowing any origin, "*", which is the
     * wildcard origin.
     *
     */
    CorsConfigBuilder() {
        anyOrigin = true;
        origins = Optional.empty();
        pattern = Optional.empty();
    }

    /**
     * Creates a new Builder instance allowing any origin that matches the pattern.
     *
     * @param pattern the pattern to match against for incoming origins.
     */
    CorsConfigBuilder(final Pattern pattern) {
        this.pattern = Optional.of(pattern);
        origins = Optional.empty();
        anyOrigin = false;
    }

    /**
     * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
     * from the local file system. Calling this method will enable a successful CORS response
     * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    CorsConfigBuilder allowNullOrigin() {
        allowNullOrigin = true;
        return this;
    }

    /**
     * Disables CORS support.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder disable() {
        enabled = false;
        return this;
    }

    /**
     * By default cookies are not included in CORS requests, but this method will enable cookies to
     * be added to CORS requests. Calling this method will set the CORS 'Access-Control-Allow-Credentials'
     * response header to true.
     *
     * Please note that cookie support needs to be enabled on the client side as well.
     * The client needs to opt-in to send cookies by calling:
     * <pre>
     * xhr.withCredentials = true;
     * </pre>
     * The default value for 'withCredentials' is false, in which case no cookies are sent.
     * Setting this to true will include cookies in cross-origin requests.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder allowCredentials() {
        allowCredentials = true;
        return this;
    }

    /**
     * When making a preflight request the client has to perform two requests, which can be inefficient.
     * This setting sets the CORS 'Access-Control-Max-Age' response header and enables the
     * caching of the preflight response for the specified time. During this time no preflight
     * request will be made.
     *
     * @param max the maximum time, in seconds, that the preflight response may be cached.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder maxAge(final long max) {
        maxAge = max;
        return this;
    }

    /**
     * Specifies the allowed set of HTTP Request Methods that should be returned in the
     * CORS 'Access-Control-Allow-Methods' response header.
     *
     * @param methods the {@link HttpMethod}s that should be allowed.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder allowedRequestMethods(final HttpMethod... methods) {
        requestMethods.addAll(Arrays.asList(methods));
        return this;
    }

    /**
     * Specifies the headers that should be returned in the CORS 'Access-Control-Allow-Headers'
     * response header.
     *
     * If a client specifies headers on the request, for example by calling:
     * <pre>
     * xhr.setRequestHeader('My-Custom-Header', "SomeValue");
     * </pre>
     * the server will receive the above header name in the 'Access-Control-Request-Headers' of the
     * preflight request. The server will then decide if it allows this header to be sent for the
     * real request (remember that a preflight is not the real request but a request asking the server
     * if it allows the actual request).
     *
     * @param headers the headers to be added to the preflight 'Access-Control-Allow-Headers' response header.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder allowedRequestHeaders(final String... headers) {
        requestHeaders.addAll(Arrays.asList(headers));
        return this;
    }

    /**
     * Returns HTTP response headers that should be added to a CORS preflight response.
     *
     * An intermediary like a load balancer might require that a CORS preflight request
     * have certain headers set. This enables such headers to be added.
     *
     * @param name the name of the HTTP header.
     * @param values the values for the HTTP header.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Object... values) {
        if (values.length == 1) {
            preflightHeaders.put(name, new ConstantValueGenerator(values[0]));
        } else {
            preflightResponseHeader(name, Arrays.asList(values));
        }
        return this;
    }

    /**
     * Returns HTTP response headers that should be added to a CORS preflight response.
     *
     * An intermediary like a load balancer might require that a CORS preflight request
     * have certain headers set. This enables such headers to be added.
     *
     * @param name the name of the HTTP header.
     * @param value the values for the HTTP header.
     * @param <T> the type of values that the Iterable contains.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public <T> CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Iterable<T> value) {
        preflightHeaders.put(name, new ConstantValueGenerator(value));
        return this;
    }

    /**
     * Returns HTTP response headers that should be added to a CORS preflight response.
     *
     * An intermediary like a load balancer might require that a CORS preflight request
     * have certain headers set. This enables such headers to be added.
     *
     * Some values must be dynamically created when the HTTP response is created, for
     * example the 'Date' response header. This can be accomplished by using a Callable
     * which will have its 'call' method invoked when the HTTP response is created.
     *
     * @param name the name of the HTTP header.
     * @param valueGenerator a Callable which will be invoked at HTTP response creation.
     * @param <T> the type of the value that the Callable can return.
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public <T> CorsConfigBuilder preflightResponseHeader(final CharSequence name, final Callable<T> valueGenerator) {
        preflightHeaders.put(name, valueGenerator);
        return this;
    }
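
A usage sketch covering the three overloads (the header names here are hypothetical):

    CorsConfigBuilder builder = CorsConfigBuilder.forAnyOrigin()
        .preflightResponseHeader("X-Proxy-Hint", "fixed-value")                // single constant value
        .preflightResponseHeader("X-Multi-Valued", Arrays.asList("a", "b"))    // constant iterable
        .preflightResponseHeader("date", (Callable<Date>) Date::new);          // generated per response

The explicit Callable cast disambiguates the lambda-friendly overload from the varargs one; the first two forms are wrapped in ConstantValueGenerator internally, while the Callable is invoked each time preflightResponseHeaders() builds a response.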

    /**
     * Specifies that no preflight response headers should be added to a preflight response.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder noPreflightResponseHeaders() {
        noPreflightHeaders = true;
        return this;
    }

    /**
     * Specifies that an invalid CORS request should be rejected before any further
     * processing takes place.
     *
     * CORS headers are set after a request is processed. This may not always be desired,
     * and this setting will check that the Origin is valid; if it is not valid, no
     * further processing will take place and an error will be returned to the calling client.
     *
     * @return {@link CorsConfigBuilder} to support method chaining.
     */
    public CorsConfigBuilder shortCircuit() {
        shortCircuit = true;
        return this;
    }

    /**
     * Builds a {@link CorsConfig} with settings specified by previous method calls.
     *
     * @return {@link CorsConfig} the configured CorsConfig instance.
     */
    public CorsConfig build() {
        if (preflightHeaders.isEmpty() && !noPreflightHeaders) {
            preflightHeaders.put("date", DateValueGenerator.INSTANCE);
            preflightHeaders.put("content-length", new ConstantValueGenerator("0"));
        }
        return new CorsConfig(this);
    }
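
A sketch of the defaults build() seeds unless noPreflightResponseHeaders() was called (the lookups below are illustrative):

    CorsConfig config = CorsConfigBuilder.forAnyOrigin().build();
    config.preflightResponseHeaders().get("date");             // freshly generated Date per response
    config.preflightResponseHeaders().get("content-length");   // constant "0"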

    /**
     * This class is used for preflight HTTP response values that do not need to be
     * generated, but instead the value is "static" in that the same value will be returned
     * for each call.
     */
    private static final class ConstantValueGenerator implements Callable<Object> {

        private final Object value;

        /**
         * Sole constructor.
         *
         * @param value the value that will be returned when the call method is invoked.
         */
        private ConstantValueGenerator(final Object value) {
            if (value == null) {
                throw new IllegalArgumentException("value must not be null");
            }
            this.value = value;
        }

        @Override
        public Object call() {
            return value;
        }
    }

    /**
     * This callable is used for the DATE preflight HTTP response header.
     * Its value must be generated when the response is generated, and hence will be
     * different for every call.
     */
    private static final class DateValueGenerator implements Callable<Date> {

        static final DateValueGenerator INSTANCE = new DateValueGenerator();

        @Override
        public Date call() throws Exception {
            return new Date();
        }
    }
}

@ -0,0 +1,231 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty.cors;

import org.elasticsearch.common.Strings;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;

import java.util.List;
import java.util.stream.Collectors;

import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.USER_AGENT;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.VARY;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;

/**
 * Handles <a href="http://www.w3.org/TR/cors/">Cross Origin Resource Sharing</a> (CORS) requests.
 * <p>
 * This handler can be configured using a {@link CorsConfig}, please
 * refer to this class for details about the configuration options available.
 *
 * This code was borrowed from Netty 4 and refactored to work for Elasticsearch's Netty 3 setup.
 */
public class CorsHandler extends SimpleChannelUpstreamHandler {

    public static final String ANY_ORIGIN = "*";
    private final CorsConfig config;

    private HttpRequest request;

    /**
     * Creates a new instance with the specified {@link CorsConfig}.
     */
    public CorsHandler(final CorsConfig config) {
        if (config == null) {
            throw new IllegalArgumentException("Config cannot be null");
        }
        this.config = config;
    }

    @Override
    public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws Exception {
        if (config.isCorsSupportEnabled() && e.getMessage() instanceof HttpRequest) {
            request = (HttpRequest) e.getMessage();
            if (RestUtils.isBrowser(request.headers().get(USER_AGENT))) {
                if (isPreflightRequest(request)) {
                    handlePreflight(ctx, request);
                    return;
                }
                if (config.isShortCircuit() && !validateOrigin()) {
                    forbidden(ctx, request);
                    return;
                }
            }
        }
        super.messageReceived(ctx, e);
    }

    public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, CorsConfig config) {
        if (!config.isCorsSupportEnabled()) {
            return;
        }
        String originHeader = request.headers().get(ORIGIN);
        if (!Strings.isNullOrEmpty(originHeader)) {
            final String originHeaderVal;
            if (config.isAnyOriginSupported()) {
                originHeaderVal = ANY_ORIGIN;
            } else if (config.isOriginAllowed(originHeader)) {
                originHeaderVal = originHeader;
            } else {
                originHeaderVal = null;
            }
            if (originHeaderVal != null) {
                resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, originHeaderVal);
            }
        }
        if (config.isCredentialsAllowed()) {
            resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
        }
    }

    private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) {
        final HttpResponse response = new DefaultHttpResponse(request.getProtocolVersion(), OK);
        if (setOrigin(response)) {
            setAllowMethods(response);
            setAllowHeaders(response);
            setAllowCredentials(response);
            setMaxAge(response);
            setPreflightHeaders(response);
            ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
        } else {
            forbidden(ctx, request);
        }
    }

    private static void forbidden(final ChannelHandlerContext ctx, final HttpRequest request) {
        ctx.getChannel().write(new DefaultHttpResponse(request.getProtocolVersion(), FORBIDDEN))
            .addListener(ChannelFutureListener.CLOSE);
    }

    /**
     * This is a non-CORS-specification feature which enables the setting of preflight
     * response headers that might be required by intermediaries.
     *
     * @param response the HttpResponse to which the preflight response headers should be added.
     */
    private void setPreflightHeaders(final HttpResponse response) {
        response.headers().add(config.preflightResponseHeaders());
    }

    private boolean setOrigin(final HttpResponse response) {
        final String origin = request.headers().get(ORIGIN);
        if (!Strings.isNullOrEmpty(origin)) {
            if ("null".equals(origin) && config.isNullOriginAllowed()) {
                setAnyOrigin(response);
                return true;
            }
            if (config.isAnyOriginSupported()) {
                if (config.isCredentialsAllowed()) {
                    echoRequestOrigin(response);
                    setVaryHeader(response);
                } else {
                    setAnyOrigin(response);
                }
                return true;
            }
            if (config.isOriginAllowed(origin)) {
                setOrigin(response, origin);
                setVaryHeader(response);
                return true;
            }
        }
        return false;
    }

    private boolean validateOrigin() {
        if (config.isAnyOriginSupported()) {
            return true;
        }

        final String origin = request.headers().get(ORIGIN);
        if (Strings.isNullOrEmpty(origin)) {
            // No Origin header: not a CORS request, so there is nothing to validate.
            return true;
        }

        if ("null".equals(origin) && config.isNullOriginAllowed()) {
            return true;
        }

        return config.isOriginAllowed(origin);
    }
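
A worked example (hypothetical origins) of what this validation admits when short-circuiting is enabled:

    // Given: CorsConfigBuilder.forOrigins("http://trusted.example").shortCircuit().build()
    //   Origin: http://trusted.example   -> valid, request is dispatched normally
    //   Origin: http://evil.example      -> invalid, forbidden() replies 403 FORBIDDEN
    //   no Origin header                 -> treated as a non-CORS request and allowed
    //   Origin: null                     -> allowed only if allowNullOrigin() was configured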

    private void echoRequestOrigin(final HttpResponse response) {
        setOrigin(response, request.headers().get(ORIGIN));
    }

    private static void setVaryHeader(final HttpResponse response) {
        response.headers().set(VARY, ORIGIN);
    }

    private static void setAnyOrigin(final HttpResponse response) {
        setOrigin(response, ANY_ORIGIN);
    }

    private static void setOrigin(final HttpResponse response, final String origin) {
        response.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, origin);
    }

    private void setAllowCredentials(final HttpResponse response) {
        if (config.isCredentialsAllowed()
            && !response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN).equals(ANY_ORIGIN)) {
            response.headers().set(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
        }
    }

    private static boolean isPreflightRequest(final HttpRequest request) {
        final HttpHeaders headers = request.headers();
        return request.getMethod().equals(HttpMethod.OPTIONS) &&
            headers.contains(HttpHeaders.Names.ORIGIN) &&
            headers.contains(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD);
    }
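
For reference, the shape of a request this predicate classifies as a preflight (the path and origin below are hypothetical):

    // Preflight (routed to handlePreflight above):
    //   OPTIONS /twitter/_search HTTP/1.1
    //   Origin: http://app.example
    //   Access-Control-Request-Method: POST
    // Not preflights: an OPTIONS request missing either header, or a GET carrying
    // an Origin header (a "simple" CORS request); both fall through to normal dispatch.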

    private void setAllowMethods(final HttpResponse response) {
        response.headers().set(ACCESS_CONTROL_ALLOW_METHODS,
            String.join(", ", config.allowedRequestMethods().stream()
                .map(HttpMethod::getName)
                .collect(Collectors.toList())).trim());
    }

    private void setAllowHeaders(final HttpResponse response) {
        response.headers().set(ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders());
    }

    private void setMaxAge(final HttpResponse response) {
        response.headers().set(ACCESS_CONTROL_MAX_AGE, config.maxAge());
    }

}

@ -182,7 +182,7 @@ public final class IndexSettings {
|
|||
this.index = indexMetaData.getIndex();
|
||||
version = Version.indexCreated(settings);
|
||||
logger = Loggers.getLogger(getClass(), settings, index);
|
||||
nodeName = settings.get("name", "");
|
||||
nodeName = settings.get("node.name", "");
|
||||
this.indexMetaData = indexMetaData;
|
||||
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
|
||||
isShadowReplicaIndex = IndexMetaData.isIndexUsingShadowReplicas(settings);
|
||||
|
|
|
@ -67,9 +67,6 @@ public final class EngineConfig {
|
|||
private final QueryCache queryCache;
|
||||
private final QueryCachingPolicy queryCachingPolicy;
|
||||
|
||||
static {
|
||||
|
||||
}
|
||||
/**
|
||||
* Index setting to change the low level lucene codec used for writing new segments.
|
||||
* This setting is <b>not</b> realtime updateable.
|
||||
|
|
|
@ -320,7 +320,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
            if (fieldType().indexOptions() != IndexOptions.NONE) {
                return getCachedStream().setDoubleValue(number);
            }

@ -332,7 +332,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
            if (fieldType().indexOptions() != IndexOptions.NONE) {
                return getCachedStream().setFloatValue(number);
            }

@ -340,7 +340,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
            if (fieldType().indexOptions() != IndexOptions.NONE) {
                return getCachedStream().setIntValue(number);
            }

@ -323,7 +323,7 @@ public class LongFieldMapper extends NumberFieldMapper {
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
            if (fieldType().indexOptions() != IndexOptions.NONE) {
                return getCachedStream().setLongValue(number);
            }

@ -408,7 +408,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) {
            return null;
        }

@ -332,7 +332,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
        }

        @Override
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) throws IOException {
        public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
            if (fieldType().indexOptions() != IndexOptions.NONE) {
                return getCachedStream().setIntValue(number);
            }
@ -42,17 +42,17 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
@ -189,9 +189,17 @@ public class IndexShard extends AbstractIndexShardComponent {

    private final ShardPath path;

    private final IndexShardOperationCounter indexShardOperationCounter;
    private final SuspendableRefContainer suspendableRefContainer;

    private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
    private static final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
    // for primaries, we only allow to write when actually started (so the cluster has decided we started)
    // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
    // in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target
    // which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries).
    public static final EnumSet<IndexShardState> writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
    // replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent
    // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source
    private static final EnumSet<IndexShardState> writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);

    private final IndexSearcherWrapper searcherWrapper;
@ -250,7 +258,7 @@ public class IndexShard extends AbstractIndexShardComponent {
        }

        this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
        this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
        this.suspendableRefContainer = new SuspendableRefContainer();
        this.provider = provider;
        this.searcherWrapper = indexSearcherWrapper;
        this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
@ -321,6 +329,8 @@ public class IndexShard extends AbstractIndexShardComponent {
     * Updates the shards routing entry. This mutate the shards internal state depending
     * on the changes that get introduced by the new routing value. This method will persist shard level metadata
     * unless explicitly disabled.
     *
     * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
     */
    public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) {
        final ShardRouting currentRouting = this.shardRouting;
@ -368,6 +378,14 @@ public class IndexShard extends AbstractIndexShardComponent {
                }
            }
        }

        if (state == IndexShardState.RELOCATED &&
            (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
            // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
            // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
            // active primaries.
            throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
        }
        this.shardRouting = newRouting;
        indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
    } finally {
@ -404,12 +422,16 @@ public class IndexShard extends AbstractIndexShardComponent {
    }

    public IndexShard relocated(String reason) throws IndexShardNotStartedException {
        synchronized (mutex) {
            if (state != IndexShardState.STARTED) {
                throw new IndexShardNotStartedException(shardId, state);
        try (Releasable block = suspendableRefContainer.blockAcquisition()) {
            // no shard operation locks are being held here, move state from started to relocated
            synchronized (mutex) {
                if (state != IndexShardState.STARTED) {
                    throw new IndexShardNotStartedException(shardId, state);
                }
                changeState(IndexShardState.RELOCATED, reason);
            }
            changeState(IndexShardState.RELOCATED, reason);
        }

        return this;
    }
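The rewritten relocated(...) only flips the state inside blockAcquisition(), i.e. while no operation locks are held and none can be granted. As a rough analogy, and only under the assumption that SuspendableRefContainer behaves like a shared/exclusive lock (this sketch is not its actual implementation), the handshake looks like this:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class RelocationHandshakeSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile String state = "STARTED";

    // indexing operations hold the shared side, like acquireUninterruptibly()
    void runOperation(Runnable op) {
        lock.readLock().lock();
        try {
            op.run();
        } finally {
            lock.readLock().unlock();
        }
    }

    // relocation takes the exclusive side, like blockAcquisition(): no operation
    // lock can be held (or newly granted) while the state flips to RELOCATED
    void relocate() {
        lock.writeLock().lock();
        try {
            if (!"STARTED".equals(state)) {
                throw new IllegalStateException("shard is " + state + ", not STARTED");
            }
            state = "RELOCATED";
        } finally {
            lock.writeLock().unlock();
        }
    }
}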
@ -796,7 +818,6 @@ public class IndexShard extends AbstractIndexShardComponent {
                refreshScheduledFuture = null;
            }
            changeState(IndexShardState.CLOSED, reason);
            indexShardOperationCounter.decRef();
        } finally {
            final Engine engine = this.currentEngineReference.getAndSet(null);
            try {
@ -810,7 +831,6 @@ public class IndexShard extends AbstractIndexShardComponent {
        }
    }


    public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
        if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
            refresh("percolator_load_queries");
@ -967,16 +987,17 @@ public class IndexShard extends AbstractIndexShardComponent {
        IndexShardState state = this.state; // one time volatile read

        if (origin == Engine.Operation.Origin.PRIMARY) {
            // for primaries, we only allow to write when actually started (so the cluster has decided we started)
            // otherwise, we need to retry, we also want to still allow to index if we are relocated in case it fails
            if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
                throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
            if (writeAllowedStatesForPrimary.contains(state) == false) {
                throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]");
            }
        } else if (origin == Engine.Operation.Origin.RECOVERY) {
            if (state != IndexShardState.RECOVERING) {
                throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
            }
        } else {
            // for replicas, we allow to write also while recovering, since we index also during recovery to replicas
            // and rely on version checks to make sure its consistent
            if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
                throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
            assert origin == Engine.Operation.Origin.REPLICA;
            if (writeAllowedStatesForReplica.contains(state) == false) {
                throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]");
            }
        }
    }
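The before/after pair above replaces ad-hoc state comparisons with membership tests against the EnumSets declared earlier, which also makes the error message self-describing. A standalone sketch of the pattern (illustrative enum and exception types, not the IndexShard API):

import java.util.EnumSet;

class WriteStateCheckSketch {
    enum State { CREATED, RECOVERING, POST_RECOVERY, STARTED, RELOCATED, CLOSED }

    // mirrors writeAllowedStatesForPrimary above
    static final EnumSet<State> WRITE_ALLOWED_FOR_PRIMARY =
        EnumSet.of(State.RECOVERING, State.POST_RECOVERY, State.STARTED);

    static void verifyPrimaryWrite(State state) {
        // EnumSet.contains is a constant-time bit test, and the set's toString
        // yields the self-describing message format used in the new code
        if (WRITE_ALLOWED_FOR_PRIMARY.contains(state) == false) {
            throw new IllegalStateException(
                "operation only allowed when shard state is one of " + WRITE_ALLOWED_FOR_PRIMARY + ", was [" + state + "]");
        }
    }
}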
@ -995,7 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent {
    private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException {
        IndexShardState state = this.state; // one time volatile read
        if (state == IndexShardState.CLOSED) {
            final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
            final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
            if (suppressed != null) {
                exc.addSuppressed(suppressed);
            }
@ -1390,37 +1411,21 @@ public class IndexShard extends AbstractIndexShardComponent {
            idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
    }

    private static class IndexShardOperationCounter extends AbstractRefCounted {
        final private ESLogger logger;
        private final ShardId shardId;

        public IndexShardOperationCounter(ESLogger logger, ShardId shardId) {
            super("index-shard-operations-counter");
            this.logger = logger;
            this.shardId = shardId;
        }

        @Override
        protected void closeInternal() {
            logger.debug("operations counter reached 0, will not accept any further writes");
        }

        @Override
        protected void alreadyClosed() {
            throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed.");
    public Releasable acquirePrimaryOperationLock() {
        verifyNotClosed();
        if (shardRouting.primary() == false) {
            throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
        }
        return suspendableRefContainer.acquireUninterruptibly();
    }

    public void incrementOperationCounter() {
        indexShardOperationCounter.incRef();
    public Releasable acquireReplicaOperationLock() {
        verifyNotClosed();
        return suspendableRefContainer.acquireUninterruptibly();
    }

    public void decrementOperationCounter() {
        indexShardOperationCounter.decRef();
    }

    public int getOperationsCount() {
        return Math.max(0, indexShardOperationCounter.refCount() - 1); // refCount is incremented on creation and decremented on close
    public int getActiveOperationsCount() {
        return suspendableRefContainer.activeRefs(); // refCount is incremented on creation and decremented on close
    }

    /**
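acquirePrimaryOperationLock() and acquireReplicaOperationLock() hand back a Releasable, so a caller can scope the lock with try-with-resources. A hedged usage sketch (the surrounding write path is assumed, not shown in this hunk):

// indexShard is assumed to be a started primary here; Releasable extends
// Closeable, so the lock is released even if the operation throws
try (Releasable ignored = indexShard.acquirePrimaryOperationLock()) {
    // perform the primary-side operation while relocation cannot complete
}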
@ -29,10 +29,14 @@ import java.io.IOException;
public class IndexShardRelocatedException extends IllegalIndexShardStateException {

    public IndexShardRelocatedException(ShardId shardId) {
        super(shardId, IndexShardState.RELOCATED, "Already relocated");
        this(shardId, "Already relocated");
    }

    public IndexShardRelocatedException(ShardId shardId, String reason) {
        super(shardId, IndexShardState.RELOCATED, reason);
    }

    public IndexShardRelocatedException(StreamInput in) throws IOException{
        super(in);
    }
}
@ -20,30 +20,58 @@
package org.elasticsearch.index.similarity;

import org.apache.lucene.search.similarities.DFISimilarity;
import org.apache.lucene.search.similarities.Independence;
import org.apache.lucene.search.similarities.IndependenceChiSquared;
import org.apache.lucene.search.similarities.IndependenceSaturated;
import org.apache.lucene.search.similarities.IndependenceStandardized;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Settings;

import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;

/**
 * {@link SimilarityProvider} for the {@link DFISimilarity}.
 * <p>
 * Configuration options available:
 * <ul>
 *     <li>independence_measure</li>
 *     <li>discount_overlaps</li>
 * </ul>
 * @see DFISimilarity For more information about configuration
 */
public class DFISimilarityProvider extends AbstractSimilarityProvider {
    // the "basic models" of divergence from independence
    private static final Map<String, Independence> INDEPENDENCE_MEASURES;
    static {
        Map<String, Independence> measures = new HashMap<>();
        measures.put("standardized", new IndependenceStandardized());
        measures.put("saturated", new IndependenceSaturated());
        measures.put("chisquared", new IndependenceChiSquared());
        INDEPENDENCE_MEASURES = unmodifiableMap(measures);
    }

    private final DFISimilarity similarity;

    public DFISimilarityProvider(String name, Settings settings) {
        super(name);
        boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true);

        this.similarity = new DFISimilarity();
        Independence measure = parseIndependence(settings);
        this.similarity = new DFISimilarity(measure);
        this.similarity.setDiscountOverlaps(discountOverlaps);
    }

    private Independence parseIndependence(Settings settings) {
        String name = settings.get("independence_measure");
        Independence measure = INDEPENDENCE_MEASURES.get(name);
        if (measure == null) {
            throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]");
        }
        return measure;
    }

    @Override
    public Similarity get() {
        return similarity;
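DFISimilarityProvider resolves its independence measure through a frozen static registry plus a validating lookup, the same shape DFRSimilarityProvider uses below for basic models and after-effects. A generic, self-contained sketch of that lookup (string values stand in for the Lucene Independence classes):

import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;

final class RegistryLookupSketch {
    private static final Map<String, String> MEASURES;
    static {
        Map<String, String> m = new HashMap<>();
        m.put("standardized", "IndependenceStandardized");
        m.put("saturated", "IndependenceSaturated");
        m.put("chisquared", "IndependenceChiSquared");
        MEASURES = unmodifiableMap(m); // freeze the registry once, at class load
    }

    static String parse(String name) {
        String measure = MEASURES.get(name);
        if (measure == null) {
            // fail fast with the unsupported key, exactly as parseIndependence does
            throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]");
        }
        return measure;
    }
}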
@ -52,8 +52,8 @@ import static java.util.Collections.unmodifiableMap;
 * @see DFRSimilarity For more information about configuration
 */
public class DFRSimilarityProvider extends AbstractSimilarityProvider {
    private static final Map<String, BasicModel> MODEL_CACHE;
    private static final Map<String, AfterEffect> EFFECT_CACHE;
    private static final Map<String, BasicModel> BASIC_MODELS;
    private static final Map<String, AfterEffect> AFTER_EFFECTS;

    static {
        Map<String, BasicModel> models = new HashMap<>();

@ -64,13 +64,13 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
        models.put("in", new BasicModelIn());
        models.put("ine", new BasicModelIne());
        models.put("p", new BasicModelP());
        MODEL_CACHE = unmodifiableMap(models);
        BASIC_MODELS = unmodifiableMap(models);

        Map<String, AfterEffect> effects = new HashMap<>();
        effects.put("no", new AfterEffect.NoAfterEffect());
        effects.put("b", new AfterEffectB());
        effects.put("l", new AfterEffectL());
        EFFECT_CACHE = unmodifiableMap(effects);
        AFTER_EFFECTS = unmodifiableMap(effects);
    }

    private final DFRSimilarity similarity;

@ -91,7 +91,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
     */
    protected BasicModel parseBasicModel(Settings settings) {
        String basicModel = settings.get("basic_model");
        BasicModel model = MODEL_CACHE.get(basicModel);
        BasicModel model = BASIC_MODELS.get(basicModel);
        if (model == null) {
            throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]");
        }

@ -106,7 +106,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {
     */
    protected AfterEffect parseAfterEffect(Settings settings) {
        String afterEffect = settings.get("after_effect");
        AfterEffect effect = EFFECT_CACHE.get(afterEffect);
        AfterEffect effect = AFTER_EFFECTS.get(afterEffect);
        if (effect == null) {
            throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]");
        }
@ -39,13 +39,11 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.IndexService;
@ -93,26 +91,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

    private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {};

    // a map of mappings type we have seen per index due to cluster state
    // we need this so we won't remove types automatically created as part of the indexing process
    private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();

    // a list of shards that failed during recovery
    // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
    private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap();
    private final ConcurrentMap<ShardId, ShardRouting> failedShards = ConcurrentCollections.newConcurrentMap();
    private final RestoreService restoreService;
    private final RepositoriesService repositoriesService;

    static class FailedShard {
        public final long version;
        public final long timestamp;

        FailedShard(long version) {
            this.version = version;
            this.timestamp = System.currentTimeMillis();
        }
    }

    private final Object mutex = new Object();
    private final FailedShardHandler failedShardHandler = new FailedShardHandler();
@ -322,7 +306,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                try {
                    indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
                } catch (Throwable e) {
                    sendFailShard(shard, indexMetaData.getIndexUUID(), "failed to create index", e);
                    sendFailShard(shard, "failed to create index", e);
                }
            }
        }

@ -387,7 +371,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                // so this failure typically means wrong node level configuration or something similar
                for (IndexShard indexShard : indexService) {
                    ShardRouting shardRouting = indexShard.routingEntry();
                    failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to update mappings", t);
                    failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
                }
            }
        }
@ -436,6 +420,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            failedShards.clear();
            return;
        }

        DiscoveryNodes nodes = event.state().nodes();

        for (final ShardRouting shardRouting : routingNode) {
@ -455,12 +440,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            if (!indexService.hasShard(shardId) && shardRouting.started()) {
                if (failedShards.containsKey(shardRouting.shardId())) {
                    if (nodes.masterNode() != null) {
                        shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
                            "master " + nodes.masterNode() + " marked shard as started, but shard has previous failed. resending shard failure.", null, SHARD_STATE_ACTION_LISTENER);
                        String message = "master " + nodes.masterNode() + " marked shard as started, but shard has previous failed. resending shard failure";
                        logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
                        shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
                    }
                } else {
                    // the master thinks we are started, but we don't have this shard at all, mark it as failed
                    sendFailShard(shardRouting, indexMetaData.getIndexUUID(), "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
                    sendFailShard(shardRouting, "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
                }
                continue;
            }
@ -492,7 +478,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                // shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there.
                assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() :
                    "shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
                indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
                try {
                    indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
                } catch (Throwable e) {
                    failAndRemoveShard(shardRouting, indexService, true, "failed updating shard routing entry", e);
                }
            }
        }
@ -503,40 +493,29 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
    }

    private void cleanFailedShards(final ClusterChangedEvent event) {
        RoutingTable routingTable = event.state().routingTable();
        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
        if (routingNode == null) {
            failedShards.clear();
            return;
        }
        DiscoveryNodes nodes = event.state().nodes();
        long now = System.currentTimeMillis();
        String localNodeId = nodes.localNodeId();
        Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
        shards:
        while (iterator.hasNext()) {
            Map.Entry<ShardId, FailedShard> entry = iterator.next();
            FailedShard failedShard = entry.getValue();
            IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
            if (indexRoutingTable != null) {
                IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
                if (shardRoutingTable != null) {
                    for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
                        if (localNodeId.equals(shardRouting.currentNodeId())) {
                            // we have a timeout here just to make sure we don't have dangled failed shards for some reason
                            // its just another safely layer
                            if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
                                // It's the same failed shard - keep it if it hasn't timed out
                                continue shards;
                            } else {
                                // Different version or expired, remove it
                                break;
                            }
                        }
                    }
                }
            }
        RoutingTable routingTable = event.state().routingTable();
        for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
            Map.Entry<ShardId, ShardRouting> entry = iterator.next();
            ShardId failedShardId = entry.getKey();
            ShardRouting failedShardRouting = entry.getValue();
            IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
            if (indexRoutingTable == null) {
                iterator.remove();
                continue;
            }
            IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
            if (shardRoutingTable == null) {
                iterator.remove();
                continue;
            }
            if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
                iterator.remove();
            }
            iterator.remove();
        }
    }
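Both the old and the new cleanFailedShards bodies prune entries through the iterator while walking the failedShards ConcurrentMap. That load-bearing detail, as a runnable sketch with illustrative key/value types:

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class RemoveWhileIteratingSketch {
    static void prune(ConcurrentMap<String, Long> failed, long staleBefore) {
        for (Iterator<Map.Entry<String, Long>> it = failed.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<String, Long> entry = it.next();
            if (entry.getValue() < staleBefore) {
                // ConcurrentHashMap iterators are weakly consistent, so remove()
                // is safe here and never throws ConcurrentModificationException
                it.remove();
            }
        }
    }

    public static void main(String[] args) {
        ConcurrentMap<String, Long> failed = new ConcurrentHashMap<>();
        failed.put("shard-0", 10L);
        failed.put("shard-1", 99L);
        prune(failed, 50L);
        System.out.println(failed); // prints {shard-1=99}
    }
}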
@ -561,7 +540,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                    indexShard.shardId(), indexShard.state(), nodes.masterNode());
            }
            if (nodes.masterNode() != null) {
                shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(),
                shardStateAction.shardStarted(shardRouting,
                    "master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started",
                    SHARD_STATE_ACTION_LISTENER);
            }

@ -588,8 +567,9 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
        if (!indexService.hasShard(shardId)) {
            if (failedShards.containsKey(shardRouting.shardId())) {
                if (nodes.masterNode() != null) {
                    shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
                        "master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resend shard failure", null, SHARD_STATE_ACTION_LISTENER);
                    String message = "master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resend shard failure";
                    logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
                    shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
                }
                return;
            }
@ -602,7 +582,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            } catch (IndexShardAlreadyExistsException e) {
                // ignore this, the method call can happen several times
            } catch (Throwable e) {
                failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed to create shard", e);
                failAndRemoveShard(shardRouting, indexService, true, "failed to create shard", e);
                return;
            }
        }
@ -626,7 +606,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            // For primaries: requests in any case are routed to both when its relocating and that way we handle
            // the edge case where its mark as relocated, and we might need to roll it back...
            // For replicas: we are recovering a backup from a primary
            RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.RELOCATION : RecoveryState.Type.REPLICA;
            RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
            RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode());
            indexShard.markAsRecovering("from " + sourceNode, recoveryState);
            recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
@ -644,7 +624,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
        threadPool.generic().execute(() -> {
            try {
                if (indexShard.recoverFromStore(nodes.localNode())) {
                    shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from store", SHARD_STATE_ACTION_LISTENER);
                    shardStateAction.shardStarted(shardRouting, "after recovery from store", SHARD_STATE_ACTION_LISTENER);
                }
            } catch (Throwable t) {
                handleRecoveryFailure(indexService, shardRouting, true, t);
@ -662,7 +642,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
                if (indexShard.restoreFromRepository(indexShardRepository, nodes.localNode())) {
                    restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), sId);
                    shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from repository", SHARD_STATE_ACTION_LISTENER);
                    shardStateAction.shardStarted(shardRouting, "after recovery from repository", SHARD_STATE_ACTION_LISTENER);
                }
            } catch (Throwable first) {
                try {
@ -732,7 +712,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

        @Override
        public void onRecoveryDone(RecoveryState state) {
            shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER);
            shardStateAction.shardStarted(shardRouting, "after recovery (replica) from node [" + state.getSourceNode() + "]", SHARD_STATE_ACTION_LISTENER);
        }

        @Override

@ -743,7 +723,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

    private void handleRecoveryFailure(IndexService indexService, ShardRouting shardRouting, boolean sendShardFailure, Throwable failure) {
        synchronized (mutex) {
            failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, sendShardFailure, "failed recovery", failure);
            failAndRemoveShard(shardRouting, indexService, sendShardFailure, "failed recovery", failure);
        }
    }
@ -764,7 +744,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

    }

    private void failAndRemoveShard(ShardRouting shardRouting, String indexUUID, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) {
    private void failAndRemoveShard(ShardRouting shardRouting, @Nullable IndexService indexService, boolean sendShardFailure, String message, @Nullable Throwable failure) {
        if (indexService != null && indexService.hasShard(shardRouting.getId())) {
            // if the indexService is null we can't remove the shard, that's fine since we might have a failure
            // when the index is remove and then we already removed the index service for that shard...
@ -777,15 +757,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            }
        }
        if (sendShardFailure) {
            sendFailShard(shardRouting, indexUUID, message, failure);
            sendFailShard(shardRouting, message, failure);
        }
    }

    private void sendFailShard(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
    private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Throwable failure) {
        try {
            logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
            failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
            shardStateAction.shardFailed(shardRouting, indexUUID, message, failure, SHARD_STATE_ACTION_LISTENER);
            failedShards.put(shardRouting.shardId(), shardRouting);
            shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
        } catch (Throwable e1) {
            logger.warn("[{}][{}] failed to mark shard as failed (because of [{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), message);
        }
@ -798,7 +778,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
            final ShardRouting shardRouting = shardFailure.routing;
            threadPool.generic().execute(() -> {
                synchronized (mutex) {
                    failAndRemoveShard(shardRouting, shardFailure.indexUUID, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
                    failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
                }
            });
        }
@ -435,7 +435,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
        if (indexShard.routingEntry().primary() == false) {
            throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
        }
        int opCount = indexShard.getOperationsCount();
        int opCount = indexShard.getActiveOperationsCount();
        logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
        return new InFlightOpsResponse(opCount);
    }
@ -61,8 +61,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe

    private final ClusterService clusterService;

    private final OngoingRecoveres ongoingRecoveries = new OngoingRecoveres();

    private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries();

    @Inject
    public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,

@ -107,11 +106,11 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
        }
        if (!targetShardRouting.initializing()) {
            logger.debug("delaying recovery of {} as it is not listed as initializing on the target node {}. known shards state is [{}]",
                    request.shardId(), request.targetNode(), targetShardRouting.state());
                request.shardId(), request.targetNode(), targetShardRouting.state());
            throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
        }

        logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
        logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
        final RecoverySourceHandler handler;
        if (shard.indexSettings().isOnSharedFilesystem()) {
            handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger);

@ -134,8 +133,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
        }
    }

    private static final class OngoingRecoveres {
    private static final class OngoingRecoveries {
        private final Map<IndexShard, Set<RecoverySourceHandler>> ongoingRecoveries = new HashMap<>();

        synchronized void add(IndexShard shard, RecoverySourceHandler handler) {
@ -393,9 +393,11 @@ public class RecoverySourceHandler {
            }
        });

        if (request.markAsRelocated()) {
            // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
        if (isPrimaryRelocation()) {
            /**
             * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and
             * target are failed (see {@link IndexShard#updateRoutingEntry}).
             */
            try {
                shard.relocated("to " + request.targetNode());
            } catch (IllegalIndexShardStateException e) {

@ -406,7 +408,11 @@ public class RecoverySourceHandler {
        }
        stopWatch.stop();
        logger.trace("[{}][{}] finalizing recovery to {}: took [{}]",
                indexName, shardId, request.targetNode(), stopWatch.totalTime());
            indexName, shardId, request.targetNode(), stopWatch.totalTime());
    }

    protected boolean isPrimaryRelocation() {
        return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION;
    }

    /**
@ -101,7 +101,7 @@ public class RecoveryState implements ToXContent, Streamable {
        STORE((byte) 0),
        SNAPSHOT((byte) 1),
        REPLICA((byte) 2),
        RELOCATION((byte) 3);
        PRIMARY_RELOCATION((byte) 3);

        private static final Type[] TYPES = new Type[Type.values().length];
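Renaming RELOCATION to PRIMARY_RELOCATION is wire-compatible because Type serializes by its byte id, not by name (see out.writeByte(recoveryType.id()) further down). A standalone sketch of that byte-id enum pattern, with a fromId lookup assumed to mirror the one behind the TYPES array:

enum TypeSketch {
    STORE((byte) 0),
    SNAPSHOT((byte) 1),
    REPLICA((byte) 2),
    PRIMARY_RELOCATION((byte) 3); // renamed from RELOCATION; id 3 is unchanged

    private final byte id;

    TypeSketch(byte id) {
        this.id = id;
    }

    public byte id() {
        return id;
    }

    public static TypeSketch fromId(byte id) {
        for (TypeSketch type : values()) {
            if (type.id == id) {
                return type;
            }
        }
        throw new IllegalArgumentException("No recovery type for id [" + id + "]");
    }
}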
@ -138,7 +138,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
        // create a new recovery status, and process...
        final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
        threadPool.generic().execute(new RecoveryRunner(recoveryId));

    }

    protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {

@ -178,7 +177,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
            return;
        }
        final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(),
                false, metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
                metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());

        final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
        try {

@ -267,7 +266,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
                onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
                return;
            }

            onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true);
        }
    }
@ -84,8 +84,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
        return 0;
    }

    private boolean isPrimaryRelocation() {
        return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary();
    }

}
@ -41,8 +41,6 @@ public class StartRecoveryRequest extends TransportRequest {

    private DiscoveryNode targetNode;

    private boolean markAsRelocated;

    private Store.MetadataSnapshot metadataSnapshot;

    private RecoveryState.Type recoveryType;

@ -56,12 +54,11 @@ public class StartRecoveryRequest extends TransportRequest {
     * @param sourceNode The node to recover from
     * @param targetNode The node to recover to
     */
    public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
    public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
        this.recoveryId = recoveryId;
        this.shardId = shardId;
        this.sourceNode = sourceNode;
        this.targetNode = targetNode;
        this.markAsRelocated = markAsRelocated;
        this.recoveryType = recoveryType;
        this.metadataSnapshot = metadataSnapshot;
    }

@ -82,10 +79,6 @@ public class StartRecoveryRequest extends TransportRequest {
        return targetNode;
    }

    public boolean markAsRelocated() {
        return markAsRelocated;
    }

    public RecoveryState.Type recoveryType() {
        return recoveryType;
    }

@ -101,7 +94,6 @@ public class StartRecoveryRequest extends TransportRequest {
        shardId = ShardId.readShardId(in);
        sourceNode = DiscoveryNode.readNode(in);
        targetNode = DiscoveryNode.readNode(in);
        markAsRelocated = in.readBoolean();
        metadataSnapshot = new Store.MetadataSnapshot(in);
        recoveryType = RecoveryState.Type.fromId(in.readByte());

@ -114,7 +106,6 @@ public class StartRecoveryRequest extends TransportRequest {
        shardId.writeTo(out);
        sourceNode.writeTo(out);
        targetNode.writeTo(out);
        out.writeBoolean(markAsRelocated);
        metadataSnapshot.writeTo(out);
        out.writeByte(recoveryType.id());
    }
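Dropping markAsRelocated works only because readFrom and writeTo are edited in lockstep: the wire format is purely positional. A minimal illustration of that constraint using plain java.io streams (not the Elasticsearch StreamInput/StreamOutput):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class PositionalWireSketch {
    static void write(DataOutputStream out, long recoveryId, byte recoveryType) throws IOException {
        out.writeLong(recoveryId);
        // out.writeBoolean(markAsRelocated); // removed: the reader must not expect it
        out.writeByte(recoveryType);
    }

    static byte readType(DataInputStream in) throws IOException {
        in.readLong();        // recoveryId
        return in.readByte(); // recoveryType; off by one byte if either side kept the boolean
    }
}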
@ -20,6 +20,7 @@
package org.elasticsearch.ingest;

import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ingest.DeletePipelineRequest;

@ -36,10 +37,8 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.PipelineFactoryError;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.TemplateService;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.script.ScriptService;

import java.io.Closeable;
@ -104,8 +103,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
        for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
            try {
                pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactoryRegistry));
            } catch (ElasticsearchParseException e) {
                throw e;
            } catch (Exception e) {
                throw new RuntimeException(e);
                throw new ElasticsearchParseException("Error updating pipeline with id [" + pipeline.getId() + "]", e);
            }
        }
        this.pipelines = Collections.unmodifiableMap(pipelines);

@ -154,9 +155,10 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
    public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
        // validates the pipeline and processor configuration before submitting a cluster update task:
        Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2();
        WritePipelineResponse response = validatePipelineResponse(request.getId(), pipelineConfig);
        if (response != null) {
            listener.onResponse(response);
        try {
            factory.create(request.getId(), pipelineConfig, processorFactoryRegistry);
        } catch(Exception e) {
            listener.onFailure(e);
            return;
        }
        clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask<WritePipelineResponse>(request, listener) {
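The new put(...) path above parses the pipeline eagerly and fails the listener before any cluster state update task is submitted. The shape of that pattern, as a hedged standalone sketch (Listener and the inline validation are illustrative, not the Elasticsearch APIs):

import java.util.Map;

final class ValidateBeforeSubmitSketch {
    interface Listener {
        void onResponse(String acknowledgement);
        void onFailure(Exception e);
    }

    static void put(String id, Map<String, Object> config, Listener listener) {
        try {
            // stand-in for factory.create(...): surface bad configs to the caller
            if (config.containsKey("processors") == false) {
                throw new IllegalArgumentException("[processors] required property is missing");
            }
        } catch (Exception e) {
            listener.onFailure(e);
            return; // nothing is submitted; the cluster state never sees the bad pipeline
        }
        listener.onResponse("acknowledged for [" + id + "]"); // stand-in for the update task
    }
}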
@ -234,16 +236,4 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust
        }
        return result;
    }

    WritePipelineResponse validatePipelineResponse(String id, Map<String, Object> config) {
        try {
            factory.create(id, config, processorFactoryRegistry);
            return null;
        } catch (ConfigurationPropertyException e) {
            return new WritePipelineResponse(new PipelineFactoryError(e));
        } catch (Exception e) {
            return new WritePipelineResponse(new PipelineFactoryError(e.getMessage()));
        }
    }

}
@ -19,7 +19,8 @@

package org.elasticsearch.ingest.core;

import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;

import java.util.List;
import java.util.Map;

@ -32,7 +33,7 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified optional property from the specified configuration map.
     *
     * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
     * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
     */
    public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);

@ -42,8 +43,8 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified property from the specified configuration map.
     *
     * If the property value isn't of type string an {@link ConfigurationPropertyException} is thrown.
     * If the property is missing an {@link ConfigurationPropertyException} is thrown
     * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        return readStringProperty(processorType, processorTag, configuration, propertyName, null);

@ -52,15 +53,15 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified property from the specified configuration map.
     *
     * If the property value isn't of type string a {@link ConfigurationPropertyException} is thrown.
     * If the property is missing and no default value has been specified a {@link ConfigurationPropertyException} is thrown
     * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
     * If the property is missing and no default value has been specified a {@link ElasticsearchParseException} is thrown
     */
    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) {
        Object value = configuration.remove(propertyName);
        if (value == null && defaultValue != null) {
            return defaultValue;
        } else if (value == null) {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
            throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
        }
        return readString(processorType, processorTag, propertyName, value);
    }
@ -72,13 +73,13 @@ public final class ConfigurationUtils {
        if (value instanceof String) {
            return (String) value;
        }
        throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
        throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
    }

    /**
     * Returns and removes the specified property of type list from the specified configuration map.
     *
     * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
     * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown.
     */
    public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);

@ -91,13 +92,13 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified property of type list from the specified configuration map.
     *
     * If the property value isn't of type list an {@link ConfigurationPropertyException} is thrown.
     * If the property is missing an {@link ConfigurationPropertyException} is thrown
     * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
    public static <T> List<T> readList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
            throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
        }

        return readList(processorType, processorTag, propertyName, value);
@ -109,20 +110,20 @@ public final class ConfigurationUtils {
            List<T> stringList = (List<T>) value;
            return stringList;
        } else {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
            throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
        }
    }

    /**
     * Returns and removes the specified property of type map from the specified configuration map.
     *
     * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
     * If the property is missing an {@link ConfigurationPropertyException} is thrown
     * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
    public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
            throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
        }

        return readMap(processorType, processorTag, propertyName, value);

@ -131,7 +132,7 @@ public final class ConfigurationUtils {
    /**
     * Returns and removes the specified property of type map from the specified configuration map.
     *
     * If the property value isn't of type map an {@link ConfigurationPropertyException} is thrown.
     * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
     */
    public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);

@ -148,7 +149,7 @@ public final class ConfigurationUtils {
            Map<String, T> map = (Map<String, T>) value;
            return map;
        } else {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
            throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
        }
    }
@ -158,8 +159,23 @@ public final class ConfigurationUtils {
    public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            throw new ConfigurationPropertyException(processorType, processorTag, propertyName, "required property is missing");
            throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");
        }
        return value;
    }

    public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName, String reason) {
        ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);

        if (processorType != null) {
            exception.addHeader("processor_type", processorType);
        }
        if (processorTag != null) {
            exception.addHeader("processor_tag", processorTag);
        }
        if (propertyName != null) {
            exception.addHeader("property_name", propertyName);
        }
        return exception;
    }
}
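newConfigurationException moves the processor context out of a dedicated exception type and into response headers on a generic ElasticsearchParseException. A hedged caller-side sketch (assuming ElasticsearchException's addHeader/getHeader contract; config is an illustrative map missing the "field" property):

try {
    ConfigurationUtils.readStringProperty("set", "my-tag", config, "field");
} catch (ElasticsearchParseException e) {
    // e.getMessage()                -> "[field] required property is missing"
    // e.getHeader("processor_type") -> ["set"]
    // e.getHeader("processor_tag")  -> ["my-tag"]
    // e.getHeader("property_name")  -> ["field"]
}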
@ -19,7 +19,7 @@

package org.elasticsearch.ingest.core;

import org.elasticsearch.ingest.processor.ConfigurationPropertyException;
import org.elasticsearch.ElasticsearchParseException;

import java.util.ArrayList;
import java.util.Arrays;

@ -27,6 +27,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;


/**
 * A pipeline is a list of {@link Processor} instances grouped under a unique id.
 */

@ -84,20 +85,20 @@ public final class Pipeline {

    public final static class Factory {

        public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
        public Pipeline create(String id, Map<String, Object> config, Map<String, Processor.Factory> processorRegistry) throws Exception {
            String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
            List<Map<String, Map<String, Object>>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
            List<Processor> processors = readProcessorConfigs(processorConfigs, processorRegistry);
            List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
            List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
            if (config.isEmpty() == false) {
                throw new ConfigurationPropertyException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
                throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
            }
            CompoundProcessor compoundProcessor = new CompoundProcessor(Collections.unmodifiableList(processors), Collections.unmodifiableList(onFailureProcessors));
            return new Pipeline(id, description, compoundProcessor);
        }

        private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws ConfigurationPropertyException {
        private List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, Map<String, Processor.Factory> processorRegistry) throws Exception {
            List<Processor> processors = new ArrayList<>();
            if (processorConfigs != null) {
                for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {
@ -110,28 +111,22 @@ public final class Pipeline {
|
|||
return processors;
|
||||
}
|
||||
|
||||
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws ConfigurationPropertyException {
|
||||
private Processor readProcessor(Map<String, Processor.Factory> processorRegistry, String type, Map<String, Object> config) throws Exception {
|
||||
Processor.Factory factory = processorRegistry.get(type);
|
||||
if (factory != null) {
|
||||
List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
|
||||
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
|
||||
Processor processor;
|
||||
try {
|
||||
processor = factory.create(config);
|
||||
} catch (ConfigurationPropertyException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new ConfigurationPropertyException(e.getMessage());
|
||||
}
|
||||
processor = factory.create(config);
|
||||
if (!config.isEmpty()) {
|
||||
throw new ConfigurationPropertyException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
|
||||
throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
|
||||
}
|
||||
if (onFailureProcessors.isEmpty()) {
|
||||
return processor;
|
||||
}
|
||||
return new CompoundProcessor(Collections.singletonList(processor), onFailureProcessors);
|
||||
}
|
||||
throw new ConfigurationPropertyException("No processor type exists with name [" + type + "]");
|
||||
throw new ElasticsearchParseException("No processor type exists with name [" + type + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
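Pipeline.Factory now declares throws Exception and lets factory failures propagate unwrapped. The validation itself relies on the read helpers consuming their keys: each read removes the key it handles, so anything left in the map afterwards is an unsupported parameter. A runnable standalone sketch of that consume-and-validate pattern, with IllegalArgumentException standing in for the Elasticsearch exception types:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Sketch of the consume-and-validate pattern used by Pipeline.Factory above:
// every read removes its key, so whatever remains in the config map at the
// end is by definition an unsupported parameter.
class ConfigValidationSketch {
    static Object readObject(Map<String, Object> config, String propertyName) {
        Object value = config.remove(propertyName); // consume the key
        if (value == null) {
            throw new IllegalArgumentException("[" + propertyName + "] required property is missing");
        }
        return value;
    }

    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("field", "message");
        config.put("fiedl", "oops"); // typo: survives the reads and triggers validation

        System.out.println(readObject(config, "field")); // message
        if (!config.isEmpty()) {
            // Mirrors: "doesn't support one or more provided configuration parameters ..."
            throw new IllegalArgumentException("doesn't support one or more provided configuration parameters "
                    + Arrays.toString(config.keySet().toArray()));
        }
    }
}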

@ -1,96 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.core;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.ingest.processor.ConfigurationPropertyException;

import java.io.IOException;

public class PipelineFactoryError implements Streamable, ToXContent {
    private String reason;
    private String processorType;
    private String processorTag;
    private String processorPropertyName;

    public PipelineFactoryError() {
    }

    public PipelineFactoryError(ConfigurationPropertyException e) {
        this.reason = e.getMessage();
        this.processorType = e.getProcessorType();
        this.processorTag = e.getProcessorTag();
        this.processorPropertyName = e.getPropertyName();
    }

    public PipelineFactoryError(String reason) {
        this.reason = "Constructing Pipeline failed:" + reason;
    }

    public String getReason() {
        return reason;
    }

    public String getProcessorTag() {
        return processorTag;
    }

    public String getProcessorPropertyName() {
        return processorPropertyName;
    }

    public String getProcessorType() {
        return processorType;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        reason = in.readString();
        processorType = in.readOptionalString();
        processorTag = in.readOptionalString();
        processorPropertyName = in.readOptionalString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(reason);
        out.writeOptionalString(processorType);
        out.writeOptionalString(processorTag);
        out.writeOptionalString(processorPropertyName);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("error");
        builder.field("type", processorType);
        builder.field("tag", processorTag);
        builder.field("reason", reason);
        builder.field("property_name", processorPropertyName);
        builder.endObject();
        return builder;
    }
}
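With this deletion, factory errors no longer cross the wire as a dedicated Streamable object; the metadata now rides on ElasticsearchParseException headers (see newConfigurationException above). For reference, a standalone sketch of the optional-string pattern the deleted readFrom/writeTo relied on, assuming the usual presence-flag encoding and using DataOutputStream/DataInputStream as stand-ins for StreamOutput/StreamInput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustration of the optional-string wire pattern: a presence flag followed
// by the value, so null fields (an absent processorTag, say) cost one byte.
class OptionalStringWireSketch {
    static void writeOptionalString(DataOutputStream out, String s) throws IOException {
        out.writeBoolean(s != null);
        if (s != null) {
            out.writeUTF(s);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, "append"); // processorType present
        writeOptionalString(out, null);     // processorTag absent

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // append
        System.out.println(readOptionalString(in)); // null
    }
}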

@ -17,11 +17,8 @@
 * under the License.
 */

package org.elasticsearch.ingest.core;

import org.elasticsearch.ingest.processor.ConfigurationPropertyException;

import java.util.Map;

/**

@ -23,7 +23,6 @@ import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Processor;

import java.util.Map;

@ -31,7 +30,7 @@ import java.util.Map;
 * Base class for processors that manipulate strings and require a single "fields" array config value, which
 * holds a list of field names in string format.
 */
public abstract class AbstractStringProcessor extends AbstractProcessor {
abstract class AbstractStringProcessor extends AbstractProcessor {
    private final String field;

    protected AbstractStringProcessor(String tag, String field) {
@ -54,7 +53,7 @@ public abstract class AbstractStringProcessor extends AbstractProcessor {

    protected abstract String process(String value);

    public static abstract class Factory<T extends AbstractStringProcessor> extends AbstractProcessorFactory<T> {
    static abstract class Factory<T extends AbstractStringProcessor> extends AbstractProcessorFactory<T> {
        protected final String processorType;

        protected Factory(String processorType) {
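Dropping public here keeps the base class and its Factory package-private: the template stays internal to the processor package, and subclasses supply only the string transformation while the base class fetches and re-stores the field. A standalone sketch of that template-method shape; class names and the simplified execute signature are illustrative, operating on a plain map rather than IngestDocument:

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

// Template-method sketch: the base class owns field access, the subclass
// owns the transformation, mirroring AbstractStringProcessor.process(String).
abstract class StringProcessorSketch {
    private final String field;

    StringProcessorSketch(String field) {
        this.field = field;
    }

    // Simplified stand-in for execute(IngestDocument).
    final void execute(Map<String, Object> document) {
        Object value = document.get(field);
        if (!(value instanceof String)) {
            throw new IllegalArgumentException("field [" + field + "] is not a string");
        }
        document.put(field, process((String) value));
    }

    protected abstract String process(String value);
}

class UppercaseSketch extends StringProcessorSketch {
    UppercaseSketch(String field) {
        super(field);
    }

    @Override
    protected String process(String value) {
        return value.toUpperCase(Locale.ROOT);
    }
}

class StringProcessorDemo {
    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("message", "hello");
        new UppercaseSketch("message").execute(doc);
        System.out.println(doc.get("message")); // HELLO
    }
}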

@ -33,7 +33,7 @@ import java.util.Map;
 * provided values will be added. If the field is a scalar it will be converted to a single item list and the provided
 * values will be added to the newly created list.
 */
public class AppendProcessor extends AbstractProcessor {
public final class AppendProcessor extends AbstractProcessor {

    public static final String TYPE = "append";

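A standalone sketch of the append semantics the javadoc above describes: a scalar value is promoted to a single-item list before the new values are appended. A plain map stands in for IngestDocument and the helper name is illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class AppendSketch {
    // Promote scalar -> single-item list, then append; an existing list is
    // appended to in place, a missing field starts as an empty list.
    @SuppressWarnings("unchecked")
    static void append(Map<String, Object> document, String field, List<?> values) {
        Object existing = document.get(field);
        List<Object> list;
        if (existing == null) {
            list = new ArrayList<>();
        } else if (existing instanceof List) {
            list = (List<Object>) existing;
        } else {
            list = new ArrayList<>();
            list.add(existing); // scalar becomes a single-item list
        }
        list.addAll(values);
        document.put(field, list);
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("tags", "alpha"); // scalar
        append(doc, "tags", Arrays.asList("beta", "gamma"));
        System.out.println(doc.get("tags")); // [alpha, beta, gamma]
    }
}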

@ -1,53 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.processor;

/**
 * Exception class thrown by processor factories.
 */
public class ConfigurationPropertyException extends RuntimeException {
    private String processorType;
    private String processorTag;
    private String propertyName;

    public ConfigurationPropertyException(String processorType, String processorTag, String propertyName, String message) {
        super("[" + propertyName + "] " + message);
        this.processorTag = processorTag;
        this.processorType = processorType;
        this.propertyName = propertyName;
    }

    public ConfigurationPropertyException(String errorMessage) {
        super(errorMessage);
    }

    public String getPropertyName() {
        return propertyName;
    }

    public String getProcessorType() {
        return processorType;
    }

    public String getProcessorTag() {
        return processorTag;
    }
}