commit 1d3e58ab9f
Merge branch 'master' into feature/rank-eval

@@ -112,7 +112,6 @@ subprojects {
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
// for transport client
"org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
"org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4',
"org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
"org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',

@@ -10,6 +10,13 @@
<property name="file" value="${suppressions}" />
</module>

<!-- Checks Java files and forbids empty Javadoc comments -->
<module name="RegexpMultiline">
<property name="format" value="\/\*[\s\*]*\*\/"/>
<property name="fileExtensions" value="java"/>
<property name="message" value="Empty javadoc comments are forbidden"/>
</module>

<module name="TreeWalker">
<!-- Its our official line length! See checkstyle_suppressions.xml for the files that don't pass this. For now we
suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
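
Note on the new check above: the pattern \/\*[\s\*]*\*\/ matches any block comment whose body consists only of whitespace and asterisks. A minimal illustration (mine, not part of the commit) of what the new rule flags and what it accepts:

    public class JavadocExample {
        /**
         */
        public void flagged() {}    // empty Javadoc body: now a checkstyle error

        /** Returns nothing; any non-empty body passes the check. */
        public void accepted() {}
    }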

@@ -968,14 +968,6 @@
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]AbstractAwsTestCase.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ec2[/\\]AmazonEC2Mock.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-gce[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]gce[/\\]GceNetworkTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptEngineService.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptSecurityTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]SimpleBench.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonSecurityTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />

@@ -49,6 +49,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VAL
*/
public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {

public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
public static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);
public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;

@@ -210,8 +212,12 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
/**
* Returns <code>true</code> iff the given class is a registered for an exception to be read.
*/
public static boolean isRegistered(Class<? extends Throwable> exception) {
return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);
public static boolean isRegistered(Class<? extends Throwable> exception, Version version) {
ElasticsearchExceptionHandle elasticsearchExceptionHandle = CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.get(exception);
if (elasticsearchExceptionHandle != null) {
return version.onOrAfter(elasticsearchExceptionHandle.versionAdded);
}
return false;
}

static Set<Class<? extends ElasticsearchException>> getRegisteredKeys() { // for testing
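
The new Version parameter lets serialization be gated on the receiver's wire version: a type only counts as registered if the remote node is new enough to know it. A sketch of how a writer might consult the overload; StreamOutput#getVersion exists in Elasticsearch, but writeConcrete/writeWrapped are hypothetical helper names used only for this illustration:

    void writeException(StreamOutput out, Exception e) throws IOException {
        if (ElasticsearchException.isRegistered(e.getClass(), out.getVersion())) {
            writeConcrete(out, e);   // receiver knows this type: send it as-is
        } else {
            writeWrapped(out, e);    // too new for the receiver: wrap generically
        }
    }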

@@ -432,279 +438,294 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
*/
enum ElasticsearchExceptionHandle {
INDEX_SHARD_SNAPSHOT_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class,
org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0),
org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException::new, 0, UNKNOWN_VERSION_ADDED),
DFS_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class,
org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1),
org.elasticsearch.search.dfs.DfsPhaseExecutionException::new, 1, UNKNOWN_VERSION_ADDED),
EXECUTION_CANCELLED_EXCEPTION(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class,
org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2),
org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException::new, 2, UNKNOWN_VERSION_ADDED),
MASTER_NOT_DISCOVERED_EXCEPTION(org.elasticsearch.discovery.MasterNotDiscoveredException.class,
org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3),
org.elasticsearch.discovery.MasterNotDiscoveredException::new, 3, UNKNOWN_VERSION_ADDED),
ELASTICSEARCH_SECURITY_EXCEPTION(org.elasticsearch.ElasticsearchSecurityException.class,
org.elasticsearch.ElasticsearchSecurityException::new, 4),
org.elasticsearch.ElasticsearchSecurityException::new, 4, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_RESTORE_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreException.class,
org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5),
org.elasticsearch.index.snapshots.IndexShardRestoreException::new, 5, UNKNOWN_VERSION_ADDED),
INDEX_CLOSED_EXCEPTION(org.elasticsearch.indices.IndexClosedException.class,
org.elasticsearch.indices.IndexClosedException::new, 6),
org.elasticsearch.indices.IndexClosedException::new, 6, UNKNOWN_VERSION_ADDED),
BIND_HTTP_EXCEPTION(org.elasticsearch.http.BindHttpException.class,
org.elasticsearch.http.BindHttpException::new, 7),
org.elasticsearch.http.BindHttpException::new, 7, UNKNOWN_VERSION_ADDED),
REDUCE_SEARCH_PHASE_EXCEPTION(org.elasticsearch.action.search.ReduceSearchPhaseException.class,
org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8),
org.elasticsearch.action.search.ReduceSearchPhaseException::new, 8, UNKNOWN_VERSION_ADDED),
NODE_CLOSED_EXCEPTION(org.elasticsearch.node.NodeClosedException.class,
org.elasticsearch.node.NodeClosedException::new, 9),
org.elasticsearch.node.NodeClosedException::new, 9, UNKNOWN_VERSION_ADDED),
SNAPSHOT_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.SnapshotFailedEngineException.class,
org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10),
org.elasticsearch.index.engine.SnapshotFailedEngineException::new, 10, UNKNOWN_VERSION_ADDED),
SHARD_NOT_FOUND_EXCEPTION(org.elasticsearch.index.shard.ShardNotFoundException.class,
org.elasticsearch.index.shard.ShardNotFoundException::new, 11),
org.elasticsearch.index.shard.ShardNotFoundException::new, 11, UNKNOWN_VERSION_ADDED),
CONNECT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ConnectTransportException.class,
org.elasticsearch.transport.ConnectTransportException::new, 12),
org.elasticsearch.transport.ConnectTransportException::new, 12, UNKNOWN_VERSION_ADDED),
NOT_SERIALIZABLE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.NotSerializableTransportException.class,
org.elasticsearch.transport.NotSerializableTransportException::new, 13),
org.elasticsearch.transport.NotSerializableTransportException::new, 13, UNKNOWN_VERSION_ADDED),
RESPONSE_HANDLER_FAILURE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class,
org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14),
org.elasticsearch.transport.ResponseHandlerFailureTransportException::new, 14, UNKNOWN_VERSION_ADDED),
INDEX_CREATION_EXCEPTION(org.elasticsearch.indices.IndexCreationException.class,
org.elasticsearch.indices.IndexCreationException::new, 15),
org.elasticsearch.indices.IndexCreationException::new, 15, UNKNOWN_VERSION_ADDED),
INDEX_NOT_FOUND_EXCEPTION(org.elasticsearch.index.IndexNotFoundException.class,
org.elasticsearch.index.IndexNotFoundException::new, 16),
org.elasticsearch.index.IndexNotFoundException::new, 16, UNKNOWN_VERSION_ADDED),
ILLEGAL_SHARD_ROUTING_STATE_EXCEPTION(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class,
org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17),
org.elasticsearch.cluster.routing.IllegalShardRoutingStateException::new, 17, UNKNOWN_VERSION_ADDED),
BROADCAST_SHARD_OPERATION_FAILED_EXCEPTION(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class,
org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18),
org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException::new, 18, UNKNOWN_VERSION_ADDED),
RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class,
org.elasticsearch.ResourceNotFoundException::new, 19),
org.elasticsearch.ResourceNotFoundException::new, 19, UNKNOWN_VERSION_ADDED),
ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class,
org.elasticsearch.transport.ActionTransportException::new, 20),
org.elasticsearch.transport.ActionTransportException::new, 20, UNKNOWN_VERSION_ADDED),
ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class,
org.elasticsearch.ElasticsearchGenerationException::new, 21),
org.elasticsearch.ElasticsearchGenerationException::new, 21, UNKNOWN_VERSION_ADDED),
// 22 was CreateFailedEngineException
INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class,
org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
org.elasticsearch.index.shard.IndexShardStartedException::new, 23, UNKNOWN_VERSION_ADDED),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
org.elasticsearch.search.SearchContextMissingException::new, 24),
org.elasticsearch.search.SearchContextMissingException::new, 24, UNKNOWN_VERSION_ADDED),
GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
org.elasticsearch.script.GeneralScriptException::new, 25),
org.elasticsearch.script.GeneralScriptException::new, 25, UNKNOWN_VERSION_ADDED),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26, UNKNOWN_VERSION_ADDED),
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
org.elasticsearch.snapshots.SnapshotCreationException::new, 27),
DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class,
org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),// deprecated in 6.0, remove in 7.0
org.elasticsearch.snapshots.SnapshotCreationException::new, 27, UNKNOWN_VERSION_ADDED),
DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, // deprecated in 6.0, remove in 7.0
org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28, UNKNOWN_VERSION_ADDED),
DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class,
org.elasticsearch.index.engine.DocumentMissingException::new, 29),
org.elasticsearch.index.engine.DocumentMissingException::new, 29, UNKNOWN_VERSION_ADDED),
SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class,
org.elasticsearch.snapshots.SnapshotException::new, 30),
org.elasticsearch.snapshots.SnapshotException::new, 30, UNKNOWN_VERSION_ADDED),
INVALID_ALIAS_NAME_EXCEPTION(org.elasticsearch.indices.InvalidAliasNameException.class,
org.elasticsearch.indices.InvalidAliasNameException::new, 31),
org.elasticsearch.indices.InvalidAliasNameException::new, 31, UNKNOWN_VERSION_ADDED),
INVALID_INDEX_NAME_EXCEPTION(org.elasticsearch.indices.InvalidIndexNameException.class,
org.elasticsearch.indices.InvalidIndexNameException::new, 32),
org.elasticsearch.indices.InvalidIndexNameException::new, 32, UNKNOWN_VERSION_ADDED),
INDEX_PRIMARY_SHARD_NOT_ALLOCATED_EXCEPTION(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class,
org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33),
org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException::new, 33, UNKNOWN_VERSION_ADDED),
TRANSPORT_EXCEPTION(org.elasticsearch.transport.TransportException.class,
org.elasticsearch.transport.TransportException::new, 34),
org.elasticsearch.transport.TransportException::new, 34, UNKNOWN_VERSION_ADDED),
ELASTICSEARCH_PARSE_EXCEPTION(org.elasticsearch.ElasticsearchParseException.class,
org.elasticsearch.ElasticsearchParseException::new, 35),
org.elasticsearch.ElasticsearchParseException::new, 35, UNKNOWN_VERSION_ADDED),
SEARCH_EXCEPTION(org.elasticsearch.search.SearchException.class,
org.elasticsearch.search.SearchException::new, 36),
org.elasticsearch.search.SearchException::new, 36, UNKNOWN_VERSION_ADDED),
MAPPER_EXCEPTION(org.elasticsearch.index.mapper.MapperException.class,
org.elasticsearch.index.mapper.MapperException::new, 37),
org.elasticsearch.index.mapper.MapperException::new, 37, UNKNOWN_VERSION_ADDED),
INVALID_TYPE_NAME_EXCEPTION(org.elasticsearch.indices.InvalidTypeNameException.class,
org.elasticsearch.indices.InvalidTypeNameException::new, 38),
org.elasticsearch.indices.InvalidTypeNameException::new, 38, UNKNOWN_VERSION_ADDED),
SNAPSHOT_RESTORE_EXCEPTION(org.elasticsearch.snapshots.SnapshotRestoreException.class,
org.elasticsearch.snapshots.SnapshotRestoreException::new, 39),
PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40),
org.elasticsearch.snapshots.SnapshotRestoreException::new, 39, UNKNOWN_VERSION_ADDED),
PARSING_EXCEPTION(org.elasticsearch.common.ParsingException.class, org.elasticsearch.common.ParsingException::new, 40,
UNKNOWN_VERSION_ADDED),
INDEX_SHARD_CLOSED_EXCEPTION(org.elasticsearch.index.shard.IndexShardClosedException.class,
org.elasticsearch.index.shard.IndexShardClosedException::new, 41),
org.elasticsearch.index.shard.IndexShardClosedException::new, 41, UNKNOWN_VERSION_ADDED),
RECOVER_FILES_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class,
org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42),
org.elasticsearch.indices.recovery.RecoverFilesRecoveryException::new, 42, UNKNOWN_VERSION_ADDED),
TRUNCATED_TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TruncatedTranslogException.class,
org.elasticsearch.index.translog.TruncatedTranslogException::new, 43),
org.elasticsearch.index.translog.TruncatedTranslogException::new, 43, UNKNOWN_VERSION_ADDED),
RECOVERY_FAILED_EXCEPTION(org.elasticsearch.indices.recovery.RecoveryFailedException.class,
org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44),
org.elasticsearch.indices.recovery.RecoveryFailedException::new, 44, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_RELOCATED_EXCEPTION(org.elasticsearch.index.shard.IndexShardRelocatedException.class,
org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45, UNKNOWN_VERSION_ADDED),
NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
org.elasticsearch.transport.NodeShouldNotConnectException::new, 46, UNKNOWN_VERSION_ADDED),
// 47 used to be for IndexTemplateAlreadyExistsException which was deprecated in 5.1 removed in 6.0
TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
org.elasticsearch.index.translog.TranslogCorruptedException::new, 48, UNKNOWN_VERSION_ADDED),
CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
org.elasticsearch.cluster.block.ClusterBlockException::new, 49, UNKNOWN_VERSION_ADDED),
FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50, UNKNOWN_VERSION_ADDED),
// 51 used to be for IndexShardAlreadyExistsException which was deprecated in 5.1 removed in 6.0
VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
org.elasticsearch.index.engine.VersionConflictEngineException::new, 52, UNKNOWN_VERSION_ADDED),
ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53,
UNKNOWN_VERSION_ADDED),
// 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55,
UNKNOWN_VERSION_ADDED),
SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class,
org.elasticsearch.common.settings.SettingsException::new, 56),
org.elasticsearch.common.settings.SettingsException::new, 56, UNKNOWN_VERSION_ADDED),
INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class,
org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
org.elasticsearch.indices.IndexTemplateMissingException::new, 57, UNKNOWN_VERSION_ADDED),
SEND_REQUEST_TRANSPORT_EXCEPTION(org.elasticsearch.transport.SendRequestTransportException.class,
org.elasticsearch.transport.SendRequestTransportException::new, 58),
org.elasticsearch.transport.SendRequestTransportException::new, 58, UNKNOWN_VERSION_ADDED),
ES_REJECTED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class,
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59),
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException::new, 59, UNKNOWN_VERSION_ADDED),
EARLY_TERMINATION_EXCEPTION(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class,
org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60),
org.elasticsearch.common.lucene.Lucene.EarlyTerminationException::new, 60, UNKNOWN_VERSION_ADDED),
// 61 used to be for RoutingValidationException
NOT_SERIALIZABLE_EXCEPTION_WRAPPER(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class,
org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62, UNKNOWN_VERSION_ADDED),
ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
org.elasticsearch.indices.AliasFilterParsingException::new, 63),
org.elasticsearch.indices.AliasFilterParsingException::new, 63, UNKNOWN_VERSION_ADDED),
// 64 was DeleteByQueryFailedEngineException, which was removed in 5.0
GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65,
UNKNOWN_VERSION_ADDED),
INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67),
org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66, UNKNOWN_VERSION_ADDED),
HTTP_EXCEPTION(org.elasticsearch.http.HttpException.class, org.elasticsearch.http.HttpException::new, 67, UNKNOWN_VERSION_ADDED),
ELASTICSEARCH_EXCEPTION(org.elasticsearch.ElasticsearchException.class,
org.elasticsearch.ElasticsearchException::new, 68),
org.elasticsearch.ElasticsearchException::new, 68, UNKNOWN_VERSION_ADDED),
SNAPSHOT_MISSING_EXCEPTION(org.elasticsearch.snapshots.SnapshotMissingException.class,
org.elasticsearch.snapshots.SnapshotMissingException::new, 69),
org.elasticsearch.snapshots.SnapshotMissingException::new, 69, UNKNOWN_VERSION_ADDED),
PRIMARY_MISSING_ACTION_EXCEPTION(org.elasticsearch.action.PrimaryMissingActionException.class,
org.elasticsearch.action.PrimaryMissingActionException::new, 70),
FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71),
SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72),
org.elasticsearch.action.PrimaryMissingActionException::new, 70, UNKNOWN_VERSION_ADDED),
FAILED_NODE_EXCEPTION(org.elasticsearch.action.FailedNodeException.class, org.elasticsearch.action.FailedNodeException::new, 71,
UNKNOWN_VERSION_ADDED),
SEARCH_PARSE_EXCEPTION(org.elasticsearch.search.SearchParseException.class, org.elasticsearch.search.SearchParseException::new, 72,
UNKNOWN_VERSION_ADDED),
CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class,
org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73),
org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, 73, UNKNOWN_VERSION_ADDED),
BLOB_STORE_EXCEPTION(org.elasticsearch.common.blobstore.BlobStoreException.class,
org.elasticsearch.common.blobstore.BlobStoreException::new, 74),
org.elasticsearch.common.blobstore.BlobStoreException::new, 74, UNKNOWN_VERSION_ADDED),
INCOMPATIBLE_CLUSTER_STATE_VERSION_EXCEPTION(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class,
org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75),
org.elasticsearch.cluster.IncompatibleClusterStateVersionException::new, 75, UNKNOWN_VERSION_ADDED),
RECOVERY_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RecoveryEngineException.class,
org.elasticsearch.index.engine.RecoveryEngineException::new, 76),
org.elasticsearch.index.engine.RecoveryEngineException::new, 76, UNKNOWN_VERSION_ADDED),
UNCATEGORIZED_EXECUTION_EXCEPTION(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class,
org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77),
org.elasticsearch.common.util.concurrent.UncategorizedExecutionException::new, 77, UNKNOWN_VERSION_ADDED),
TIMESTAMP_PARSING_EXCEPTION(org.elasticsearch.action.TimestampParsingException.class,
org.elasticsearch.action.TimestampParsingException::new, 78),
org.elasticsearch.action.TimestampParsingException::new, 78, UNKNOWN_VERSION_ADDED),
ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class,
org.elasticsearch.action.RoutingMissingException::new, 79),
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class,
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), // deprecated in 6.0, remove in 7.0
org.elasticsearch.action.RoutingMissingException::new, 79, UNKNOWN_VERSION_ADDED),
INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, // deprecated in 6.0, remove in 7.0
org.elasticsearch.index.engine.IndexFailedEngineException::new, 80, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class,
org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81),
org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81, UNKNOWN_VERSION_ADDED),
REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class,
org.elasticsearch.repositories.RepositoryException::new, 82),
org.elasticsearch.repositories.RepositoryException::new, 82, UNKNOWN_VERSION_ADDED),
RECEIVE_TIMEOUT_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ReceiveTimeoutTransportException.class,
org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83),
org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83, UNKNOWN_VERSION_ADDED),
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
org.elasticsearch.transport.NodeDisconnectedException::new, 84),
org.elasticsearch.transport.NodeDisconnectedException::new, 84, UNKNOWN_VERSION_ADDED),
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
org.elasticsearch.index.AlreadyExpiredException::new, 85),
org.elasticsearch.index.AlreadyExpiredException::new, 85, UNKNOWN_VERSION_ADDED),
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86, UNKNOWN_VERSION_ADDED),
// 87 used to be for MergeMappingException
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
org.elasticsearch.indices.InvalidIndexTemplateException::new, 88, UNKNOWN_VERSION_ADDED),
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90, UNKNOWN_VERSION_ADDED),
AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,
org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91),
org.elasticsearch.search.aggregations.AggregationInitializationException::new, 91, UNKNOWN_VERSION_ADDED),
DELAY_RECOVERY_EXCEPTION(org.elasticsearch.indices.recovery.DelayRecoveryException.class,
org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92),
org.elasticsearch.indices.recovery.DelayRecoveryException::new, 92, UNKNOWN_VERSION_ADDED),
// 93 used to be for IndexWarmerMissingException
NO_NODE_AVAILABLE_EXCEPTION(org.elasticsearch.client.transport.NoNodeAvailableException.class,
org.elasticsearch.client.transport.NoNodeAvailableException::new, 94),
org.elasticsearch.client.transport.NoNodeAvailableException::new, 94, UNKNOWN_VERSION_ADDED),
INVALID_SNAPSHOT_NAME_EXCEPTION(org.elasticsearch.snapshots.InvalidSnapshotNameException.class,
org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96),
org.elasticsearch.snapshots.InvalidSnapshotNameException::new, 96, UNKNOWN_VERSION_ADDED),
ILLEGAL_INDEX_SHARD_STATE_EXCEPTION(org.elasticsearch.index.shard.IllegalIndexShardStateException.class,
org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97),
org.elasticsearch.index.shard.IllegalIndexShardStateException::new, 97, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_SNAPSHOT_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class,
org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98),
org.elasticsearch.index.snapshots.IndexShardSnapshotException::new, 98, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_NOT_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotStartedException.class,
org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99),
org.elasticsearch.index.shard.IndexShardNotStartedException::new, 99, UNKNOWN_VERSION_ADDED),
SEARCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.action.search.SearchPhaseExecutionException.class,
org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100),
org.elasticsearch.action.search.SearchPhaseExecutionException::new, 100, UNKNOWN_VERSION_ADDED),
ACTION_NOT_FOUND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionNotFoundTransportException.class,
org.elasticsearch.transport.ActionNotFoundTransportException::new, 101),
org.elasticsearch.transport.ActionNotFoundTransportException::new, 101, UNKNOWN_VERSION_ADDED),
TRANSPORT_SERIALIZATION_EXCEPTION(org.elasticsearch.transport.TransportSerializationException.class,
org.elasticsearch.transport.TransportSerializationException::new, 102),
org.elasticsearch.transport.TransportSerializationException::new, 102, UNKNOWN_VERSION_ADDED),
REMOTE_TRANSPORT_EXCEPTION(org.elasticsearch.transport.RemoteTransportException.class,
org.elasticsearch.transport.RemoteTransportException::new, 103),
org.elasticsearch.transport.RemoteTransportException::new, 103, UNKNOWN_VERSION_ADDED),
ENGINE_CREATION_FAILURE_EXCEPTION(org.elasticsearch.index.engine.EngineCreationFailureException.class,
org.elasticsearch.index.engine.EngineCreationFailureException::new, 104),
org.elasticsearch.index.engine.EngineCreationFailureException::new, 104, UNKNOWN_VERSION_ADDED),
ROUTING_EXCEPTION(org.elasticsearch.cluster.routing.RoutingException.class,
org.elasticsearch.cluster.routing.RoutingException::new, 105),
org.elasticsearch.cluster.routing.RoutingException::new, 105, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_RECOVERY_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveryException.class,
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106, UNKNOWN_VERSION_ADDED),
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
org.elasticsearch.repositories.RepositoryMissingException::new, 107, UNKNOWN_VERSION_ADDED),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109, UNKNOWN_VERSION_ADDED),
// 110 used to be FlushNotAllowedEngineException
NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
org.elasticsearch.common.settings.NoClassSettingsException::new, 111, UNKNOWN_VERSION_ADDED),
BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,
org.elasticsearch.transport.BindTransportException::new, 112),
org.elasticsearch.transport.BindTransportException::new, 112, UNKNOWN_VERSION_ADDED),
ALIASES_NOT_FOUND_EXCEPTION(org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException.class,
org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException::new, 113),
org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException::new, 113, UNKNOWN_VERSION_ADDED),
INDEX_SHARD_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardRecoveringException.class,
org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114),
org.elasticsearch.index.shard.IndexShardRecoveringException::new, 114, UNKNOWN_VERSION_ADDED),
TRANSLOG_EXCEPTION(org.elasticsearch.index.translog.TranslogException.class,
org.elasticsearch.index.translog.TranslogException::new, 115),
org.elasticsearch.index.translog.TranslogException::new, 115, UNKNOWN_VERSION_ADDED),
PROCESS_CLUSTER_EVENT_TIMEOUT_EXCEPTION(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class,
org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116),
org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException::new, 116, UNKNOWN_VERSION_ADDED),
RETRY_ON_PRIMARY_EXCEPTION(ReplicationOperation.RetryOnPrimaryException.class,
ReplicationOperation.RetryOnPrimaryException::new, 117),
ReplicationOperation.RetryOnPrimaryException::new, 117, UNKNOWN_VERSION_ADDED),
ELASTICSEARCH_TIMEOUT_EXCEPTION(org.elasticsearch.ElasticsearchTimeoutException.class,
org.elasticsearch.ElasticsearchTimeoutException::new, 118),
org.elasticsearch.ElasticsearchTimeoutException::new, 118, UNKNOWN_VERSION_ADDED),
QUERY_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.query.QueryPhaseExecutionException.class,
org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119),
org.elasticsearch.search.query.QueryPhaseExecutionException::new, 119, UNKNOWN_VERSION_ADDED),
REPOSITORY_VERIFICATION_EXCEPTION(org.elasticsearch.repositories.RepositoryVerificationException.class,
org.elasticsearch.repositories.RepositoryVerificationException::new, 120),
org.elasticsearch.repositories.RepositoryVerificationException::new, 120, UNKNOWN_VERSION_ADDED),
INVALID_AGGREGATION_PATH_EXCEPTION(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class,
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121, UNKNOWN_VERSION_ADDED),
// 123 used to be IndexAlreadyExistsException and was renamed
RESOURCE_ALREADY_EXISTS_EXCEPTION(ResourceAlreadyExistsException.class,
ResourceAlreadyExistsException::new, 123),
ResourceAlreadyExistsException::new, 123, UNKNOWN_VERSION_ADDED),
// 124 used to be Script.ScriptParseException
HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
TcpTransport.HttpOnTransportException::new, 125),
TcpTransport.HttpOnTransportException::new, 125, UNKNOWN_VERSION_ADDED),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
org.elasticsearch.index.mapper.MapperParsingException::new, 126),
org.elasticsearch.index.mapper.MapperParsingException::new, 126, UNKNOWN_VERSION_ADDED),
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
org.elasticsearch.search.SearchContextException::new, 127),
org.elasticsearch.search.SearchContextException::new, 127, UNKNOWN_VERSION_ADDED),
SEARCH_SOURCE_BUILDER_EXCEPTION(org.elasticsearch.search.builder.SearchSourceBuilderException.class,
org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128),
org.elasticsearch.search.builder.SearchSourceBuilderException::new, 128, UNKNOWN_VERSION_ADDED),
ENGINE_CLOSED_EXCEPTION(org.elasticsearch.index.engine.EngineClosedException.class,
org.elasticsearch.index.engine.EngineClosedException::new, 129),
org.elasticsearch.index.engine.EngineClosedException::new, 129, UNKNOWN_VERSION_ADDED),
NO_SHARD_AVAILABLE_ACTION_EXCEPTION(org.elasticsearch.action.NoShardAvailableActionException.class,
org.elasticsearch.action.NoShardAvailableActionException::new, 130),
org.elasticsearch.action.NoShardAvailableActionException::new, 130, UNKNOWN_VERSION_ADDED),
UNAVAILABLE_SHARDS_EXCEPTION(org.elasticsearch.action.UnavailableShardsException.class,
org.elasticsearch.action.UnavailableShardsException::new, 131),
org.elasticsearch.action.UnavailableShardsException::new, 131, UNKNOWN_VERSION_ADDED),
FLUSH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushFailedEngineException.class,
org.elasticsearch.index.engine.FlushFailedEngineException::new, 132),
org.elasticsearch.index.engine.FlushFailedEngineException::new, 132, UNKNOWN_VERSION_ADDED),
CIRCUIT_BREAKING_EXCEPTION(org.elasticsearch.common.breaker.CircuitBreakingException.class,
org.elasticsearch.common.breaker.CircuitBreakingException::new, 133),
org.elasticsearch.common.breaker.CircuitBreakingException::new, 133, UNKNOWN_VERSION_ADDED),
NODE_NOT_CONNECTED_EXCEPTION(org.elasticsearch.transport.NodeNotConnectedException.class,
org.elasticsearch.transport.NodeNotConnectedException::new, 134),
org.elasticsearch.transport.NodeNotConnectedException::new, 134, UNKNOWN_VERSION_ADDED),
STRICT_DYNAMIC_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.StrictDynamicMappingException.class,
org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135),
org.elasticsearch.index.mapper.StrictDynamicMappingException::new, 135, UNKNOWN_VERSION_ADDED),
RETRY_ON_REPLICA_EXCEPTION(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class,
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136),
org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException::new, 136,
UNKNOWN_VERSION_ADDED),
TYPE_MISSING_EXCEPTION(org.elasticsearch.indices.TypeMissingException.class,
org.elasticsearch.indices.TypeMissingException::new, 137),
org.elasticsearch.indices.TypeMissingException::new, 137, UNKNOWN_VERSION_ADDED),
FAILED_TO_COMMIT_CLUSTER_STATE_EXCEPTION(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class,
org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140),
org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException::new, 140, UNKNOWN_VERSION_ADDED),
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
org.elasticsearch.index.query.QueryShardException::new, 141),
org.elasticsearch.index.query.QueryShardException::new, 141, UNKNOWN_VERSION_ADDED),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
ShardStateAction.NoLongerPrimaryShardException::new, 142),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),
NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144),
STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145),
ShardStateAction.NoLongerPrimaryShardException::new, 142, UNKNOWN_VERSION_ADDED),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143,
UNKNOWN_VERSION_ADDED),
NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144,
UNKNOWN_VERSION_ADDED),
STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,
UNKNOWN_VERSION_ADDED),
TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
org.elasticsearch.tasks.TaskCancelledException::new, 146);
org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED),
SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, V_5_1_0_UNRELEASED);

final Class<? extends ElasticsearchException> exceptionClass;
final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
final int id;
final Version versionAdded;

<E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
FunctionThatThrowsIOException<StreamInput, E> constructor, int id) {
FunctionThatThrowsIOException<StreamInput, E> constructor, int id,
Version versionAdded) {
// We need the exceptionClass because you can't dig it out of the constructor reliably.
this.exceptionClass = exceptionClass;
this.constructor = constructor;
this.versionAdded = versionAdded;
this.id = id;
}
}
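
For orientation, the read side of such a registry typically inverts the table: the wire carries the numeric id, and the handle's stored constructor rebuilds the exception from the stream. A sketch with assumed names (ID_TO_HANDLE stands in for the real id-keyed map; apply is the single abstract method of FunctionThatThrowsIOException):

    static ElasticsearchException readException(StreamInput in) throws IOException {
        int id = in.readVInt();
        ElasticsearchExceptionHandle handle = ID_TO_HANDLE.get(id);  // assumed name
        if (handle == null) {
            throw new IllegalStateException("unknown exception id [" + id + "]");
        }
        return handle.constructor.apply(in);
    }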

@@ -33,8 +33,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;

@@ -108,26 +106,20 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
failIfOverShardCountLimit(clusterService, shardIterators.size());

// optimize search type for cases where there is only one shard group to search on
try {
if (shardIterators.size() == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
if (shardIterators.size() == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
if (searchRequest.isSuggestOnly()) {
// disable request cache if we have only suggest
searchRequest.requestCache(false);
switch (searchRequest.searchType()) {
case DFS_QUERY_AND_FETCH:
case DFS_QUERY_THEN_FETCH:
// convert to Q_T_F if we have only suggest
searchRequest.searchType(QUERY_THEN_FETCH);
break;
}
if (searchRequest.isSuggestOnly()) {
// disable request cache if we have only suggest
searchRequest.requestCache(false);
switch (searchRequest.searchType()) {
case DFS_QUERY_AND_FETCH:
case DFS_QUERY_THEN_FETCH:
// convert to Q_T_F if we have only suggest
searchRequest.searchType(QUERY_THEN_FETCH);
break;
}
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures, we will notify the search response if its really the case from the actual action
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
}

searchAsyncAction((SearchTask)task, searchRequest, shardIterators, startTimeInMillis, clusterState,
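
The hunk above lifts the same two optimizations out of the now-removed try/catch. In isolation they do this: with a single shard group, query-and-fetch in one round trip is always best; suggest-only requests skip the request cache and drop DFS. A self-contained sketch, where SearchType and Request are stand-ins for the Elasticsearch classes, not the real API:

    class SearchTypeOptimization {
        enum SearchType { QUERY_AND_FETCH, QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH, DFS_QUERY_THEN_FETCH }

        static final class Request {
            SearchType type = SearchType.DFS_QUERY_THEN_FETCH;
            boolean suggestOnly;
            Boolean requestCache;
        }

        static void optimize(Request request, int shardGroupCount) {
            if (shardGroupCount == 1) {
                // one shard group: fetch in the same round trip, and DFS buys
                // nothing because all term statistics live on that one shard
                request.type = SearchType.QUERY_AND_FETCH;
            }
            if (request.suggestOnly) {
                request.requestCache = Boolean.FALSE; // suggest-only responses are not cached
                switch (request.type) {
                    case DFS_QUERY_AND_FETCH:
                    case DFS_QUERY_THEN_FETCH:
                        // suggesters ignore distributed term statistics, so drop DFS
                        request.type = SearchType.QUERY_THEN_FETCH;
                        break;
                    default:
                        break;
                }
            }
        }
    }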

@@ -1018,7 +1018,8 @@ public abstract class TransportReplicationAction<
}
transportService.sendRequest(node, transportReplicaAction,
new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions,
new ActionListenerResponseHandler<>(listener, ReplicaResponse::new));
// Eclipse can't handle when this is <> so we specify the type here.
new ActionListenerResponseHandler<ReplicaResponse>(listener, ReplicaResponse::new));
}

@Override
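
The new comment pins down why the diamond operator was dropped: javac infers the type argument here, but the Eclipse compiler of that era reportedly could not. A tiny self-contained illustration of the same choice; Handler and Resp are stand-ins for ActionListenerResponseHandler and ReplicaResponse:

    import java.util.function.Supplier;

    class Resp {}

    class Handler<T> {
        Handler(Runnable listener, Supplier<T> reader) {}
    }

    class DiamondWorkaround {
        static Handler<Resp> build(Runnable listener) {
            // The diamond form new Handler<>(listener, Resp::new) is what javac
            // accepts; spelling out the type argument keeps ecj happy as well.
            return new Handler<Resp>(listener, Resp::new);
        }
    }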

@@ -33,6 +33,7 @@ import org.elasticsearch.node.NodeValidationException;
import java.io.IOException;
import java.nio.file.Path;
import java.security.Permission;
import java.util.Arrays;
import java.util.Map;

@@ -69,6 +70,14 @@ class Elasticsearch extends SettingCommand {
* Main entry point for starting elasticsearch
*/
public static void main(final String[] args) throws Exception {
// we want the JVM to think there is a security manager installed so that if internal policy decisions that would be based on the
// presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy)
System.setSecurityManager(new SecurityManager() {
@Override
public void checkPermission(Permission perm) {
// grant all permissions so that we can later set the security manager to the one that we want
}
});
final Elasticsearch elasticsearch = new Elasticsearch();
int status = main(args, elasticsearch, Terminal.DEFAULT);
if (status != ExitCodes.OK) {

@@ -78,7 +78,7 @@ import java.util.Map;
* when they are so dangerous that general code should not be granted the
* permission, but there are extenuating circumstances.
* <p>
* Scripts (groovy, javascript, python) are assigned minimal permissions. This does not provide adequate
* Scripts (groovy) are assigned minimal permissions. This does not provide adequate
* sandboxing, as these scripts still have access to ES classes, and could
* modify members, etc that would cause bad things to happen later on their
* behalf (no package protections are yet in place, this would need some

@@ -20,17 +20,21 @@
package org.elasticsearch.cluster;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

/**

@@ -143,6 +147,33 @@ public class ClusterChangedEvent {
return state.metaData() != previousState.metaData();
}

/**
* Returns a set of custom meta data types when any custom metadata for the cluster has changed
* between the previous cluster state and the new cluster state. custom meta data types are
* returned iff they have been added, updated or removed between the previous and the current state
*/
public Set<String> changedCustomMetaDataSet() {
Set<String> result = new HashSet<>();
ImmutableOpenMap<String, MetaData.Custom> currentCustoms = state.metaData().customs();
ImmutableOpenMap<String, MetaData.Custom> previousCustoms = previousState.metaData().customs();
if (currentCustoms.equals(previousCustoms) == false) {
for (ObjectObjectCursor<String, MetaData.Custom> currentCustomMetaData : currentCustoms) {
// new custom md added or existing custom md changed
if (previousCustoms.containsKey(currentCustomMetaData.key) == false
|| currentCustomMetaData.value.equals(previousCustoms.get(currentCustomMetaData.key)) == false) {
result.add(currentCustomMetaData.key);
}
}
// existing custom md deleted
for (ObjectObjectCursor<String, MetaData.Custom> previousCustomMetaData : previousCustoms) {
if (currentCustoms.containsKey(previousCustomMetaData.key) == false) {
result.add(previousCustomMetaData.key);
}
}
}
return result;
}
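
The method above computes which custom metadata keys differ between two immutable maps: keys added or changed (first loop) plus keys removed (second loop). The same logic expressed with plain java.util types, as a sketch rather than the ES code:

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Objects;
    import java.util.Set;

    // Keys whose mapping was added, updated, or removed between two maps;
    // mirrors changedCustomMetaDataSet() without the hppc cursor types.
    static <K, V> Set<K> changedKeys(Map<K, V> previous, Map<K, V> current) {
        Set<K> result = new HashSet<>();
        if (!current.equals(previous)) {
            for (Map.Entry<K, V> e : current.entrySet()) {       // added or updated
                if (!Objects.equals(previous.get(e.getKey()), e.getValue())) {
                    result.add(e.getKey());
                }
            }
            for (K key : previous.keySet()) {                    // removed
                if (!current.containsKey(key)) {
                    result.add(key);
                }
            }
        }
        return result;
    }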

/**
* Returns <code>true</code> iff the {@link IndexMetaData} for a given index
* has changed between the previous cluster state and the new cluster state.

@@ -108,13 +108,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
registerPrototype(RestoreInProgress.TYPE, RestoreInProgress.PROTO);
}

@Nullable
public static <T extends Custom> T lookupPrototype(String type) {
//noinspection unchecked
return (T) customPrototypes.get(type);
}

public static <T extends Custom> T lookupPrototypeSafe(String type) {
@SuppressWarnings("unchecked")
T proto = (T) customPrototypes.get(type);
if (proto == null) {

@@ -308,7 +302,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
private final String value;

private Metric(String value) {
Metric(String value) {
this.value = value;
}

@@ -630,10 +624,6 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
return this;
}

public Custom getCustom(String type) {
return customs.get(type);
}

public Builder putCustom(String type, Custom custom) {
customs.put(type, custom);
return this;

@@ -707,7 +697,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
Custom customIndexMetaData = lookupPrototype(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();

@@ -779,12 +769,12 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
return lookupPrototype(key).readFrom(in);
}

@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
return lookupPrototype(key).readDiffFrom(in);
}
});
}

@@ -474,12 +474,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
}

CollectionUtil.timSort(templateMetadata, new Comparator<IndexTemplateMetaData>() {
@Override
public int compare(IndexTemplateMetaData o1, IndexTemplateMetaData o2) {
return o2.order() - o1.order();
}
});
CollectionUtil.timSort(templateMetadata, Comparator.comparingInt(IndexTemplateMetaData::order).reversed());
return templateMetadata;
}
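
Beyond brevity, comparingInt(...).reversed() is also safer than the subtraction comparator it replaces: o2.order() - o1.order() can overflow for extreme int values, while the method-reference form compares correctly over the full int range. A standalone demonstration, with Template as a hypothetical stand-in for IndexTemplateMetaData:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    record Template(String name, int order) {}

    public class ComparatorDemo {
        public static void main(String[] args) {
            List<Template> templates = new ArrayList<>(List.of(
                new Template("low", 1),
                new Template("high", Integer.MAX_VALUE),
                new Template("negative", Integer.MIN_VALUE)));

            // Overflow-free descending sort by order, as in the new code:
            templates.sort(Comparator.comparingInt(Template::order).reversed());
            System.out.println(templates); // high, low, negative

            // The replaced subtraction comparator wraps around for these
            // extremes (e.g. 1 - Integer.MIN_VALUE overflows), misordering them.
        }
    }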
|
||||
|
||||
|
|
|
@@ -38,7 +38,6 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to

@@ -56,19 +55,17 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements

    private final String masterNodeId;
    private final String localNodeId;
    private final Version minNodeVersion;
    private final Version minNonClientNodeVersion;

    private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes,
                           ImmutableOpenMap<String, DiscoveryNode> masterNodes, ImmutableOpenMap<String, DiscoveryNode> ingestNodes,
                           String masterNodeId, String localNodeId, Version minNodeVersion, Version minNonClientNodeVersion) {
                           String masterNodeId, String localNodeId, Version minNonClientNodeVersion) {
        this.nodes = nodes;
        this.dataNodes = dataNodes;
        this.masterNodes = masterNodes;
        this.ingestNodes = ingestNodes;
        this.masterNodeId = masterNodeId;
        this.localNodeId = localNodeId;
        this.minNodeVersion = minNodeVersion;
        this.minNonClientNodeVersion = minNonClientNodeVersion;
    }

@@ -173,7 +170,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
        return existing != null && existing.equals(node);
    }

    /**
     * Get the id of the master node
     *

@@ -230,16 +226,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
        return nodesIds == null || nodesIds.length == 0 || (nodesIds.length == 1 && nodesIds[0].equals("_all"));
    }

    /**
     * Returns the version of the node with the oldest version in the cluster
     *
     * @return the oldest version in the cluster
     */
    public Version getSmallestVersion() {
        return minNodeVersion;
    }

    /**
     * Returns the version of the node with the oldest version in the cluster that is not a client node
     *

@@ -353,16 +339,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
        }
    }

    public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
        Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
        for (DiscoveryNode node : this) {
            if (newNodes.contains(node.getId())) {
                builder.add(node);
            }
        }
        return builder.build();
    }

    public DiscoveryNodes newNode(DiscoveryNode node) {
        return new Builder(this).add(node).build();
    }

@@ -420,11 +396,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
        private final List<DiscoveryNode> removed;
        private final List<DiscoveryNode> added;

        public Delta(String localNodeId, List<DiscoveryNode> removed, List<DiscoveryNode> added) {
            this(null, null, localNodeId, removed, added);
        }

        public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId,
        private Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId,
                      List<DiscoveryNode> removed, List<DiscoveryNode> added) {
            this.previousMasterNode = previousMasterNode;
            this.newMasterNode = newMasterNode;

@@ -677,7 +649,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements

            return new DiscoveryNodes(
                nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(),
                masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion
                masterNodeId, localNodeId, minNonClientNodeVersion
            );
        }

@@ -30,29 +30,29 @@ import java.util.Map;
import java.util.Objects;

/**
 * Represents the allocation decision by an allocator for a shard.
 * Represents the allocation decision by an allocator for an unassigned shard.
 */
public class ShardAllocationDecision {
public class AllocateUnassignedDecision {
    /** a constant representing a shard decision where no decision was taken */
    public static final ShardAllocationDecision DECISION_NOT_TAKEN =
        new ShardAllocationDecision(null, null, null, null, null, null, null);
    public static final AllocateUnassignedDecision NOT_TAKEN =
        new AllocateUnassignedDecision(null, null, null, null, null, null, null);
    /**
     * a map of cached common no/throttle decisions that don't need explanations,
     * this helps prevent unnecessary object allocations for the non-explain API case
     */
    private static final Map<AllocationStatus, ShardAllocationDecision> CACHED_DECISIONS;
    private static final Map<AllocationStatus, AllocateUnassignedDecision> CACHED_DECISIONS;
    static {
        Map<AllocationStatus, ShardAllocationDecision> cachedDecisions = new HashMap<>();
        Map<AllocationStatus, AllocateUnassignedDecision> cachedDecisions = new HashMap<>();
        cachedDecisions.put(AllocationStatus.FETCHING_SHARD_DATA,
            new ShardAllocationDecision(Type.NO, AllocationStatus.FETCHING_SHARD_DATA, null, null, null, null, null));
            new AllocateUnassignedDecision(Type.NO, AllocationStatus.FETCHING_SHARD_DATA, null, null, null, null, null));
        cachedDecisions.put(AllocationStatus.NO_VALID_SHARD_COPY,
            new ShardAllocationDecision(Type.NO, AllocationStatus.NO_VALID_SHARD_COPY, null, null, null, null, null));
            new AllocateUnassignedDecision(Type.NO, AllocationStatus.NO_VALID_SHARD_COPY, null, null, null, null, null));
        cachedDecisions.put(AllocationStatus.DECIDERS_NO,
            new ShardAllocationDecision(Type.NO, AllocationStatus.DECIDERS_NO, null, null, null, null, null));
            new AllocateUnassignedDecision(Type.NO, AllocationStatus.DECIDERS_NO, null, null, null, null, null));
        cachedDecisions.put(AllocationStatus.DECIDERS_THROTTLED,
            new ShardAllocationDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, null, null, null, null, null));
            new AllocateUnassignedDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, null, null, null, null, null));
        cachedDecisions.put(AllocationStatus.DELAYED_ALLOCATION,
            new ShardAllocationDecision(Type.NO, AllocationStatus.DELAYED_ALLOCATION, null, null, null, null, null));
            new AllocateUnassignedDecision(Type.NO, AllocationStatus.DELAYED_ALLOCATION, null, null, null, null, null));
        CACHED_DECISIONS = Collections.unmodifiableMap(cachedDecisions);
    }

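The static CACHED_DECISIONS map above is a flyweight: the handful of explanation-free NO/THROTTLE decisions are built once and shared, so the hot non-explain allocation path allocates nothing. A minimal sketch of the same pattern, with hypothetical names (not this changeset's API):

// Sketch only: pre-build explanation-free singletons, allocate only when a caller
// actually asks for a per-request explanation string.
import java.util.EnumMap;
import java.util.Map;

final class CachedDecisionSketch {
    enum Status { DECIDERS_NO, DECIDERS_THROTTLED }

    private static final Map<Status, CachedDecisionSketch> CACHED = new EnumMap<>(Status.class);
    static {
        for (Status s : Status.values()) {
            CACHED.put(s, new CachedDecisionSketch(s, null));
        }
    }

    private final Status status;
    private final String explanation; // null on the hot, non-explain path

    private CachedDecisionSketch(Status status, String explanation) {
        this.status = status;
        this.explanation = explanation;
    }

    static CachedDecisionSketch of(Status status, String explanation) {
        // only the explain path pays for a fresh object
        return explanation == null ? CACHED.get(status) : new CachedDecisionSketch(status, explanation);
    }
}
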
@@ -67,17 +67,17 @@ public class ShardAllocationDecision {
    @Nullable
    private final String allocationId;
    @Nullable
    private final Map<String, WeightedDecision> nodeDecisions;
    private final Map<String, NodeAllocationResult> nodeDecisions;
    @Nullable
    private final Decision shardDecision;

    private ShardAllocationDecision(Type finalDecision,
                                    AllocationStatus allocationStatus,
                                    String finalExplanation,
                                    String assignedNodeId,
                                    String allocationId,
                                    Map<String, WeightedDecision> nodeDecisions,
                                    Decision shardDecision) {
    private AllocateUnassignedDecision(Type finalDecision,
                                       AllocationStatus allocationStatus,
                                       String finalExplanation,
                                       String assignedNodeId,
                                       String allocationId,
                                       Map<String, NodeAllocationResult> nodeDecisions,
                                       Decision shardDecision) {
        assert assignedNodeId != null || finalDecision == null || finalDecision != Type.YES :
            "a yes decision must have a node to assign the shard to";
        assert allocationStatus != null || finalDecision == null || finalDecision == Type.YES :

@@ -96,9 +96,9 @@ public class ShardAllocationDecision {
    /**
     * Returns a NO decision with the given shard-level decision and explanation (if in explain mode).
     */
    public static ShardAllocationDecision no(Decision shardDecision, @Nullable String explanation) {
    public static AllocateUnassignedDecision no(Decision shardDecision, @Nullable String explanation) {
        if (explanation != null) {
            return new ShardAllocationDecision(Type.NO, AllocationStatus.DECIDERS_NO, explanation, null, null, null, shardDecision);
            return new AllocateUnassignedDecision(Type.NO, AllocationStatus.DECIDERS_NO, explanation, null, null, null, shardDecision);
        } else {
            return getCachedDecision(AllocationStatus.DECIDERS_NO);
        }

@@ -107,7 +107,7 @@ public class ShardAllocationDecision {
    /**
     * Returns a NO decision with the given {@link AllocationStatus} and explanation for the NO decision, if in explain mode.
     */
    public static ShardAllocationDecision no(AllocationStatus allocationStatus, @Nullable String explanation) {
    public static AllocateUnassignedDecision no(AllocationStatus allocationStatus, @Nullable String explanation) {
        return no(allocationStatus, explanation, null);
    }

@@ -115,11 +115,11 @@ public class ShardAllocationDecision {
     * Returns a NO decision with the given {@link AllocationStatus}, and the explanation for the NO decision
     * as well as the individual node-level decisions that comprised the final NO decision if in explain mode.
     */
    public static ShardAllocationDecision no(AllocationStatus allocationStatus, @Nullable String explanation,
                                             @Nullable Map<String, Decision> nodeDecisions) {
    public static AllocateUnassignedDecision no(AllocationStatus allocationStatus, @Nullable String explanation,
                                                @Nullable Map<String, Decision> nodeDecisions) {
        Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
        if (explanation != null) {
            return new ShardAllocationDecision(Type.NO, allocationStatus, explanation, null, null, asExplanations(nodeDecisions), null);
            return new AllocateUnassignedDecision(Type.NO, allocationStatus, explanation, null, null, asExplanations(nodeDecisions), null);
        } else {
            return getCachedDecision(allocationStatus);
        }

@@ -129,9 +129,9 @@ public class ShardAllocationDecision {
     * Returns a THROTTLE decision, with the given explanation and individual node-level decisions that
     * comprised the final THROTTLE decision if in explain mode.
     */
    public static ShardAllocationDecision throttle(@Nullable String explanation, @Nullable Map<String, Decision> nodeDecisions) {
    public static AllocateUnassignedDecision throttle(@Nullable String explanation, @Nullable Map<String, Decision> nodeDecisions) {
        if (explanation != null) {
            return new ShardAllocationDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
            return new AllocateUnassignedDecision(Type.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
                asExplanations(nodeDecisions), null);
        } else {
            return getCachedDecision(AllocationStatus.DECIDERS_THROTTLED);

@@ -143,17 +143,18 @@ public class ShardAllocationDecision {
     * comprised the final YES decision, along with the node id to which the shard is assigned and
     * the allocation id for the shard, if available.
     */
    public static ShardAllocationDecision yes(String assignedNodeId, @Nullable String explanation, @Nullable String allocationId,
                                              @Nullable Map<String, Decision> nodeDecisions) {
    public static AllocateUnassignedDecision yes(String assignedNodeId, @Nullable String explanation, @Nullable String allocationId,
                                                 @Nullable Map<String, Decision> nodeDecisions) {
        Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
        return new ShardAllocationDecision(Type.YES, null, explanation, assignedNodeId, allocationId, asExplanations(nodeDecisions), null);
        return new AllocateUnassignedDecision(Type.YES, null, explanation, assignedNodeId, allocationId,
            asExplanations(nodeDecisions), null);
    }

    /**
     * Creates a {@link ShardAllocationDecision} from the given {@link Decision} and the assigned node, if any.
     * Creates a {@link AllocateUnassignedDecision} from the given {@link Decision} and the assigned node, if any.
     */
    public static ShardAllocationDecision fromDecision(Decision decision, @Nullable String assignedNodeId, boolean explain,
                                                       @Nullable Map<String, WeightedDecision> nodeDecisions) {
    public static AllocateUnassignedDecision fromDecision(Decision decision, @Nullable String assignedNodeId, boolean explain,
                                                          @Nullable Map<String, NodeAllocationResult> nodeDecisions) {
        final Type decisionType = decision.type();
        AllocationStatus allocationStatus = decisionType != Type.YES ? AllocationStatus.fromDecision(decisionType) : null;
        String explanation = null;

@@ -168,19 +169,19 @@ public class ShardAllocationDecision {
                explanation = "shard cannot be assigned to any node in the cluster";
            }
        }
        return new ShardAllocationDecision(decisionType, allocationStatus, explanation, assignedNodeId, null, nodeDecisions, null);
        return new AllocateUnassignedDecision(decisionType, allocationStatus, explanation, assignedNodeId, null, nodeDecisions, null);
    }

    private static ShardAllocationDecision getCachedDecision(AllocationStatus allocationStatus) {
        ShardAllocationDecision decision = CACHED_DECISIONS.get(allocationStatus);
    private static AllocateUnassignedDecision getCachedDecision(AllocationStatus allocationStatus) {
        AllocateUnassignedDecision decision = CACHED_DECISIONS.get(allocationStatus);
        return Objects.requireNonNull(decision, "precomputed decision not found for " + allocationStatus);
    }

    private static Map<String, WeightedDecision> asExplanations(Map<String, Decision> decisionMap) {
    private static Map<String, NodeAllocationResult> asExplanations(Map<String, Decision> decisionMap) {
        if (decisionMap != null) {
            Map<String, WeightedDecision> explanationMap = new HashMap<>();
            Map<String, NodeAllocationResult> explanationMap = new HashMap<>();
            for (Map.Entry<String, Decision> entry : decisionMap.entrySet()) {
                explanationMap.put(entry.getKey(), new WeightedDecision(entry.getValue(), Float.POSITIVE_INFINITY));
                explanationMap.put(entry.getKey(), new NodeAllocationResult(entry.getValue(), Float.POSITIVE_INFINITY));
            }
            return explanationMap;
        }

@@ -259,7 +260,7 @@ public class ShardAllocationDecision {
     * as the decision for the given node.
     */
    @Nullable
    public Map<String, WeightedDecision> getNodeDecisions() {
    public Map<String, NodeAllocationResult> getNodeDecisions() {
        return nodeDecisions;
    }

@@ -273,56 +274,4 @@ public class ShardAllocationDecision {
        return shardDecision;
    }

    /**
     * This class represents the shard allocation decision for a single node,
     * including the {@link Decision} whether to allocate to the node and the
     * weight assigned to the node for the shard in question.
     */
    public static final class WeightedDecision {

        private final Decision decision;
        private final float weight;

        public WeightedDecision(Decision decision) {
            this.decision = Objects.requireNonNull(decision);
            this.weight = Float.POSITIVE_INFINITY;
        }

        public WeightedDecision(Decision decision, float weight) {
            this.decision = Objects.requireNonNull(decision);
            this.weight = Objects.requireNonNull(weight);
        }

        /**
         * The decision for allocating to the node.
         */
        public Decision getDecision() {
            return decision;
        }

        /**
         * The calculated weight for allocating a shard to the node. A value of {@link Float#POSITIVE_INFINITY}
         * means the weight was not calculated or factored into the decision.
         */
        public float getWeight() {
            return weight;
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            WeightedDecision that = (WeightedDecision) other;
            return decision.equals(that.decision) && Float.compare(weight, that.weight) == 0;
        }

        @Override
        public int hashCode() {
            return Objects.hash(decision, weight);
        }
    }
}

@@ -0,0 +1,127 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;

import java.util.Collections;
import java.util.Map;
import java.util.Objects;

/**
 * Represents a decision to move a started shard because it is no longer allowed to remain on its current node.
 */
public final class MoveDecision extends RelocationDecision {
    /** a constant representing no decision taken */
    public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, null, null, null);
    /** cached decisions so we don't have to recreate objects for common decisions when not in explain mode. */
    private static final MoveDecision CACHED_STAY_DECISION = new MoveDecision(Decision.YES, Decision.Type.NO, null, null, null);
    private static final MoveDecision CACHED_CANNOT_MOVE_DECISION = new MoveDecision(Decision.NO, Decision.Type.NO, null, null, null);

    @Nullable
    private final Decision canRemainDecision;
    @Nullable
    private final Map<String, NodeAllocationResult> nodeDecisions;

    private MoveDecision(Decision canRemainDecision, Decision.Type finalDecision, String finalExplanation,
                         String assignedNodeId, Map<String, NodeAllocationResult> nodeDecisions) {
        super(finalDecision, finalExplanation, assignedNodeId);
        this.canRemainDecision = canRemainDecision;
        this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
    }

    /**
     * Creates a move decision for the shard being able to remain on its current node, so not moving.
     */
    public static MoveDecision stay(Decision canRemainDecision, boolean explain) {
        assert canRemainDecision.type() != Decision.Type.NO;
        if (explain) {
            final String explanation;
            if (explain) {
                explanation = "shard is allowed to remain on its current node, so no reason to move";
            } else {
                explanation = null;
            }
            return new MoveDecision(Objects.requireNonNull(canRemainDecision), Decision.Type.NO, explanation, null, null);
        } else {
            return CACHED_STAY_DECISION;
        }
    }

    /**
     * Creates a move decision for the shard not being able to remain on its current node.
     *
     * @param canRemainDecision the decision for whether the shard is allowed to remain on its current node
     * @param finalDecision the decision of whether to move the shard to another node
     * @param explain true if in explain mode
     * @param currentNodeId the current node id where the shard is assigned
     * @param assignedNodeId the node id for where the shard can move to
     * @param nodeDecisions the node-level decisions that comprised the final decision, non-null iff explain is true
     * @return the {@link MoveDecision} for moving the shard to another node
     */
    public static MoveDecision decision(Decision canRemainDecision, Decision.Type finalDecision, boolean explain, String currentNodeId,
                                        String assignedNodeId, Map<String, NodeAllocationResult> nodeDecisions) {
        assert canRemainDecision != null;
        assert canRemainDecision.type() != Decision.Type.YES : "create decision with MoveDecision#stay instead";
        String finalExplanation = null;
        if (explain) {
            assert currentNodeId != null;
            if (finalDecision == Decision.Type.YES) {
                assert assignedNodeId != null;
                finalExplanation = "shard cannot remain on node [" + currentNodeId + "], moving to node [" + assignedNodeId + "]";
            } else if (finalDecision == Decision.Type.THROTTLE) {
                finalExplanation = "shard cannot remain on node [" + currentNodeId + "], throttled on moving to another node";
            } else {
                finalExplanation = "shard cannot remain on node [" + currentNodeId + "], but cannot be assigned to any other node";
            }
        }
        if (finalExplanation == null && finalDecision == Decision.Type.NO) {
            // the final decision is NO (no node to move the shard to) and we are not in explain mode, return a cached version
            return CACHED_CANNOT_MOVE_DECISION;
        } else {
            assert ((assignedNodeId == null) == (finalDecision != Decision.Type.YES));
            return new MoveDecision(canRemainDecision, finalDecision, finalExplanation, assignedNodeId, nodeDecisions);
        }
    }

    /**
     * Returns {@code true} if the shard cannot remain on its current node and can be moved, returns {@code false} otherwise.
     */
    public boolean move() {
        return cannotRemain() && getFinalDecisionType() == Decision.Type.YES;
    }

    /**
     * Returns {@code true} if the shard cannot remain on its current node.
     */
    public boolean cannotRemain() {
        return isDecisionTaken() && canRemainDecision.type() == Decision.Type.NO;
    }

    /**
     * Gets the individual node-level decisions that went into making the final decision as represented by
     * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeAllocationResult}.
     */
    @Nullable
    public Map<String, NodeAllocationResult> getNodeDecisions() {
        return nodeDecisions;
    }
}

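A rough usage sketch of the two factory methods above, compiling against the classes this changeset introduces; the helper class and its parameters are hypothetical stand-ins, not code from the commit:

// Sketch only: how an allocator-side caller might drive stay() vs decision().
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

import java.util.Map;

class MoveDecisionUsageSketch {
    static MoveDecision decide(Decision canRemain, Decision.Type bestMoveType, boolean explain,
                               String currentNodeId, String targetNodeId,
                               Map<String, NodeAllocationResult> nodeResults) {
        if (canRemain.type() != Decision.Type.NO) {
            // shard may stay; non-explain callers get the cached stay singleton
            return MoveDecision.stay(canRemain, explain);
        }
        // shard must leave its node; record where it can go, if anywhere
        return MoveDecision.decision(canRemain, bestMoveType, explain, currentNodeId,
            targetNodeId, explain ? nodeResults : null);
    }
}
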
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;

import java.util.Objects;

/**
 * This class represents the shard allocation decision for a single node,
 * including the {@link Decision} whether to allocate to the node and other
 * information related to obtaining the decision for the node.
 */
public final class NodeAllocationResult {

    private final Decision decision;
    private final float weight;

    public NodeAllocationResult(Decision decision) {
        this.decision = Objects.requireNonNull(decision);
        this.weight = Float.POSITIVE_INFINITY;
    }

    public NodeAllocationResult(Decision decision, float weight) {
        this.decision = Objects.requireNonNull(decision);
        this.weight = Objects.requireNonNull(weight);
    }

    /**
     * The decision for allocating to the node.
     */
    public Decision getDecision() {
        return decision;
    }

    /**
     * The calculated weight for allocating a shard to the node. A value of {@link Float#POSITIVE_INFINITY}
     * means the weight was not calculated or factored into the decision.
     */
    public float getWeight() {
        return weight;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        NodeAllocationResult that = (NodeAllocationResult) other;
        return decision.equals(that.decision) && Float.compare(weight, that.weight) == 0;
    }

    @Override
    public int hashCode() {
        return Objects.hash(decision, weight);
    }
}

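Note that equals() above compares the float field with Float.compare rather than ==. A small standalone demo (illustrative, not part of the changeset) of why that keeps equals()/hashCode() consistent for NaN and signed-zero weights:

// Float.compare treats NaN as equal to itself and distinguishes 0.0f from -0.0f.
public class FloatEqualityDemo {
    public static void main(String[] args) {
        System.out.println(Float.NaN == Float.NaN);              // false: == breaks reflexivity
        System.out.println(Float.compare(Float.NaN, Float.NaN)); // 0: compare treats NaN as equal
        System.out.println(0.0f == -0.0f);                       // true: == conflates signed zeros
        System.out.println(Float.compare(0.0f, -0.0f));          // 1: compare tells them apart
    }
}
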
@@ -0,0 +1,88 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;

import java.util.Objects;

/**
 * A node-level explanation for the decision to rebalance a shard.
 */
public final class NodeRebalanceResult {
    private final Decision.Type nodeDecisionType;
    private final Decision canAllocate;
    private final boolean betterWeightThanCurrent;
    private final boolean deltaAboveThreshold;
    private final float currentWeight;
    private final float weightWithShardAdded;

    public NodeRebalanceResult(Decision.Type nodeDecisionType, Decision canAllocate, boolean betterWeightThanCurrent,
                               boolean deltaAboveThreshold, float currentWeight, float weightWithShardAdded) {
        this.nodeDecisionType = Objects.requireNonNull(nodeDecisionType);
        this.canAllocate = Objects.requireNonNull(canAllocate);
        this.betterWeightThanCurrent = betterWeightThanCurrent;
        this.deltaAboveThreshold = deltaAboveThreshold;
        this.currentWeight = currentWeight;
        this.weightWithShardAdded = weightWithShardAdded;
    }

    /**
     * Returns the decision to rebalance to the node.
     */
    public Decision.Type getNodeDecisionType() {
        return nodeDecisionType;
    }

    /**
     * Returns whether the shard is allowed to be allocated to the node.
     */
    public Decision getCanAllocateDecision() {
        return canAllocate;
    }

    /**
     * Returns whether the weight of the node is better than the weight of the node where the shard currently resides.
     */
    public boolean isBetterWeightThanCurrent() {
        return betterWeightThanCurrent;
    }

    /**
     * Returns if the weight delta by assigning to this node was above the threshold to warrant a rebalance.
     */
    public boolean isDeltaAboveThreshold() {
        return deltaAboveThreshold;
    }

    /**
     * Returns the current weight of the node if the shard is not added to the node.
     */
    public float getCurrentWeight() {
        return currentWeight;
    }

    /**
     * Returns the weight of the node if the shard is added to the node.
     */
    public float getWeightWithShardAdded() {
        return weightWithShardAdded;
    }
}

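One plausible reading of the two boolean getters above, as a simplified sketch (all names and the exact condition are hypothetical; the real weighting lives in the balancer): a rebalance is only worthwhile when the candidate node is better weighted AND the improvement clears the configured threshold, which is what prevents shards from oscillating between near-equal nodes.

// Sketch only, assumed simplification of the weight bookkeeping these getters expose.
final class RebalanceWeightSketch {
    static boolean worthRebalancing(float currentWeight, float weightWithShardAdded, float threshold) {
        boolean betterWeightThanCurrent = weightWithShardAdded < currentWeight;
        boolean deltaAboveThreshold = (currentWeight - weightWithShardAdded) > threshold;
        return betterWeightThanCurrent && deltaAboveThreshold;
    }
}
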
@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.Nullable;

import java.util.Collections;
import java.util.Map;

/**
 * Represents a decision to move a started shard to form a more optimally balanced cluster.
 */
public final class RebalanceDecision extends RelocationDecision {
    /** a constant representing no decision taken */
    public static final RebalanceDecision NOT_TAKEN = new RebalanceDecision(null, null, null, null, null, Float.POSITIVE_INFINITY);

    @Nullable
    private final Decision canRebalanceDecision;
    @Nullable
    private final Map<String, NodeRebalanceResult> nodeDecisions;
    private float currentWeight;

    public RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation) {
        this(canRebalanceDecision, finalDecision, finalExplanation, null, null, Float.POSITIVE_INFINITY);
    }

    public RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation,
                             String assignedNodeId, Map<String, NodeRebalanceResult> nodeDecisions, float currentWeight) {
        super(finalDecision, finalExplanation, assignedNodeId);
        this.canRebalanceDecision = canRebalanceDecision;
        this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
        this.currentWeight = currentWeight;
    }

    /**
     * Creates a new {@link RebalanceDecision}, computing the explanation based on the decision parameters.
     */
    public static RebalanceDecision decision(Decision canRebalanceDecision, Type finalDecision, String assignedNodeId,
                                             Map<String, NodeRebalanceResult> nodeDecisions, float currentWeight, float threshold) {
        final String explanation = produceFinalExplanation(finalDecision, assignedNodeId, threshold);
        return new RebalanceDecision(canRebalanceDecision, finalDecision, explanation, assignedNodeId, nodeDecisions, currentWeight);
    }

    /**
     * Returns the decision for being allowed to rebalance the shard.
     */
    @Nullable
    public Decision getCanRebalanceDecision() {
        return canRebalanceDecision;
    }

    /**
     * Gets the individual node-level decisions that went into making the final decision as represented by
     * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeRebalanceResult}.
     */
    @Nullable
    public Map<String, NodeRebalanceResult> getNodeDecisions() {
        return nodeDecisions;
    }

    private static String produceFinalExplanation(final Type finalDecisionType, final String assignedNodeId, final float threshold) {
        final String finalExplanation;
        if (assignedNodeId != null) {
            if (finalDecisionType == Type.THROTTLE) {
                finalExplanation = "throttle moving shard to node [" + assignedNodeId + "], as it is " +
                    "currently busy with other shard relocations";
            } else {
                finalExplanation = "moving shard to node [" + assignedNodeId + "] to form a more balanced cluster";
            }
        } else {
            finalExplanation = "cannot rebalance shard, no other node exists that would form a more balanced " +
                "cluster within the defined threshold [" + threshold + "]";
        }
        return finalExplanation;
    }
}

@@ -0,0 +1,74 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;

/**
 * Represents a decision to relocate a started shard from its current node.
 */
public abstract class RelocationDecision {
    @Nullable
    private final Decision.Type finalDecision;
    @Nullable
    private final String finalExplanation;
    @Nullable
    private final String assignedNodeId;

    protected RelocationDecision(Decision.Type finalDecision, String finalExplanation, String assignedNodeId) {
        this.finalDecision = finalDecision;
        this.finalExplanation = finalExplanation;
        this.assignedNodeId = assignedNodeId;
    }

    /**
     * Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
     * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
     */
    public boolean isDecisionTaken() {
        return finalDecision != null;
    }

    /**
     * Returns the final decision made by the allocator on whether to assign the shard, and
     * {@code null} if no decision was taken.
     */
    public Decision.Type getFinalDecisionType() {
        return finalDecision;
    }

    /**
     * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecisionType()}.
     */
    @Nullable
    public String getFinalExplanation() {
        return finalExplanation;
    }

    /**
     * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecisionType()} returns
     * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
     */
    @Nullable
    public String getAssignedNodeId() {
        return assignedNodeId;
    }
}

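The decision hierarchy above uses a null final decision as a NOT_TAKEN sentinel, so callers must gate on isDecisionTaken() before reading any other getter. A short consumer sketch compiling against the classes in this changeset (the helper itself is hypothetical):

// Sketch only: safe consumption of the NOT_TAKEN sentinel convention.
import org.elasticsearch.cluster.routing.allocation.RebalanceDecision;

class DecisionSentinelSketch {
    static String describe(RebalanceDecision decision) {
        if (decision.isDecisionTaken() == false) {
            // every other field is null/meaningless on the NOT_TAKEN singleton
            return "allocator did not evaluate this shard";
        }
        String explanation = decision.getFinalExplanation(); // null outside explain mode
        return decision.getFinalDecisionType() + (explanation != null ? ": " + explanation : "");
    }
}
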
@@ -30,14 +30,16 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.MoveDecision;
import org.elasticsearch.cluster.routing.allocation.NodeRebalanceResult;
import org.elasticsearch.cluster.routing.allocation.RebalanceDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;

@@ -54,7 +56,6 @@ import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;

@@ -368,7 +369,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        final float currentWeight = sorter.weight(currentNode);
        final AllocationDeciders deciders = allocation.deciders();
        final String idxName = shard.getIndexName();
        Map<String, NodeRebalanceDecision> nodeDecisions = new HashMap<>(modelNodes.length - 1);
        Map<String, NodeRebalanceResult> nodeDecisions = new HashMap<>(modelNodes.length - 1);
        Type rebalanceDecisionType = Type.NO;
        String assignedNodeId = null;
        for (ModelNode node : modelNodes) {

@@ -412,7 +413,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                    assignedNodeId = node.getNodeId();
                }
            }
            nodeDecisions.put(node.getNodeId(), new NodeRebalanceDecision(
            nodeDecisions.put(node.getNodeId(), new NodeRebalanceResult(
                rebalanceConditionsMet ? canAllocate.type() : Type.NO,
                canAllocate,
                betterWeightThanCurrent,

@@ -683,14 +684,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         */
        Type bestDecision = Type.NO;
        RoutingNode targetNode = null;
        final Map<String, WeightedDecision> nodeExplanationMap = explain ? new HashMap<>() : null;
        final Map<String, NodeAllocationResult> nodeExplanationMap = explain ? new HashMap<>() : null;
        for (ModelNode currentNode : sorter.modelNodes) {
            if (currentNode != sourceNode) {
                RoutingNode target = currentNode.getRoutingNode();
                // don't use canRebalance as we want hard filtering rules to apply. See #17698
                Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
                if (explain) {
                    nodeExplanationMap.put(currentNode.getNodeId(), new WeightedDecision(allocationDecision, sorter.weight(currentNode)));
                    nodeExplanationMap.put(currentNode.getNodeId(), new NodeAllocationResult(allocationDecision, sorter.weight(currentNode)));
                }
                // TODO maybe we can respect throttling here too?
                if (allocationDecision.type().higherThan(bestDecision)) {

@@ -791,7 +792,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        do {
            for (int i = 0; i < primaryLength; i++) {
                ShardRouting shard = primary[i];
                ShardAllocationDecision allocationDecision = decideAllocateUnassigned(shard, throttledNodes);
                AllocateUnassignedDecision allocationDecision = decideAllocateUnassigned(shard, throttledNodes);
                final Type decisionType = allocationDecision.getFinalDecisionType();
                final String assignedNodeId = allocationDecision.getAssignedNodeId();
                final ModelNode minNode = assignedNodeId != null ? nodes.get(assignedNodeId) : null;

@@ -864,16 +865,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
     * {@link ModelNode} representing the node that the shard should be assigned to. If the decision returned
     * is of type {@link Type#NO}, then the assigned node will be null.
     */
    private ShardAllocationDecision decideAllocateUnassigned(final ShardRouting shard, final Set<ModelNode> throttledNodes) {
    private AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard, final Set<ModelNode> throttledNodes) {
        if (shard.assignedToNode()) {
            // we only make decisions for unassigned shards here
            return ShardAllocationDecision.DECISION_NOT_TAKEN;
            return AllocateUnassignedDecision.NOT_TAKEN;
        }

        Decision shardLevelDecision = allocation.deciders().canAllocate(shard, allocation);
        if (shardLevelDecision.type() == Type.NO) {
            // NO decision for allocating the shard, irrespective of any particular node, so exit early
            return ShardAllocationDecision.no(shardLevelDecision, explain("cannot allocate shard in its current state"));
            return AllocateUnassignedDecision.no(shardLevelDecision, explain("cannot allocate shard in its current state"));
        }

        /* find a node with minimal weight we can allocate on */

@@ -884,11 +885,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        if (throttledNodes.size() >= nodes.size() && explain == false) {
            // all nodes are throttled, so we know we won't be able to allocate this round,
            // so if we are not in explain mode, short circuit
            return ShardAllocationDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null);
            return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, null);
        }
        /* Don't iterate over an identity hashset here the
         * iteration order is different for each run and makes testing hard */
        Map<String, WeightedDecision> nodeExplanationMap = explain ? new HashMap<>() : null;
        Map<String, NodeAllocationResult> nodeExplanationMap = explain ? new HashMap<>() : null;
        for (ModelNode node : nodes.values()) {
            if ((throttledNodes.contains(node) || node.containsShard(shard)) && explain == false) {
                // decision is NO without needing to check anything further, so short circuit

@@ -904,7 +905,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards

            Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation);
            if (explain) {
                nodeExplanationMap.put(node.getNodeId(), new WeightedDecision(currentDecision, currentWeight));
                nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(currentDecision, currentWeight));
            }
            if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
                final boolean updateMinNode;

@@ -945,7 +946,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            // decision was not set and a node was not assigned, so treat it as a NO decision
            decision = Decision.NO;
        }
        return ShardAllocationDecision.fromDecision(
        return AllocateUnassignedDecision.fromDecision(
            decision,
            minNode != null ? minNode.getNodeId() : null,
            explain,

@@ -1223,287 +1224,4 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        }
    }

    /**
     * Represents a decision to relocate a started shard from its current node.
     */
    public abstract static class RelocationDecision {
        @Nullable
        private final Type finalDecision;
        @Nullable
        private final String finalExplanation;
        @Nullable
        private final String assignedNodeId;

        protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId) {
            this.finalDecision = finalDecision;
            this.finalExplanation = finalExplanation;
            this.assignedNodeId = assignedNodeId;
        }

        /**
         * Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
         * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
         */
        public boolean isDecisionTaken() {
            return finalDecision != null;
        }

        /**
         * Returns the final decision made by the allocator on whether to assign the shard, and
         * {@code null} if no decision was taken.
         */
        public Type getFinalDecisionType() {
            return finalDecision;
        }

        /**
         * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecisionType()}.
         */
        @Nullable
        public String getFinalExplanation() {
            return finalExplanation;
        }

        /**
         * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecisionType()} returns
         * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
         */
        @Nullable
        public String getAssignedNodeId() {
            return assignedNodeId;
        }
    }

    /**
     * Represents a decision to move a started shard because it is no longer allowed to remain on its current node.
     */
    public static final class MoveDecision extends RelocationDecision {
        /** a constant representing no decision taken */
        public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, null, null, null);
        /** cached decisions so we don't have to recreate objects for common decisions when not in explain mode. */
        private static final MoveDecision CACHED_STAY_DECISION = new MoveDecision(Decision.YES, Type.NO, null, null, null);
        private static final MoveDecision CACHED_CANNOT_MOVE_DECISION = new MoveDecision(Decision.NO, Type.NO, null, null, null);

        @Nullable
        private final Decision canRemainDecision;
        @Nullable
        private final Map<String, WeightedDecision> nodeDecisions;

        private MoveDecision(Decision canRemainDecision, Type finalDecision, String finalExplanation,
                             String assignedNodeId, Map<String, WeightedDecision> nodeDecisions) {
            super(finalDecision, finalExplanation, assignedNodeId);
            this.canRemainDecision = canRemainDecision;
            this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
        }

        /**
         * Creates a move decision for the shard being able to remain on its current node, so not moving.
         */
        public static MoveDecision stay(Decision canRemainDecision, boolean explain) {
            assert canRemainDecision.type() != Type.NO;
            if (explain) {
                final String explanation;
                if (explain) {
                    explanation = "shard is allowed to remain on its current node, so no reason to move";
                } else {
                    explanation = null;
                }
                return new MoveDecision(Objects.requireNonNull(canRemainDecision), Type.NO, explanation, null, null);
            } else {
                return CACHED_STAY_DECISION;
            }
        }

        /**
         * Creates a move decision for the shard not being able to remain on its current node.
         *
         * @param canRemainDecision the decision for whether the shard is allowed to remain on its current node
         * @param finalDecision the decision of whether to move the shard to another node
         * @param explain true if in explain mode
         * @param currentNodeId the current node id where the shard is assigned
         * @param assignedNodeId the node id for where the shard can move to
         * @param nodeDecisions the node-level decisions that comprised the final decision, non-null iff explain is true
         * @return the {@link MoveDecision} for moving the shard to another node
         */
        public static MoveDecision decision(Decision canRemainDecision, Type finalDecision, boolean explain, String currentNodeId,
                                            String assignedNodeId, Map<String, WeightedDecision> nodeDecisions) {
            assert canRemainDecision != null;
            assert canRemainDecision.type() != Type.YES : "create decision with MoveDecision#stay instead";
            String finalExplanation = null;
            if (explain) {
                assert currentNodeId != null;
                if (finalDecision == Type.YES) {
                    assert assignedNodeId != null;
                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], moving to node [" + assignedNodeId + "]";
                } else if (finalDecision == Type.THROTTLE) {
                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], throttled on moving to another node";
                } else {
                    finalExplanation = "shard cannot remain on node [" + currentNodeId + "], but cannot be assigned to any other node";
                }
            }
            if (finalExplanation == null && finalDecision == Type.NO) {
                // the final decision is NO (no node to move the shard to) and we are not in explain mode, return a cached version
                return CACHED_CANNOT_MOVE_DECISION;
            } else {
                assert ((assignedNodeId == null) == (finalDecision != Type.YES));
                return new MoveDecision(canRemainDecision, finalDecision, finalExplanation, assignedNodeId, nodeDecisions);
            }
        }

        /**
         * Returns {@code true} if the shard cannot remain on its current node and can be moved, returns {@code false} otherwise.
         */
        public boolean move() {
            return cannotRemain() && getFinalDecisionType() == Type.YES;
        }

        /**
         * Returns {@code true} if the shard cannot remain on its current node.
         */
        public boolean cannotRemain() {
            return isDecisionTaken() && canRemainDecision.type() == Type.NO;
        }

        /**
         * Gets the individual node-level decisions that went into making the final decision as represented by
         * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}.
         */
        @Nullable
        public Map<String, WeightedDecision> getNodeDecisions() {
            return nodeDecisions;
        }
    }

    /**
     * Represents a decision to move a started shard to form a more optimally balanced cluster.
     */
    public static final class RebalanceDecision extends RelocationDecision {
        /** a constant representing no decision taken */
        public static final RebalanceDecision NOT_TAKEN = new RebalanceDecision(null, null, null, null, null, Float.POSITIVE_INFINITY);

        @Nullable
        private final Decision canRebalanceDecision;
        @Nullable
        private final Map<String, NodeRebalanceDecision> nodeDecisions;
        private float currentWeight;

        protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation) {
            this(canRebalanceDecision, finalDecision, finalExplanation, null, null, Float.POSITIVE_INFINITY);
        }

        protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation,
                                    String assignedNodeId, Map<String, NodeRebalanceDecision> nodeDecisions, float currentWeight) {
            super(finalDecision, finalExplanation, assignedNodeId);
            this.canRebalanceDecision = canRebalanceDecision;
            this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
            this.currentWeight = currentWeight;
        }

        /**
         * Creates a new {@link RebalanceDecision}, computing the explanation based on the decision parameters.
         */
        public static RebalanceDecision decision(Decision canRebalanceDecision, Type finalDecision, String assignedNodeId,
                                                 Map<String, NodeRebalanceDecision> nodeDecisions, float currentWeight, float threshold) {
            final String explanation = produceFinalExplanation(finalDecision, assignedNodeId, threshold);
            return new RebalanceDecision(canRebalanceDecision, finalDecision, explanation, assignedNodeId, nodeDecisions, currentWeight);
        }

        /**
         * Returns the decision for being allowed to rebalance the shard.
         */
        @Nullable
        public Decision getCanRebalanceDecision() {
            return canRebalanceDecision;
        }

        /**
         * Gets the individual node-level decisions that went into making the final decision as represented by
         * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeRebalanceDecision}.
         */
        @Nullable
        public Map<String, NodeRebalanceDecision> getNodeDecisions() {
            return nodeDecisions;
        }

        private static String produceFinalExplanation(final Type finalDecisionType, final String assignedNodeId, final float threshold) {
            final String finalExplanation;
            if (assignedNodeId != null) {
                if (finalDecisionType == Type.THROTTLE) {
                    finalExplanation = "throttle moving shard to node [" + assignedNodeId + "], as it is " +
                        "currently busy with other shard relocations";
                } else {
                    finalExplanation = "moving shard to node [" + assignedNodeId + "] to form a more balanced cluster";
                }
            } else {
                finalExplanation = "cannot rebalance shard, no other node exists that would form a more balanced " +
                    "cluster within the defined threshold [" + threshold + "]";
            }
            return finalExplanation;
        }
    }

    /**
     * A node-level explanation for the decision to rebalance a shard.
     */
    public static final class NodeRebalanceDecision {
        private final Type nodeDecisionType;
        private final Decision canAllocate;
        private final boolean betterWeightThanCurrent;
        private final boolean deltaAboveThreshold;
        private final float currentWeight;
        private final float weightWithShardAdded;

        NodeRebalanceDecision(Type nodeDecisionType, Decision canAllocate, boolean betterWeightThanCurrent,
                              boolean deltaAboveThreshold, float currentWeight, float weightWithShardAdded) {
            this.nodeDecisionType = Objects.requireNonNull(nodeDecisionType);
            this.canAllocate = Objects.requireNonNull(canAllocate);
            this.betterWeightThanCurrent = betterWeightThanCurrent;
            this.deltaAboveThreshold = deltaAboveThreshold;
            this.currentWeight = currentWeight;
            this.weightWithShardAdded = weightWithShardAdded;
        }

        /**
         * Returns the decision to rebalance to the node.
         */
        public Type getNodeDecisionType() {
            return nodeDecisionType;
        }

        /**
         * Returns whether the shard is allowed to be allocated to the node.
         */
        public Decision getCanAllocateDecision() {
            return canAllocate;
        }

        /**
         * Returns whether the weight of the node is better than the weight of the node where the shard currently resides.
         */
        public boolean isBetterWeightThanCurrent() {
            return betterWeightThanCurrent;
        }

        /**
         * Returns if the weight delta by assigning to this node was above the threshold to warrant a rebalance.
         */
        public boolean isDeltaAboveThreshold() {
            return deltaAboveThreshold;
        }

        /**
         * Returns the current weight of the node if the shard is not added to the node.
         */
        public float getCurrentWeight() {
            return currentWeight;
        }

        /**
         * Returns the weight of the node if the shard is added to the node.
         */
        public float getWeightWithShardAdded() {
            return weightWithShardAdded;
        }
    }

}

@ -21,6 +21,9 @@ package org.elasticsearch.common;
|
|||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.BigInteger;
|
||||
|
||||
/**
|
||||
* A set of utilities for numbers.
|
||||
*/
|
||||
|
@ -178,4 +181,56 @@ public final class Numbers {
|
|||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Return the long that {@code n} stores, or throws an exception if the
|
||||
* stored value cannot be converted to a long that stores the exact same
|
||||
* value. */
|
||||
public static long toLongExact(Number n) {
|
||||
if (n instanceof Byte || n instanceof Short || n instanceof Integer
|
||||
|| n instanceof Long) {
|
||||
return n.longValue();
|
||||
} else if (n instanceof Float || n instanceof Double) {
|
||||
double d = n.doubleValue();
|
||||
if (d != Math.round(d)) {
|
||||
throw new IllegalArgumentException(n + " is not an integer value");
|
||||
}
|
||||
return n.longValue();
|
||||
} else if (n instanceof BigDecimal) {
|
||||
return ((BigDecimal) n).toBigIntegerExact().longValueExact();
|
||||
} else if (n instanceof BigInteger) {
|
||||
return ((BigInteger) n).longValueExact();
|
||||
} else {
|
||||
throw new IllegalArgumentException("Cannot check whether [" + n + "] of class [" + n.getClass().getName()
|
||||
+ "] is actually a long");
|
||||
}
|
||||
}
|
||||
|
||||
/** Return the int that {@code n} stores, or throws an exception if the
|
||||
* stored value cannot be converted to an int that stores the exact same
|
||||
* value. */
|
||||
public static int toIntExact(Number n) {
|
||||
return Math.toIntExact(toLongExact(n));
|
||||
}
|
||||
|
||||
/** Return the short that {@code n} stores, or throws an exception if the
|
||||
* stored value cannot be converted to a short that stores the exact same
|
||||
* value. */
|
||||
public static short toShortExact(Number n) {
|
||||
long l = toLongExact(n);
|
||||
if (l != (short) l) {
|
||||
throw new ArithmeticException("short overflow: " + l);
|
||||
}
|
||||
return (short) l;
|
||||
}
|
||||
|
||||
/** Return the byte that {@code n} stores, or throws an exception if the
|
||||
* stored value cannot be converted to a byte that stores the exact same
|
||||
* value. */
|
||||
public static byte toByteExact(Number n) {
|
||||
long l = toLongExact(n);
|
||||
if (l != (byte) l) {
|
||||
throw new ArithmeticException("byte overflow: " + l);
|
||||
}
|
||||
return (byte) l;
|
||||
}
|
||||
}
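To make the contract of these exact converters concrete, a short usage sketch (values chosen for illustration):

    long a = Numbers.toLongExact(Integer.valueOf(42));    // 42 — integral types pass through
    long b = Numbers.toLongExact(new BigDecimal("7"));    // 7  — an exact BigDecimal is accepted
    int  c = Numbers.toIntExact(Double.valueOf(3.0));     // 3  — a lossless double is accepted
    // Numbers.toLongExact(Double.valueOf(2.5))  -> IllegalArgumentException: 2.5 is not an integer value
    // Numbers.toByteExact(Integer.valueOf(300)) -> ArithmeticException: byte overflow: 300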

@@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

@@ -215,6 +216,7 @@ public abstract class BytesReference implements Accountable, Comparable<BytesRef
     * that way.
     */
    private static final class MarkSupportingStreamInputWrapper extends StreamInput {
        // can't use FilterStreamInput since it needs to reset the delegate
        private final BytesReference reference;
        private BytesReferenceStreamInput input;
        private int mark = 0;

@@ -254,6 +256,11 @@ public abstract class BytesReference implements Accountable, Comparable<BytesRef
            return input.available();
        }

        @Override
        protected void ensureCanReadBytes(int length) throws EOFException {
            input.ensureCanReadBytes(length);
        }

        @Override
        public void reset() throws IOException {
            input = new BytesReferenceStreamInput(reference.iterator(), reference.length());

@@ -114,6 +114,14 @@ final class BytesReferenceStreamInput extends StreamInput {
        return length - offset;
    }

    @Override
    protected void ensureCanReadBytes(int bytesToRead) throws EOFException {
        int bytesAvailable = length - offset;
        if (bytesAvailable < bytesToRead) {
            throw new EOFException("tried to read: " + bytesToRead + " bytes but only " + bytesAvailable + " remaining");
        }
    }

    @Override
    public long skip(long n) throws IOException {
        final int skip = (int) Math.min(Integer.MAX_VALUE, n);

@@ -86,6 +86,13 @@ public class ByteBufferStreamInput extends StreamInput {
        return buffer.remaining();
    }

    @Override
    protected void ensureCanReadBytes(int length) throws EOFException {
        if (buffer.remaining() < length) {
            throw new EOFException("tried to read: " + length + " bytes but only " + buffer.remaining() + " remaining");
        }
    }

    @Override
    public void mark(int readlimit) {
        buffer.mark();

@@ -21,6 +21,7 @@ package org.elasticsearch.common.io.stream;

import org.elasticsearch.Version;

import java.io.EOFException;
import java.io.IOException;

/**
@@ -28,7 +29,7 @@ import java.io.IOException;
 */
public abstract class FilterStreamInput extends StreamInput {

    private final StreamInput delegate;
    protected final StreamInput delegate;

    protected FilterStreamInput(StreamInput delegate) {
        this.delegate = delegate;
@@ -73,4 +74,9 @@ public abstract class FilterStreamInput extends StreamInput {
    public void setVersion(Version version) {
        delegate.setVersion(version);
    }

    @Override
    protected void ensureCanReadBytes(int length) throws EOFException {
        delegate.ensureCanReadBytes(length);
    }
}

@@ -95,4 +95,9 @@ public class InputStreamStreamInput extends StreamInput {
    public long skip(long n) throws IOException {
        return is.skip(n);
    }

    @Override
    protected void ensureCanReadBytes(int length) throws EOFException {
        // TODO what can we do here?
    }
}

@@ -24,9 +24,10 @@ import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.CharsRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;

@@ -111,7 +112,7 @@ public abstract class StreamInput extends InputStream {
     * bytes of the stream.
     */
    public BytesReference readBytesReference() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        return readBytesReference(length);
    }

@@ -143,7 +144,7 @@ public abstract class StreamInput extends InputStream {
    }

    public BytesRef readBytesRef() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        return readBytesRef(length);
    }

@@ -323,15 +324,22 @@ public abstract class StreamInput extends InputStream {
        return null;
    }

    private final CharsRefBuilder spare = new CharsRefBuilder();
    // we don't use a CharsRefBuilder since we know the exact size of the character array up front
    // this prevents calling grow for every character since we don't need to
    private final CharsRef spare = new CharsRef();

    public String readString() throws IOException {
        final int charCount = readVInt();
        spare.clear();
        spare.grow(charCount);
        int c;
        while (spare.length() < charCount) {
            c = readByte() & 0xff;
        // TODO it would be nice to not call readByte() for every character but we don't know how much to read up-front
        // we can make the loop much more complicated but that won't buy us much compared to the bounds checks in readByte()
        final int charCount = readArraySize();
        if (spare.chars.length < charCount) {
            // we don't use ArrayUtils.grow since there is no need to copy the array
            spare.chars = new char[ArrayUtil.oversize(charCount, Character.BYTES)];
        }
        spare.length = charCount;
        final char[] buffer = spare.chars;
        for (int i = 0; i < charCount; i++) {
            final int c = readByte() & 0xff;
            switch (c >> 4) {
                case 0:
                case 1:
@@ -341,15 +349,17 @@ public abstract class StreamInput extends InputStream {
                case 5:
                case 6:
                case 7:
                    spare.append((char) c);
                    buffer[i] = (char) c;
                    break;
                case 12:
                case 13:
                    spare.append((char) ((c & 0x1F) << 6 | readByte() & 0x3F));
                    buffer[i] = ((char) ((c & 0x1F) << 6 | readByte() & 0x3F));
                    break;
                case 14:
                    spare.append((char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0));
                    buffer[i] = ((char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0));
                    break;
                default:
                    throw new AssertionError("unexpected character: " + c + " hex: " + Integer.toHexString(c));
            }
        }
        return spare.toString();
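The switch above is a hand-rolled UTF-8 decoder dispatching on the lead byte's high nibble. A self-contained sketch of the same decoding, stripped of the stream plumbing (helper name hypothetical; BMP characters only, as above):

    // Decode one character from bytes at position p, mirroring readString():
    // lead nibble 0-7 => 1 byte (ASCII), 12-13 => 2 bytes, 14 => 3 bytes.
    static char decodeChar(byte[] in, int p) {
        final int c = in[p] & 0xff;
        switch (c >> 4) {
            case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
                return (char) c;                                              // 1 byte
            case 12: case 13:
                return (char) ((c & 0x1F) << 6 | in[p + 1] & 0x3F);           // 2 bytes
            case 14:
                return (char) ((c & 0x0F) << 12 | (in[p + 1] & 0x3F) << 6 | (in[p + 2] & 0x3F)); // 3 bytes
            default:
                throw new AssertionError("unexpected lead byte: " + c);
        }
    }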
@@ -401,7 +411,7 @@ public abstract class StreamInput extends InputStream {
    public abstract int available() throws IOException;

    public String[] readStringArray() throws IOException {
        int size = readVInt();
        int size = readArraySize();
        if (size == 0) {
            return Strings.EMPTY_ARRAY;
        }
@@ -421,7 +431,7 @@ public abstract class StreamInput extends InputStream {
    }

    public <K, V> Map<K, V> readMap(Writeable.Reader<K> keyReader, Writeable.Reader<V> valueReader) throws IOException {
        int size = readVInt();
        int size = readArraySize();
        Map<K, V> map = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            K key = keyReader.read(this);
@@ -443,7 +453,7 @@ public abstract class StreamInput extends InputStream {
     */
    public <K, V> Map<K, List<V>> readMapOfLists(final Writeable.Reader<K> keyReader, final Writeable.Reader<V> valueReader)
            throws IOException {
        final int size = readVInt();
        final int size = readArraySize();
        if (size == 0) {
            return Collections.emptyMap();
        }
@@ -520,7 +530,7 @@ public abstract class StreamInput extends InputStream {

    @SuppressWarnings("unchecked")
    private List readArrayList() throws IOException {
        int size = readVInt();
        int size = readArraySize();
        List list = new ArrayList(size);
        for (int i = 0; i < size; i++) {
            list.add(readGenericValue());
@@ -534,7 +544,7 @@ public abstract class StreamInput extends InputStream {
    }

    private Object[] readArray() throws IOException {
        int size8 = readVInt();
        int size8 = readArraySize();
        Object[] list8 = new Object[size8];
        for (int i = 0; i < size8; i++) {
            list8[i] = readGenericValue();
@@ -543,7 +553,7 @@ public abstract class StreamInput extends InputStream {
    }

    private Map readLinkedHashMap() throws IOException {
        int size9 = readVInt();
        int size9 = readArraySize();
        Map map9 = new LinkedHashMap(size9);
        for (int i = 0; i < size9; i++) {
            map9.put(readString(), readGenericValue());
@@ -552,7 +562,7 @@ public abstract class StreamInput extends InputStream {
    }

    private Map readHashMap() throws IOException {
        int size10 = readVInt();
        int size10 = readArraySize();
        Map map10 = new HashMap(size10);
        for (int i = 0; i < size10; i++) {
            map10.put(readString(), readGenericValue());
@@ -589,7 +599,7 @@ public abstract class StreamInput extends InputStream {
    }

    public int[] readIntArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        int[] values = new int[length];
        for (int i = 0; i < length; i++) {
            values[i] = readInt();
@@ -598,7 +608,7 @@ public abstract class StreamInput extends InputStream {
    }

    public int[] readVIntArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        int[] values = new int[length];
        for (int i = 0; i < length; i++) {
            values[i] = readVInt();
@@ -607,7 +617,7 @@ public abstract class StreamInput extends InputStream {
    }

    public long[] readLongArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        long[] values = new long[length];
        for (int i = 0; i < length; i++) {
            values[i] = readLong();
@@ -616,7 +626,7 @@ public abstract class StreamInput extends InputStream {
    }

    public long[] readVLongArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        long[] values = new long[length];
        for (int i = 0; i < length; i++) {
            values[i] = readVLong();
@@ -625,7 +635,7 @@ public abstract class StreamInput extends InputStream {
    }

    public float[] readFloatArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        float[] values = new float[length];
        for (int i = 0; i < length; i++) {
            values[i] = readFloat();
@@ -634,7 +644,7 @@ public abstract class StreamInput extends InputStream {
    }

    public double[] readDoubleArray() throws IOException {
        int length = readVInt();
        int length = readArraySize();
        double[] values = new double[length];
        for (int i = 0; i < length; i++) {
            values[i] = readDouble();
@@ -643,14 +653,14 @@ public abstract class StreamInput extends InputStream {
    }

    public byte[] readByteArray() throws IOException {
        final int length = readVInt();
        final int length = readArraySize();
        final byte[] bytes = new byte[length];
        readBytes(bytes, 0, bytes.length);
        return bytes;
    }

    public <T> T[] readArray(Writeable.Reader<T> reader, IntFunction<T[]> arraySupplier) throws IOException {
        int length = readVInt();
        int length = readArraySize();
        T[] values = arraySupplier.apply(length);
        for (int i = 0; i < length; i++) {
            values[i] = reader.read(this);
@@ -822,7 +832,7 @@ public abstract class StreamInput extends InputStream {
     * @throws IOException if any step fails
     */
    public <T extends Streamable> List<T> readStreamableList(Supplier<T> constructor) throws IOException {
        int count = readVInt();
        int count = readArraySize();
        List<T> builder = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            T instance = constructor.get();
@@ -836,7 +846,7 @@ public abstract class StreamInput extends InputStream {
     * Reads a list of objects
     */
    public <T> List<T> readList(Writeable.Reader<T> reader) throws IOException {
        int count = readVInt();
        int count = readArraySize();
        List<T> builder = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            builder.add(reader.read(this));
@@ -848,7 +858,7 @@ public abstract class StreamInput extends InputStream {
     * Reads a list of {@link NamedWriteable}s.
     */
    public <T extends NamedWriteable> List<T> readNamedWriteableList(Class<T> categoryClass) throws IOException {
        int count = readVInt();
        int count = readArraySize();
        List<T> builder = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            builder.add(readNamedWriteable(categoryClass));
@@ -864,4 +874,29 @@ public abstract class StreamInput extends InputStream {
        return new InputStreamStreamInput(new ByteArrayInputStream(bytes, offset, length));
    }

    /**
     * Reads a vint via {@link #readVInt()} and applies basic checks to ensure the read array size is sane.
     * This method uses {@link #ensureCanReadBytes(int)} to ensure this stream has enough bytes to read for the read array size.
     */
    private int readArraySize() throws IOException {
        final int arraySize = readVInt();
        if (arraySize > ArrayUtil.MAX_ARRAY_LENGTH) {
            throw new IllegalStateException("array length must be <= " + ArrayUtil.MAX_ARRAY_LENGTH + " but was: " + arraySize);
        }
        if (arraySize < 0) {
            throw new NegativeArraySizeException("array size must be positive but was: " + arraySize);
        }
        // let's do a sanity check: if we are reading an array size that is bigger than the remaining bytes, we can safely
        // throw an exception instead of allocating the array based on the size. A single corrupted byte can make a node go OOM
        // if the size is large, since for perf reasons we allocate arrays ahead of time
        ensureCanReadBytes(arraySize);
        return arraySize;
    }

    /**
     * This method throws an {@link EOFException} if the given number of bytes cannot be read from this stream. This method might
     * be a no-op depending on the underlying implementation, if information about the remaining bytes is not available.
     */
    protected abstract void ensureCanReadBytes(int length) throws EOFException;

}
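The guard is easiest to appreciate in isolation: a corrupted length prefix must fail before allocation, not after. A minimal standalone sketch of the same check (class and parameter names hypothetical):

    import java.io.EOFException;
    import java.io.IOException;

    final class ArraySizeGuard {
        // `declaredSize` was decoded from a vint on the wire; `remainingBytes` is
        // what the stream can still deliver. Mirrors readArraySize() plus
        // ensureCanReadBytes() above.
        static int checkArraySize(int declaredSize, int remainingBytes) throws IOException {
            if (declaredSize < 0) {
                throw new NegativeArraySizeException("array size must be positive but was: " + declaredSize);
            }
            if (declaredSize > remainingBytes) {
                // refuse before allocating: a single flipped byte in the length
                // prefix must not turn into a huge up-front allocation
                throw new EOFException("tried to read: " + declaredSize + " bytes but only " + remainingBytes + " remaining");
            }
            return declaredSize;
        }
    }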

@@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

@@ -298,23 +299,41 @@ public abstract class StreamOutput extends OutputStream {
        }
    }

    // we use a small buffer to convert strings to bytes since we want to prevent calling writeByte
    // for every byte in the string (see #21660 for details).
    // This buffer will never grow beyond the oversized limit of 1024 bytes and will not be shared across streams
    private byte[] convertStringBuffer = BytesRef.EMPTY_BYTES; // TODO should we reduce it to 0 bytes once the stream is closed?

    public void writeString(String str) throws IOException {
        int charCount = str.length();
        final int charCount = str.length();
        final int bufferSize = Math.min(3 * charCount, 1024); // at most 3 bytes per character is needed here
        if (convertStringBuffer.length < bufferSize) { // we don't use ArrayUtils.grow since copying the bytes is unnecessary
            convertStringBuffer = new byte[ArrayUtil.oversize(bufferSize, Byte.BYTES)];
        }
        byte[] buffer = convertStringBuffer;
        int offset = 0;
        writeVInt(charCount);
        int c;
        for (int i = 0; i < charCount; i++) {
            c = str.charAt(i);
            final int c = str.charAt(i);
            if (c <= 0x007F) {
                writeByte((byte) c);
                buffer[offset++] = ((byte) c);
            } else if (c > 0x07FF) {
                writeByte((byte) (0xE0 | c >> 12 & 0x0F));
                writeByte((byte) (0x80 | c >> 6 & 0x3F));
                writeByte((byte) (0x80 | c >> 0 & 0x3F));
                buffer[offset++] = ((byte) (0xE0 | c >> 12 & 0x0F));
                buffer[offset++] = ((byte) (0x80 | c >> 6 & 0x3F));
                buffer[offset++] = ((byte) (0x80 | c >> 0 & 0x3F));
            } else {
                writeByte((byte) (0xC0 | c >> 6 & 0x1F));
                writeByte((byte) (0x80 | c >> 0 & 0x3F));
                buffer[offset++] = ((byte) (0xC0 | c >> 6 & 0x1F));
                buffer[offset++] = ((byte) (0x80 | c >> 0 & 0x3F));
            }
            // make sure any possible char can fit into the buffer on any iteration
            // we need at most 3 bytes so we flush the buffer once we have less than 3 bytes
            // left before we start another iteration
            if (offset > buffer.length - 3) {
                writeBytes(buffer, offset);
                offset = 0;
            }
        }
        writeBytes(buffer, offset);
    }
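The rewrite batches UTF-8 bytes into a scratch buffer and flushes in bulk rather than paying a virtual writeByte call per byte. A runnable standalone sketch of the same batching pattern (hypothetical sink, not the StreamOutput API):

    import java.io.ByteArrayOutputStream;

    final class BatchedUtf8Writer {
        // Encode `str` into `out`, flushing a small scratch buffer in bulk
        // instead of writing byte-by-byte — the same idea as writeString() above.
        static void writeUtf8(String str, ByteArrayOutputStream out) {
            final byte[] buffer = new byte[1024];
            int offset = 0;
            for (int i = 0; i < str.length(); i++) {
                final int c = str.charAt(i);
                if (c <= 0x007F) {
                    buffer[offset++] = (byte) c;
                } else if (c > 0x07FF) {
                    buffer[offset++] = (byte) (0xE0 | c >> 12 & 0x0F);
                    buffer[offset++] = (byte) (0x80 | c >> 6 & 0x3F);
                    buffer[offset++] = (byte) (0x80 | c & 0x3F);
                } else {
                    buffer[offset++] = (byte) (0xC0 | c >> 6 & 0x1F);
                    buffer[offset++] = (byte) (0x80 | c & 0x3F);
                }
                if (offset > buffer.length - 3) { // a char needs at most 3 bytes
                    out.write(buffer, 0, offset);
                    offset = 0;
                }
            }
            out.write(buffer, 0, offset);
        }
    }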

    public void writeFloat(float v) throws IOException {
@@ -783,7 +802,7 @@ public abstract class StreamOutput extends OutputStream {
            writeVInt(17);
        } else {
            ElasticsearchException ex;
            if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass())) {
            if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass(), version)) {
                ex = (ElasticsearchException) throwable;
            } else {
                ex = new NotSerializableExceptionWrapper(throwable);

@@ -40,6 +40,7 @@ import java.util.TreeMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

@@ -445,10 +446,18 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        boolean changed = false;
        final Set<String> toRemove = new HashSet<>();
        Settings.Builder settingsBuilder = Settings.builder();
        final Predicate<String> canUpdate = (key) -> (onlyDynamic == false && get(key) != null) || hasDynamicSetting(key);
        final Predicate<String> canRemove = (key) -> ( // we can delete if
            onlyDynamic && hasDynamicSetting(key) // it's a dynamic setting and we only do dynamic settings
            || get(key) == null && key.startsWith(ARCHIVED_SETTINGS_PREFIX) // the setting is not registered AND it's been archived
            || (onlyDynamic == false && get(key) != null)); // if it's not dynamic AND we have a key
        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
            if (entry.getValue() == null) {
            if (entry.getValue() == null && (canRemove.test(entry.getKey()) || entry.getKey().endsWith("*"))) {
                // this either accepts null values that satisfy the canRemove test OR wildcard expressions (key ends with *)
                // we don't validate whether there is any dynamic setting with that prefix yet; we could do that in the future
                toRemove.add(entry.getKey());
            } else if ((onlyDynamic == false && get(entry.getKey()) != null) || hasDynamicSetting(entry.getKey())) {
                // we don't set changed here; it's set after we apply deletes below if something actually changed
            } else if (entry.getValue() != null && canUpdate.test(entry.getKey())) {
                validate(entry.getKey(), toApply);
                settingsBuilder.put(entry.getKey(), entry.getValue());
                updates.put(entry.getKey(), entry.getValue());
@@ -456,20 +465,22 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
            } else {
                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
            }

        }
        changed |= applyDeletes(toRemove, target);
        changed |= applyDeletes(toRemove, target, canRemove);
        target.put(settingsBuilder.build());
        return changed;
    }

    private static boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
    private static boolean applyDeletes(Set<String> deletes, Settings.Builder builder, Predicate<String> canRemove) {
        boolean changed = false;
        for (String entry : deletes) {
            Set<String> keysToRemove = new HashSet<>();
            Set<String> keySet = builder.internalMap().keySet();
            for (String key : keySet) {
                if (Regex.simpleMatch(entry, key)) {
                if (Regex.simpleMatch(entry, key) && canRemove.test(key)) {
                    // we have to re-check with canRemove here since we might have a wildcard expression foo.* that matches
                    // dynamic as well as static settings; if that is the case we might remove static settings since we resolve the
                    // wildcards late
                    keysToRemove.add(key);
                }
            }
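Because wildcard patterns resolve late, every concrete key they match is re-checked against canRemove. A compact illustration of that semantics (setting keys and the stand-in predicate are hypothetical; Regex.simpleMatch is the same glob-style matcher used above):

    // A reset for "index.routing.*" matches keys across both groups; only
    // those passing canRemove are actually dropped.
    Predicate<String> canRemove = key -> key.startsWith("index.routing.allocation."); // stand-in predicate
    List<String> keys = Arrays.asList("index.routing.allocation.require.box", "index.routing.rebalance.enable");
    List<String> removable = keys.stream()
            .filter(key -> Regex.simpleMatch("index.routing.*", key) && canRemove.test(key))
            .collect(Collectors.toList());
    // removable == [index.routing.allocation.require.box]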

@@ -24,6 +24,7 @@ import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;

@@ -357,7 +358,7 @@ public class XContentMapValues {

    public static int nodeIntegerValue(Object node) {
        if (node instanceof Number) {
            return ((Number) node).intValue();
            return Numbers.toIntExact((Number) node);
        }
        return Integer.parseInt(node.toString());
    }
@@ -366,10 +367,7 @@ public class XContentMapValues {
        if (node == null) {
            return defaultValue;
        }
        if (node instanceof Number) {
            return ((Number) node).intValue();
        }
        return Integer.parseInt(node.toString());
        return nodeIntegerValue(node);
    }

    public static short nodeShortValue(Object node, short defaultValue) {
@@ -381,7 +379,7 @@ public class XContentMapValues {

    public static short nodeShortValue(Object node) {
        if (node instanceof Number) {
            return ((Number) node).shortValue();
            return Numbers.toShortExact((Number) node);
        }
        return Short.parseShort(node.toString());
    }
@@ -395,7 +393,7 @@ public class XContentMapValues {

    public static byte nodeByteValue(Object node) {
        if (node instanceof Number) {
            return ((Number) node).byteValue();
            return Numbers.toByteExact((Number) node);
        }
        return Byte.parseByte(node.toString());
    }
@@ -409,7 +407,7 @@ public class XContentMapValues {

    public static long nodeLongValue(Object node) {
        if (node instanceof Number) {
            return ((Number) node).longValue();
            return Numbers.toLongExact((Number) node);
        }
        return Long.parseLong(node.toString());
    }
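The practical effect of routing these conversions through Numbers is that lossy mapping values now fail loudly instead of truncating silently. A small before/after sketch (values illustrative):

    // Before: lossy narrowing — a numeric value of 2.5 silently became 2.
    int before = ((Number) Double.valueOf(2.5)).intValue();   // 2, data loss hidden
    // After: the exact converter rejects it instead —
    // Numbers.toIntExact(Double.valueOf(2.5)) -> IllegalArgumentException: 2.5 is not an integer value
    int after = Numbers.toIntExact(Double.valueOf(2.0));      // 2, lossless values still pass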

@@ -19,30 +19,36 @@

package org.elasticsearch.env;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

/**
 * Exception used when the in-memory lock for a shard cannot be obtained
 */
public class ShardLockObtainFailedException extends Exception {
    private final ShardId shardId;
public class ShardLockObtainFailedException extends ElasticsearchException {

    public ShardLockObtainFailedException(ShardId shardId, String message) {
        super(message);
        this.shardId = shardId;
        super(buildMessage(shardId, message));
        this.setShard(shardId);
    }

    public ShardLockObtainFailedException(ShardId shardId, String message, Throwable cause) {
        super(message, cause);
        this.shardId = shardId;
        super(buildMessage(shardId, message), cause);
        this.setShard(shardId);
    }

    @Override
    public String getMessage() {
    public ShardLockObtainFailedException(StreamInput in) throws IOException {
        super(in);
    }

    private static String buildMessage(ShardId shardId, String message) {
        StringBuilder sb = new StringBuilder();
        sb.append(shardId.toString());
        sb.append(": ");
        sb.append(super.getMessage());
        sb.append(message);
        return sb.toString();
    }
}
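Extending ElasticsearchException, together with the new StreamInput constructor, is what lets this exception cross the wire intact. A hedged sketch of the round-trip in the style of the codebase's stream tests (API names as I understand them for this era of the code; treat the exact calls as an assumption):

    ShardId shardId = new ShardId("my_index", "_na_", 0);  // hypothetical shard
    BytesStreamOutput out = new BytesStreamOutput();
    out.writeThrowable(new ShardLockObtainFailedException(shardId, "obtaining shard lock timed out"));
    StreamInput in = out.bytes().streamInput();
    Throwable revived = in.readThrowable();
    // revived is a ShardLockObtainFailedException again, shard id and message intact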

@@ -22,8 +22,8 @@ package org.elasticsearch.gateway;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
@@ -53,21 +53,21 @@ public abstract class BaseGatewayShardAllocator extends AbstractComponent {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
        while (unassignedIterator.hasNext()) {
            final ShardRouting shard = unassignedIterator.next();
            final ShardAllocationDecision shardAllocationDecision = makeAllocationDecision(shard, allocation, logger);
            final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shard, allocation, logger);

            if (shardAllocationDecision.isDecisionTaken() == false) {
            if (allocateUnassignedDecision.isDecisionTaken() == false) {
                // no decision was taken by this allocator
                continue;
            }

            if (shardAllocationDecision.getFinalDecisionSafe() == Decision.Type.YES) {
                unassignedIterator.initialize(shardAllocationDecision.getAssignedNodeId(),
                    shardAllocationDecision.getAllocationId(),
            if (allocateUnassignedDecision.getFinalDecisionSafe() == Decision.Type.YES) {
                unassignedIterator.initialize(allocateUnassignedDecision.getAssignedNodeId(),
                    allocateUnassignedDecision.getAllocationId(),
                    shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
                                      allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
                    allocation.changes());
            } else {
                unassignedIterator.removeAndIgnore(shardAllocationDecision.getAllocationStatus(), allocation.changes());
                unassignedIterator.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
            }
        }
    }
@@ -80,9 +80,9 @@ public abstract class BaseGatewayShardAllocator extends AbstractComponent {
     * @param unassignedShard  the unassigned shard to allocate
     * @param allocation       the current routing state
     * @param logger           the logger
     * @return an {@link ShardAllocationDecision} with the final decision of whether to allocate and details of the decision
     * @return an {@link AllocateUnassignedDecision} with the final decision of whether to allocate and details of the decision
     */
    public abstract ShardAllocationDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                                   RoutingAllocation allocation,
                                                                   Logger logger);
    public abstract AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                                      RoutingAllocation allocation,
                                                                      Logger logger);
}

@@ -31,8 +31,8 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.settings.Setting;
@@ -110,19 +110,19 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
    }

    @Override
    public ShardAllocationDecision makeAllocationDecision(final ShardRouting unassignedShard,
                                                          final RoutingAllocation allocation,
                                                          final Logger logger) {
    public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unassignedShard,
                                                             final RoutingAllocation allocation,
                                                             final Logger logger) {
        if (isResponsibleFor(unassignedShard) == false) {
            // this allocator is not responsible for allocating this shard
            return ShardAllocationDecision.DECISION_NOT_TAKEN;
            return AllocateUnassignedDecision.NOT_TAKEN;
        }

        final boolean explain = allocation.debugDecision();
        final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation);
        if (shardState.hasData() == false) {
            allocation.setHasPendingAsyncFetch();
            return ShardAllocationDecision.no(AllocationStatus.FETCHING_SHARD_DATA,
            return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA,
                explain ? "still fetching shard state from the nodes in the cluster" : null);
        }

@@ -167,18 +167,18 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
                // let BalancedShardsAllocator take care of allocating this shard
                logger.debug("[{}][{}]: missing local data, will restore from [{}]",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
                return ShardAllocationDecision.DECISION_NOT_TAKEN;
                return AllocateUnassignedDecision.NOT_TAKEN;
            } else if (recoverOnAnyNode) {
                // let BalancedShardsAllocator take care of allocating this shard
                logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
                return ShardAllocationDecision.DECISION_NOT_TAKEN;
                return AllocateUnassignedDecision.NOT_TAKEN;
            } else {
                // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
                // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
                // this shard will be picked up when the node joins and we do another allocation reroute
                logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
                    unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
                return ShardAllocationDecision.no(AllocationStatus.NO_VALID_SHARD_COPY,
                return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY,
                    explain ? "shard was previously allocated, but no valid shard copy could be found amongst the nodes in the cluster" : null);
            }
        }
@@ -191,7 +191,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
            logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation",
                unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode());
            final String nodeId = decidedNode.nodeShardState.getNode().getId();
            return ShardAllocationDecision.yes(nodeId,
            return AllocateUnassignedDecision.yes(nodeId,
                "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
                decidedNode.nodeShardState.allocationId(),
                buildNodeDecisions(nodesToAllocate, explain));
@@ -207,20 +207,20 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
                logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode());
                final String nodeId = nodeShardState.getNode().getId();
                return ShardAllocationDecision.yes(nodeId,
                return AllocateUnassignedDecision.yes(nodeId,
                    "allocating the primary shard to node [" + nodeId + "], which has a complete copy of the shard data",
                    nodeShardState.allocationId(),
                    buildNodeDecisions(nodesToForceAllocate, explain));
            } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
                logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards);
                return ShardAllocationDecision.throttle(
                return AllocateUnassignedDecision.throttle(
                    explain ? "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries" : null,
                    buildNodeDecisions(nodesToForceAllocate, explain));
            } else {
                logger.debug("[{}][{}]: forced primary allocation denied [{}]",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard);
                return ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO,
                return AllocateUnassignedDecision.no(AllocationStatus.DECIDERS_NO,
                    explain ? "all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted" : null,
                    buildNodeDecisions(nodesToForceAllocate, explain));
            }
@@ -229,7 +229,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
            // taking place on the node currently, ignore it for now
            logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation",
                unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards);
            return ShardAllocationDecision.throttle(
            return AllocateUnassignedDecision.throttle(
                explain ? "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries" : null,
                buildNodeDecisions(nodesToAllocate, explain));
        }
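The renamed AllocateUnassignedDecision keeps a four-outcome surface, all of which are exercised above. A condensed sketch (signatures as used in this diff):

    // The four outcomes an allocator can report for an unassigned shard:
    //   NOT_TAKEN                              - this allocator has no opinion; the next allocator decides
    //   no(status, explanation)                - definite no, e.g. FETCHING_SHARD_DATA or NO_VALID_SHARD_COPY
    //   throttle(explanation, nodeDecisions)   - viable nodes exist but recoveries are saturated; retry later
    //   yes(nodeId, explanation, allocationId, nodeDecisions) - assign the shard to nodeId
    AllocateUnassignedDecision pending = AllocateUnassignedDecision.no(
            AllocationStatus.FETCHING_SHARD_DATA,
            "still fetching shard state from the nodes in the cluster");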

@@ -31,8 +31,8 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
@@ -139,12 +139,12 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
    }

    @Override
    public ShardAllocationDecision makeAllocationDecision(final ShardRouting unassignedShard,
                                                          final RoutingAllocation allocation,
                                                          final Logger logger) {
    public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unassignedShard,
                                                             final RoutingAllocation allocation,
                                                             final Logger logger) {
        if (isResponsibleFor(unassignedShard) == false) {
            // this allocator is not responsible for deciding on this shard
            return ShardAllocationDecision.DECISION_NOT_TAKEN;
            return AllocateUnassignedDecision.NOT_TAKEN;
        }

        final RoutingNodes routingNodes = allocation.routingNodes();
@@ -153,7 +153,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
        Tuple<Decision, Map<String, Decision>> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain);
        if (allocateDecision.v1().type() != Decision.Type.YES) {
            logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard);
            return ShardAllocationDecision.no(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1().type()),
            return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1().type()),
                explain ? "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard" : null,
                allocateDecision.v2());
        }
@@ -162,7 +162,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
        if (shardStores.hasData() == false) {
            logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard);
            allocation.setHasPendingAsyncFetch();
            return ShardAllocationDecision.no(AllocationStatus.FETCHING_SHARD_DATA,
            return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA,
                explain ? "still fetching shard state from the nodes in the cluster" : null);
        }

@@ -175,7 +175,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
            // will try and recover from
            // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
            logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard);
            return ShardAllocationDecision.DECISION_NOT_TAKEN;
            return AllocateUnassignedDecision.NOT_TAKEN;
        }

        MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain);
@@ -189,14 +189,14 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
                logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
                // we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now
                return ShardAllocationDecision.throttle(
                return AllocateUnassignedDecision.throttle(
                    explain ? "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one of those copies" : null,
                    matchingNodes.nodeDecisions);
            } else {
                logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store",
                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
                // we found a match
                return ShardAllocationDecision.yes(nodeWithHighestMatch.nodeId(),
                return AllocateUnassignedDecision.yes(nodeWithHighestMatch.nodeId(),
                    "allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store",
                    null,
                    matchingNodes.nodeDecisions);
@@ -206,11 +206,11 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
            // unassigned due to a node leaving, so we delay allocation of this replica to see if the
            // node with the shard copy will rejoin so we can re-use the copy it has
            logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard);
            return ShardAllocationDecision.no(AllocationStatus.DELAYED_ALLOCATION,
            return AllocateUnassignedDecision.no(AllocationStatus.DELAYED_ALLOCATION,
                explain ? "not allocating this shard, no nodes contain data for the replica and allocation is delayed" : null);
        }

        return ShardAllocationDecision.DECISION_NOT_TAKEN;
        return AllocateUnassignedDecision.NOT_TAKEN;
    }

    /**

@@ -26,10 +26,12 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.xcontent.AbstractObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentType;
@@ -300,4 +302,15 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>>
                + processedFieldName + "] and [" + currentFieldName + "]");
        }
    }

    /**
     * Adds {@code boost} and {@code query_name} parsing to the
     * {@link AbstractObjectParser} passed in. All query builders except
     * {@link MatchAllQueryBuilder} and {@link MatchNoneQueryBuilder} support these fields, so they
     * should use this method.
     */
    protected static void declareStandardFields(AbstractObjectParser<? extends QueryBuilder, ? extends ParseFieldMatcherSupplier> parser) {
        parser.declareFloat((builder, value) -> builder.boost(value), AbstractQueryBuilder.BOOST_FIELD);
        parser.declareString((builder, value) -> builder.queryName(value), AbstractQueryBuilder.NAME_FIELD);
    }
}
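To see what the helper buys a concrete builder, here is roughly how a query's parser registers the shared fields (MyQueryBuilder and its field setter are hypothetical; the pattern mirrors the IdsQueryBuilder changes later in this diff):

    // Hypothetical builder "MyQueryBuilder" with one query-specific field:
    private static final ObjectParser<MyQueryBuilder, QueryParseContext> PARSER =
            new ObjectParser<>("my_query", MyQueryBuilder::new);
    static {
        PARSER.declareString(MyQueryBuilder::field, new ParseField("field")); // query-specific
        declareStandardFields(PARSER);                                        // shared boost + _name handling
    }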

@@ -24,25 +24,26 @@ import org.apache.lucene.search.Query;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

import static org.elasticsearch.common.xcontent.ObjectParser.fromList;

/**
 * A query that will return only documents matching specific ids (and a type).
 */
@@ -54,23 +55,22 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {

    private final Set<String> ids = new HashSet<>();

    private final String[] types;
    private String[] types = Strings.EMPTY_ARRAY;

    /**
     * Creates a new IdsQueryBuilder without providing the types of the documents to look for
     * Creates a new IdsQueryBuilder with no types specified upfront
     */
    public IdsQueryBuilder() {
        this.types = new String[0];
        // nothing to do
    }

    /**
     * Creates a new IdsQueryBuilder by providing the types of the documents to look for
     * @deprecated Replaced by {@link #types(String...)}
     */
    @Deprecated
    public IdsQueryBuilder(String... types) {
        if (types == null) {
            throw new IllegalArgumentException("[ids] types cannot be null");
        }
        this.types = types;
        types(types);
    }

    /**
@@ -88,6 +88,17 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
        out.writeStringArray(ids.toArray(new String[ids.size()]));
    }

    /**
     * Add types to query
     */
    public IdsQueryBuilder types(String... types) {
        if (types == null) {
            throw new IllegalArgumentException("[" + NAME + "] types cannot be null");
        }
        this.types = types;
        return this;
    }

    /**
     * Returns the types used in this query
     */
@@ -100,7 +111,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
     */
    public IdsQueryBuilder addIds(String... ids) {
        if (ids == null) {
            throw new IllegalArgumentException("[ids] ids cannot be null");
            throw new IllegalArgumentException("[" + NAME + "] ids cannot be null");
        }
        Collections.addAll(this.ids, ids);
        return this;
@@ -126,71 +137,21 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
        builder.endObject();
    }

    public static Optional<IdsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
        XContentParser parser = parseContext.parser();
        List<String> ids = new ArrayList<>();
        List<String> types = new ArrayList<>();
        float boost = AbstractQueryBuilder.DEFAULT_BOOST;
        String queryName = null;
    private static ObjectParser<IdsQueryBuilder, QueryParseContext> PARSER = new ObjectParser<>(NAME,
            () -> new IdsQueryBuilder());

        String currentFieldName = null;
        XContentParser.Token token;
        boolean idsProvided = false;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_ARRAY) {
                if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUES_FIELD)) {
                    idsProvided = true;
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        if ((token == XContentParser.Token.VALUE_STRING) ||
                                (token == XContentParser.Token.VALUE_NUMBER)) {
                            String id = parser.textOrNull();
                            if (id == null) {
                                throw new ParsingException(parser.getTokenLocation(), "No value specified for term filter");
                            }
                            ids.add(id);
                        } else {
                            throw new ParsingException(parser.getTokenLocation(),
                                    "Illegal value for id, expecting a string or number, got: " + token);
                        }
                    }
                } else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        String value = parser.textOrNull();
                        if (value == null) {
                            throw new ParsingException(parser.getTokenLocation(), "No type specified for term filter");
                        }
                        types.add(value);
                    }
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME +
                            "] query does not support [" + currentFieldName + "]");
                }
            } else if (token.isValue()) {
                if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
                    types = Collections.singletonList(parser.text());
                } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
                    boost = parser.floatValue();
                } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
                    queryName = parser.text();
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME +
                            "] query does not support [" + currentFieldName + "]");
                }
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME +
                        "] unknown token [" + token + "] after [" + currentFieldName + "]");
            }
        }
        if (!idsProvided) {
            throw new ParsingException(parser.getTokenLocation(), "[" + IdsQueryBuilder.NAME + "] query, no ids values provided");
        }
    static {
        PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::types), IdsQueryBuilder.TYPE_FIELD);
        PARSER.declareStringArray(fromList(String.class, IdsQueryBuilder::addIds), IdsQueryBuilder.VALUES_FIELD);
        declareStandardFields(PARSER);
    }

        IdsQueryBuilder query = new IdsQueryBuilder(types.toArray(new String[types.size()]));
        query.addIds(ids.toArray(new String[ids.size()]));
        query.boost(boost).queryName(queryName);
        return Optional.of(query);
    public static Optional<IdsQueryBuilder> fromXContent(QueryParseContext context) {
        try {
            return Optional.of(PARSER.apply(context.parser(), context));
        } catch (IllegalArgumentException e) {
            throw new ParsingException(context.parser().getTokenLocation(), e.getMessage(), e);
        }
    }
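With the varargs constructor deprecated, client code migrates to the fluent setter. A short before/after (type and id values hypothetical):

    // Before (now deprecated): types through the constructor.
    IdsQueryBuilder before = new IdsQueryBuilder("my_type").addIds("1", "2");
    // After: start empty, chain the fluent setters.
    IdsQueryBuilder after = new IdsQueryBuilder().types("my_type").addIds("1", "2");
    // The static factory now delegates to types() as well:
    IdsQueryBuilder viaFactory = QueryBuilders.idsQuery("my_type").addIds("1", "2");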

@@ -20,13 +20,12 @@
package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Optional;
@@ -48,7 +47,7 @@ public class MatchAllQueryBuilder extends AbstractQueryBuilder<MatchAllQueryBuil
    }

    @Override
    protected void doWriteTo(StreamOutput out) throws IOException {
    protected void doWriteTo(StreamOutput out) {
        // only superclass has state
    }

@@ -59,38 +58,22 @@ public class MatchAllQueryBuilder extends AbstractQueryBuilder<MatchAllQueryBuil
        builder.endObject();
    }

    public static Optional<MatchAllQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
        XContentParser parser = parseContext.parser();
    private static ObjectParser<MatchAllQueryBuilder, QueryParseContext> PARSER = new ObjectParser<>(NAME, MatchAllQueryBuilder::new);

        String currentFieldName = null;
        XContentParser.Token token;
        String queryName = null;
        float boost = AbstractQueryBuilder.DEFAULT_BOOST;
        while (((token = parser.nextToken()) != XContentParser.Token.END_OBJECT && token != XContentParser.Token.END_ARRAY)) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue()) {
                if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
                    queryName = parser.text();
                } else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
                    boost = parser.floatValue();
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "[" + MatchAllQueryBuilder.NAME +
                            "] query does not support [" + currentFieldName + "]");
                }
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + MatchAllQueryBuilder.NAME +
                        "] unknown token [" + token + "] after [" + currentFieldName + "]");
            }
        }
    static {
        declareStandardFields(PARSER);
    }

    public static Optional<MatchAllQueryBuilder> fromXContent(QueryParseContext context) {
        try {
            return Optional.of(PARSER.apply(context.parser(), context));
        } catch (IllegalArgumentException e) {
            throw new ParsingException(context.parser().getTokenLocation(), e.getMessage(), e);
        }
        MatchAllQueryBuilder queryBuilder = new MatchAllQueryBuilder();
        queryBuilder.boost(boost);
        queryBuilder.queryName(queryName);
        return Optional.of(queryBuilder);
    }

    @Override
    protected Query doToQuery(QueryShardContext context) throws IOException {
    protected Query doToQuery(QueryShardContext context) {
        return Queries.newMatchAllQuery();
    }

@@ -29,12 +29,10 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * A static factory for simple "import static" usage.
@@ -120,7 +118,7 @@ public abstract class QueryBuilders {
     * @param types The mapping/doc type
     */
    public static IdsQueryBuilder idsQuery(String... types) {
        return new IdsQueryBuilder(types);
        return new IdsQueryBuilder().types(types);
    }

    /**

@@ -964,8 +964,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
                this.fieldsAndWeights.size() == 0)) {
            // Use the automatically determined expansion of all queryable fields
            resolvedFields = allQueryableDefaultFields(context);
            // Automatically set leniency to "true" so mismatched fields don't cause exceptions
            qpSettings.lenient(true);
            // Automatically set leniency to "true" if unset so mismatched fields don't cause exceptions
            qpSettings.lenient(lenient == null ? true : lenient);
        } else {
            qpSettings.defaultField(this.defaultField == null ? context.defaultField() : this.defaultField);

@@ -129,6 +129,8 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
    private int flags = DEFAULT_FLAGS;
    /** Flag specifying whether the query should be forced to expand to all searchable fields */
    private Boolean useAllFields;
    /** Whether or not the lenient flag has been explicitly set */
    private boolean lenientSet = false;

    /** Further search settings needed by the ES specific query string parser only. */
    private Settings settings = new Settings();
@@ -162,6 +164,9 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
            in.readBoolean(); // lowercase_expanded_terms
        }
        settings.lenient(in.readBoolean());
        if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
            this.lenientSet = in.readBoolean();
        }
        settings.analyzeWildcard(in.readBoolean());
        if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
            in.readString(); // locale
@@ -188,6 +193,9 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
            out.writeBoolean(true); // lowercase_expanded_terms
        }
        out.writeBoolean(settings.lenient());
        if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
            out.writeBoolean(lenientSet);
        }
        out.writeBoolean(settings.analyzeWildcard());
        if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
            out.writeString(Locale.ROOT.toLanguageTag()); // locale
@@ -315,6 +323,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
    /** Specifies whether query parsing should be lenient. Defaults to false. */
    public SimpleQueryStringBuilder lenient(boolean lenient) {
        this.settings.lenient(lenient);
        this.lenientSet = true;
        return this;
    }

@@ -372,7 +381,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
                this.fieldsAndWeights.isEmpty())) {
            resolvedFieldsAndWeights = QueryStringQueryBuilder.allQueryableDefaultFields(context);
            // Need to use lenient mode when using "all-mode" so exceptions aren't thrown due to mismatched types
            newSettings.lenient(true);
            newSettings.lenient(lenientSet ? settings.lenient() : true);
        } else {
            // Use the default field if no fields specified
            if (fieldsAndWeights.isEmpty()) {
@@ -444,7 +453,9 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt

        builder.field(FLAGS_FIELD.getPreferredName(), flags);
        builder.field(DEFAULT_OPERATOR_FIELD.getPreferredName(), defaultOperator.name().toLowerCase(Locale.ROOT));
        builder.field(LENIENT_FIELD.getPreferredName(), settings.lenient());
        if (lenientSet) {
            builder.field(LENIENT_FIELD.getPreferredName(), settings.lenient());
        }
        builder.field(ANALYZE_WILDCARD_FIELD.getPreferredName(), settings.analyzeWildcard());
        if (settings.quoteFieldSuffix() != null) {
            builder.field(QUOTE_FIELD_SUFFIX_FIELD.getPreferredName(), settings.quoteFieldSuffix());
@@ -473,7 +484,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
        Operator defaultOperator = null;
        String analyzerName = null;
        int flags = SimpleQueryStringFlag.ALL.value();
        boolean lenient = SimpleQueryStringBuilder.DEFAULT_LENIENT;
        Boolean lenient = null;
        boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD;
        String quoteFieldSuffix = null;
        Boolean useAllFields = null;
@@ -565,7 +576,10 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
        SimpleQueryStringBuilder qb = new SimpleQueryStringBuilder(queryBody);
        qb.boost(boost).fields(fieldsAndWeights).analyzer(analyzerName).queryName(queryName).minimumShouldMatch(minimumShouldMatch);
        qb.flags(flags).defaultOperator(defaultOperator);
        qb.lenient(lenient).analyzeWildcard(analyzeWildcard).boost(boost).quoteFieldSuffix(quoteFieldSuffix);
        if (lenient != null) {
            qb.lenient(lenient);
        }
        qb.analyzeWildcard(analyzeWildcard).boost(boost).quoteFieldSuffix(quoteFieldSuffix);
        qb.useAllFields(useAllFields);
        return Optional.of(qb);
    }
|
||||
|
|
|
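The lenient change above is a tri-state flag: the builder now remembers whether the user set lenient at all, and only an explicit value overrides the automatic default; the extra boolean goes on the wire only for peers new enough to understand it. A minimal stand-alone sketch of that pattern, with hypothetical names (LenientFlag, peerUnderstandsUnset) that are not the Elasticsearch classes:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class LenientFlag {
        private Boolean lenient;                    // null means "never set by the user"

        void set(boolean value) { this.lenient = value; }

        boolean effective(boolean contextDefault) {
            // An explicit value wins; otherwise fall back to the context default.
            return lenient != null ? lenient : contextDefault;
        }

        void writeTo(DataOutputStream out, boolean peerUnderstandsUnset) throws IOException {
            out.writeBoolean(effective(false));     // the value itself, always on the wire
            if (peerUnderstandsUnset) {
                out.writeBoolean(lenient != null);  // "was it set?" only for newer peers
            }
        }

        static LenientFlag readFrom(DataInputStream in, boolean peerUnderstandsUnset) throws IOException {
            boolean value = in.readBoolean();
            LenientFlag flag = new LenientFlag();
            if (peerUnderstandsUnset && in.readBoolean()) {
                flag.set(value);                    // an old peer cannot signal "unset"
            }
            return flag;
        }
    }
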
@@ -25,6 +25,7 @@ import org.apache.lucene.queries.ExtendedCommonTermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.MultiTermQuery;

@@ -303,31 +304,38 @@ public class MatchQuery {

     public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
         final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
+        float boost = 1;
+        Query innerQuery = query;
+        while (innerQuery instanceof BoostQuery) {
+            BoostQuery bq = (BoostQuery) innerQuery;
+            boost *= bq.getBoost();
+            innerQuery = bq.getQuery();
+        }
         final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();
         prefixQuery.setMaxExpansions(maxExpansions);
         prefixQuery.setSlop(phraseSlop);
-        if (query instanceof PhraseQuery) {
-            PhraseQuery pq = (PhraseQuery)query;
+        if (innerQuery instanceof PhraseQuery) {
+            PhraseQuery pq = (PhraseQuery) innerQuery;
             Term[] terms = pq.getTerms();
             int[] positions = pq.getPositions();
             for (int i = 0; i < terms.length; i++) {
                 prefixQuery.add(new Term[] {terms[i]}, positions[i]);
             }
-            return prefixQuery;
-        } else if (query instanceof MultiPhraseQuery) {
-            MultiPhraseQuery pq = (MultiPhraseQuery)query;
+            return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);
+        } else if (innerQuery instanceof MultiPhraseQuery) {
+            MultiPhraseQuery pq = (MultiPhraseQuery) innerQuery;
             Term[][] terms = pq.getTermArrays();
             int[] positions = pq.getPositions();
             for (int i = 0; i < terms.length; i++) {
                 prefixQuery.add(terms[i], positions[i]);
             }
-            return prefixQuery;
-        } else if (query instanceof TermQuery) {
-            prefixQuery.add(((TermQuery) query).getTerm());
-            return prefixQuery;
-        } else if (query instanceof AllTermQuery) {
-            prefixQuery.add(((AllTermQuery) query).getTerm());
-            return prefixQuery;
+            return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);
+        } else if (innerQuery instanceof TermQuery) {
+            prefixQuery.add(((TermQuery) innerQuery).getTerm());
+            return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);
+        } else if (innerQuery instanceof AllTermQuery) {
+            prefixQuery.add(((AllTermQuery) innerQuery).getTerm());
+            return boost == 1 ? prefixQuery : new BoostQuery(prefixQuery, boost);
         }
         return query;
     }

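The rewritten createPhrasePrefixQuery has to look through BoostQuery wrappers because createFieldQuery may hand back the analyzed query wrapped in one or more boosts; the boost is peeled off, the inner query inspected, and the boost re-applied to the rewritten prefix query. Roughly, as a sketch against the Lucene API (assuming Lucene on the classpath):

    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;

    final class BoostUnwrap {
        /** Returns the innermost non-boost query; the combined boost lands in boostOut[0]. */
        static Query unwrap(Query query, float[] boostOut) {
            float boost = 1f;
            Query inner = query;
            while (inner instanceof BoostQuery) {
                BoostQuery bq = (BoostQuery) inner;
                boost *= bq.getBoost();            // nested boosts multiply
                inner = bq.getQuery();
            }
            boostOut[0] = boost;
            return inner;
        }

        /** Re-applies the boost only when it actually changes scoring. */
        static Query rewrap(Query rewritten, float boost) {
            return boost == 1f ? rewritten : new BoostQuery(rewritten, boost);
        }
    }
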
@@ -20,8 +20,10 @@
 package org.elasticsearch.index.translog;

 import org.apache.lucene.store.BufferedChecksum;
+import org.elasticsearch.common.io.stream.FilterStreamInput;
 import org.elasticsearch.common.io.stream.StreamInput;

 import java.io.EOFException;
 import java.io.IOException;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;

@@ -30,19 +32,18 @@ import java.util.zip.Checksum;
  * Similar to Lucene's BufferedChecksumIndexInput, however this wraps a
  * {@link StreamInput} so anything read will update the checksum
  */
-public final class BufferedChecksumStreamInput extends StreamInput {
+public final class BufferedChecksumStreamInput extends FilterStreamInput {
     private static final int SKIP_BUFFER_SIZE = 1024;
     private byte[] skipBuffer;
-    private final StreamInput in;
     private final Checksum digest;

     public BufferedChecksumStreamInput(StreamInput in) {
-        this.in = in;
+        super(in);
         this.digest = new BufferedChecksum(new CRC32());
     }

     public BufferedChecksumStreamInput(StreamInput in, BufferedChecksumStreamInput reuse) {
-        this.in = in;
+        super(in);
         if (reuse == null ) {
             this.digest = new BufferedChecksum(new CRC32());
         } else {

@@ -58,20 +59,20 @@ public final class BufferedChecksumStreamInput extends StreamInput {

     @Override
     public byte readByte() throws IOException {
-        final byte b = in.readByte();
+        final byte b = delegate.readByte();
         digest.update(b);
         return b;
     }

     @Override
     public void readBytes(byte[] b, int offset, int len) throws IOException {
-        in.readBytes(b, offset, len);
+        delegate.readBytes(b, offset, len);
         digest.update(b, offset, len);
     }

     @Override
     public void reset() throws IOException {
-        in.reset();
+        delegate.reset();
         digest.reset();
     }

@@ -80,14 +81,9 @@ public final class BufferedChecksumStreamInput extends StreamInput {
         return readByte() & 0xFF;
     }

-    @Override
-    public void close() throws IOException {
-        in.close();
-    }
-
     @Override
     public boolean markSupported() {
-        return in.markSupported();
+        return delegate.markSupported();
     }

@@ -109,17 +105,14 @@ public final class BufferedChecksumStreamInput extends StreamInput {
         return skipped;
     }

-    @Override
-    public int available() throws IOException {
-        return in.available();
-    }
-
     @Override
     public synchronized void mark(int readlimit) {
-        in.mark(readlimit);
+        delegate.mark(readlimit);
     }

     public void resetDigest() {
         digest.reset();
     }

 }

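Switching BufferedChecksumStreamInput to extend FilterStreamInput removes the hand-written pass-through methods (close, available) and leaves only the overrides that feed the digest. The JDK ships the same idea for plain streams; a self-contained sketch using java.util.zip.CheckedInputStream:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;
    import java.util.zip.CheckedInputStream;

    public class ChecksumReadSketch {
        public static void main(String[] args) throws IOException {
            byte[] payload = "translog-entry".getBytes(StandardCharsets.UTF_8);
            // CheckedInputStream delegates every read and updates the checksum as a
            // side effect, the same wrap-and-delegate shape as the class above.
            try (CheckedInputStream in =
                     new CheckedInputStream(new ByteArrayInputStream(payload), new CRC32())) {
                in.readAllBytes();
                System.out.printf("crc32=%08x%n", in.getChecksum().getValue());
            }
        }
    }
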
@@ -252,11 +252,15 @@ public class Node implements Closeable {
         }

         final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
-        tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeEnvironment.nodeId());
         Logger logger = Loggers.getLogger(Node.class, tmpSettings);
+        final String nodeId = nodeEnvironment.nodeId();
+        tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
+        // this must be captured after the node name is possibly added to the settings
+        final String nodeName = NODE_NAME_SETTING.get(tmpSettings);
         if (hadPredefinedNodeName == false) {
-            logger.info("node name [{}] derived from node ID; set [{}] to override",
-                NODE_NAME_SETTING.get(tmpSettings), NODE_NAME_SETTING.getKey());
+            logger.info("node name [{}] derived from node ID [{}]; set [{}] to override", nodeName, nodeId, NODE_NAME_SETTING.getKey());
+        } else {
+            logger.info("node name [{}], node ID [{}]", nodeName, nodeId);
         }

         final JvmInfo jvmInfo = JvmInfo.jvmInfo();

@@ -319,7 +323,7 @@ public class Node implements Closeable {
             final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool);
             clusterService.add(scriptModule.getScriptService());
             resourcesToClose.add(clusterService);
-            final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(),
+            final TribeService tribeService = new TribeService(settings, clusterService, nodeId,
                 s -> newTribeClientNode(s, classpathPlugins));
             resourcesToClose.add(tribeService);
             final IngestService ingestService = new IngestService(settings, threadPool, this.environment,

@@ -734,7 +738,7 @@ public class Node implements Closeable {
             toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));
             toClose.add(plugin);
         }
-        toClose.addAll(pluginsService.filterPlugins(Closeable.class));
+        toClose.addAll(pluginsService.filterPlugins(Plugin.class));

         toClose.add(() -> stopWatch.stop().start("script"));
         toClose.add(injector.getInstance(ScriptService.class));

@@ -47,9 +47,12 @@ import java.net.URLConnection;
 import java.net.URLDecoder;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.DirectoryStream;
+import java.nio.file.FileVisitResult;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
 import java.nio.file.StandardCopyOption;
+import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.attribute.PosixFileAttributeView;
 import java.nio.file.attribute.PosixFileAttributes;
 import java.nio.file.attribute.PosixFilePermission;

@@ -493,15 +496,24 @@ class InstallPluginCommand extends SettingCommand {
         }

         Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE);
-        try (DirectoryStream<Path> stream = Files.newDirectoryStream(destination)) {
-            for (Path pluginFile : stream) {
+        Files.walkFileTree(destination, new SimpleFileVisitor<Path>() {
+            @Override
+            public FileVisitResult visitFile(Path pluginFile, BasicFileAttributes attrs) throws IOException {
                 if (Files.isDirectory(pluginFile)) {
                     setFileAttributes(pluginFile, PLUGIN_DIR_PERMS);
                 } else {
-                    setFileAttributes(pluginFile, PLUGIN_FILES_PERMS);
+                    // There can also be "bin" directories under the plugin directory, storing native code executables
+                    Path parentDir = pluginFile.getParent().getFileName();
+                    if ("bin".equals(parentDir.toString())) {
+                        setFileAttributes(pluginFile, BIN_FILES_PERMS);
+                    } else {
+                        setFileAttributes(pluginFile, PLUGIN_FILES_PERMS);
+                    }
                 }
+                return FileVisitResult.CONTINUE;
             }
-        }
+        });

         terminal.println("-> Installed " + info.getName());

     } catch (Exception installProblem) {

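The install step now walks the whole tree instead of a single-level DirectoryStream, so permissions reach files nested under the plugin root, and executables under a bin directory keep their execute bit. A stand-alone sketch of that traversal (the permission strings are illustrative, not the PLUGIN_*_PERMS constants):

    import java.io.IOException;
    import java.nio.file.FileVisitResult;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.SimpleFileVisitor;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.nio.file.attribute.PosixFilePermission;
    import java.nio.file.attribute.PosixFilePermissions;
    import java.util.Set;

    final class PluginPermissionWalk {
        static void apply(Path pluginRoot) throws IOException {
            final Set<PosixFilePermission> dirPerms  = PosixFilePermissions.fromString("rwxr-xr-x");
            final Set<PosixFilePermission> filePerms = PosixFilePermissions.fromString("rw-r--r--");
            final Set<PosixFilePermission> binPerms  = PosixFilePermissions.fromString("rwxr-xr-x");
            Files.walkFileTree(pluginRoot, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                    Files.setPosixFilePermissions(dir, dirPerms);
                    return FileVisitResult.CONTINUE;
                }
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    // Native executables live under "bin" and keep the execute bit.
                    Path parent = file.getParent().getFileName();
                    boolean inBin = parent != null && "bin".equals(parent.toString());
                    Files.setPosixFilePermissions(file, inBin ? binPerms : filePerms);
                    return FileVisitResult.CONTINUE;
                }
            });
        }
    }
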
@@ -19,6 +19,8 @@
 package org.elasticsearch.plugins;

+import java.io.Closeable;
+import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;

@@ -70,7 +72,7 @@ import java.util.function.UnaryOperator;
  * methods should cause any extensions of {@linkplain Plugin} that used the pre-5.x style extension syntax to fail to build and point the
  * plugin author at the new extension syntax. We hope that these make the process of upgrading a plugin from 2.x to 5.x only mildly painful.
  */
-public abstract class Plugin {
+public abstract class Plugin implements Closeable {

     /**
      * Node level guice modules.

@@ -162,6 +164,16 @@ public abstract class Plugin {
         return Collections.emptyList();
     }

+    /**
+     * Close the resources opened by this plugin.
+     *
+     * @throws IOException if the plugin failed to close its resources
+     */
+    @Override
+    public void close() throws IOException {
+
+    }
+
     /**
      * Old-style guice index level extension point. {@code @Deprecated} and {@code final} to act as a signpost for plugin authors upgrading
      * from 2.x.

@@ -31,6 +31,7 @@ import org.apache.lucene.search.highlight.SimpleFragmenter;
 import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
 import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
 import org.apache.lucene.search.highlight.TextFragment;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;

@@ -106,7 +107,12 @@ public class PlainHighlighter implements Highlighter {
         textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);

         for (Object textToHighlight : textsToHighlight) {
-            String text = textToHighlight.toString();
+            String text;
+            if (textToHighlight instanceof BytesRef) {
+                text = mapper.fieldType().valueForDisplay(textToHighlight).toString();
+            } else {
+                text = textToHighlight.toString();
+            }

             try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) {
                 if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {

@@ -58,7 +58,6 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.transport.TransportSettings;

@@ -72,6 +71,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.Function;
 import java.util.stream.Collectors;

 import static java.util.Collections.unmodifiableMap;

@@ -134,6 +134,29 @@ public class TribeService extends AbstractLifecycleComponent {
         return sb.build();
     }

+    /**
+     * Interface to allow merging {@link org.elasticsearch.cluster.metadata.MetaData.Custom} in tribe node
+     * When multiple Mergable Custom metadata of the same type is found (from underlying clusters), the
+     * Custom metadata will be merged using {@link #merge(MetaData.Custom)} and the result will be stored
+     * in the tribe cluster state
+     *
+     * @param <T> type of custom meta data
+     */
+    public interface MergableCustomMetaData<T extends MetaData.Custom> {
+
+        /**
+         * Merges this custom metadata with other, returning either this or <code>other</code> custom metadata
+         * for tribe cluster state. This method should not mutate either <code>this</code> or the
+         * <code>other</code> custom metadata.
+         *
+         * @param other custom meta data
+         * @return the same instance or <code>other</code> custom metadata based on implementation
+         *         if both the instances are considered equal, implementations should return this
+         *         instance to avoid redundant cluster state changes.
+         */
+        T merge(T other);
+    }
+
     // internal settings only
     public static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", Property.NodeScope);
     private final ClusterService clusterService;

@@ -270,7 +293,7 @@ public class TribeService extends AbstractLifecycleComponent {
     public void startNodes() {
         for (Node node : nodes) {
             try {
-                node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
+                getClusterService(node).add(new TribeClusterStateListener(node));
                 node.start();
             } catch (Exception e) {
                 // calling close is safe for non started nodes, we can just iterate over all

@@ -348,23 +371,19 @@ public class TribeService extends AbstractLifecycleComponent {

         @Override
         public BatchResult<ClusterChangedEvent> execute(ClusterState currentState, List<ClusterChangedEvent> tasks) throws Exception {
-            ClusterState accumulator = ClusterState.builder(currentState).build();
             BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder();
-
-            try {
-                // we only need to apply the latest cluster state update
-                accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1));
-                builder.successes(tasks);
-            } catch (Exception e) {
-                builder.failures(tasks, e);
-            }
-
-            return builder.build(accumulator);
+            ClusterState.Builder newState = ClusterState.builder(currentState).incrementVersion();
+            boolean clusterStateChanged = updateNodes(currentState, tasks, newState);
+            clusterStateChanged |= updateIndicesAndMetaData(currentState, tasks, newState);
+            builder.successes(tasks);
+            return builder.build(clusterStateChanged ? newState.build() : currentState);
         }

-        private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) {
+        private boolean updateNodes(ClusterState currentState, List<ClusterChangedEvent> tasks, ClusterState.Builder newState) {
             boolean clusterStateChanged = false;
-            ClusterState tribeState = task.state();
+            // we only need to apply the latest cluster state update
+            ClusterChangedEvent latestTask = tasks.get(tasks.size() - 1);
+            ClusterState tribeState = latestTask.state();
             DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
             // -- merge nodes
             // go over existing nodes, and see if they need to be removed

@@ -385,16 +404,25 @@ public class TribeService extends AbstractLifecycleComponent {
                     Map<String, String> tribeAttr = new HashMap<>(tribe.getAttributes());
                     tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
                     DiscoveryNode discoNode = new DiscoveryNode(tribe.getName(), tribe.getId(), tribe.getEphemeralId(),
-                        tribe.getHostName(), tribe.getHostAddress(), tribe.getAddress(), unmodifiableMap(tribeAttr), tribe.getRoles(),
-                        tribe.getVersion());
+                            tribe.getHostName(), tribe.getHostAddress(), tribe.getAddress(), unmodifiableMap(tribeAttr), tribe.getRoles(),
+                            tribe.getVersion());
                     clusterStateChanged = true;
                     logger.info("[{}] adding node [{}]", tribeName, discoNode);
                     nodes.remove(tribe.getId()); // remove any existing node with the same id but different ephemeral id
                     nodes.add(discoNode);
                 }
             }
+            if (clusterStateChanged) {
+                newState.nodes(nodes);
+            }
+            return clusterStateChanged;
+        }

-            // -- merge metadata
+        private boolean updateIndicesAndMetaData(ClusterState currentState, List<ClusterChangedEvent> tasks, ClusterState.Builder newState) {
+            // we only need to apply the latest cluster state update
+            ClusterChangedEvent latestTask = tasks.get(tasks.size() - 1);
+            ClusterState tribeState = latestTask.state();
+            boolean clusterStateChanged = false;
             ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
             MetaData.Builder metaData = MetaData.builder(currentState.metaData());
             RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());

@@ -462,13 +490,49 @@ public class TribeService extends AbstractLifecycleComponent {
                     }
                 }
             }

-            if (!clusterStateChanged) {
-                return currentState;
-            } else {
-                return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData)
-                    .routingTable(routingTable.build()).build();
+            clusterStateChanged |= updateCustoms(currentState, tasks, metaData);
+            if (clusterStateChanged) {
+                newState.blocks(blocks);
+                newState.metaData(metaData);
+                newState.routingTable(routingTable.build());
             }
+            return clusterStateChanged;
         }

+        private boolean updateCustoms(ClusterState currentState, List<ClusterChangedEvent> tasks, MetaData.Builder metaData) {
+            boolean clusterStateChanged = false;
+            Set<String> changedCustomMetaDataTypeSet = tasks.stream()
+                    .map(ClusterChangedEvent::changedCustomMetaDataSet)
+                    .flatMap(Collection::stream)
+                    .collect(Collectors.toSet());
+            final List<Node> tribeClientNodes = TribeService.this.nodes;
+            Map<String, MetaData.Custom> mergedCustomMetaDataMap = mergeChangedCustomMetaData(changedCustomMetaDataTypeSet,
+                    customMetaDataType -> tribeClientNodes.stream()
+                            .map(TribeService::getClusterService).map(ClusterService::state)
+                            .map(ClusterState::metaData)
+                            .map(clusterMetaData -> ((MetaData.Custom) clusterMetaData.custom(customMetaDataType)))
+                            .filter(custom1 -> custom1 != null && custom1 instanceof MergableCustomMetaData)
+                            .map(custom2 -> (MergableCustomMetaData) custom2)
+                            .collect(Collectors.toList())
+            );
+            for (String changedCustomMetaDataType : changedCustomMetaDataTypeSet) {
+                MetaData.Custom mergedCustomMetaData = mergedCustomMetaDataMap.get(changedCustomMetaDataType);
+                if (mergedCustomMetaData == null) {
+                    // we ignore merging custom md which doesn't implement MergableCustomMetaData interface
+                    if (currentState.metaData().custom(changedCustomMetaDataType) instanceof MergableCustomMetaData) {
+                        // custom md has been removed
+                        clusterStateChanged = true;
+                        logger.info("[{}] removing custom meta data type [{}]", tribeName, changedCustomMetaDataType);
+                        metaData.removeCustom(changedCustomMetaDataType);
+                    }
+                } else {
+                    // custom md has been changed
+                    clusterStateChanged = true;
+                    logger.info("[{}] updating custom meta data type [{}] data [{}]", tribeName, changedCustomMetaDataType, mergedCustomMetaData);
+                    metaData.putCustom(changedCustomMetaDataType, mergedCustomMetaData);
+                }
+            }
+            return clusterStateChanged;
+        }

         private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable,

@@ -494,4 +558,23 @@ public class TribeService extends AbstractLifecycleComponent {
             }
         }
     }

+    private static ClusterService getClusterService(Node node) {
+        return node.injector().getInstance(ClusterService.class);
+    }
+
+    // pkg-private for testing
+    static Map<String, MetaData.Custom> mergeChangedCustomMetaData(Set<String> changedCustomMetaDataTypeSet,
+            Function<String, List<MergableCustomMetaData>> customMetaDataByTribeNode) {

+        Map<String, MetaData.Custom> changedCustomMetaDataMap = new HashMap<>(changedCustomMetaDataTypeSet.size());
+        for (String customMetaDataType : changedCustomMetaDataTypeSet) {
+            customMetaDataByTribeNode.apply(customMetaDataType).stream()
+                    .reduce((mergableCustomMD, mergableCustomMD2) ->
+                            ((MergableCustomMetaData) mergableCustomMD.merge((MetaData.Custom) mergableCustomMD2)))
+                    .ifPresent(mergedCustomMetaData ->
+                            changedCustomMetaDataMap.put(customMetaDataType, ((MetaData.Custom) mergedCustomMetaData)));
+        }
+        return changedCustomMetaDataMap;
+    }
 }

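mergeChangedCustomMetaData folds the per-cluster customs of one type pairwise through merge(), so the result is order-independent only when merge() is associative and side-effect free, which is exactly what the interface javadoc above demands. The shape of that reduction, with a hypothetical Mergeable interface standing in for MergableCustomMetaData:

    import java.util.List;
    import java.util.Optional;

    interface Mergeable<T extends Mergeable<T>> {
        /** Must not mutate either operand; returns this when both are considered equal. */
        T merge(T other);
    }

    final class MergeReductionSketch {
        static <T extends Mergeable<T>> Optional<T> mergeAll(List<T> perClusterValues) {
            // Optional.empty() when no cluster reported this custom type at all,
            // mirroring the "custom md has been removed" branch above.
            return perClusterValues.stream().reduce(Mergeable::merge);
        }
    }
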
@@ -49,6 +49,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.env.ShardLockObtainFailedException;
 import org.elasticsearch.index.AlreadyExpiredException;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.engine.RecoveryEngineException;

@@ -107,6 +108,7 @@ import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static java.util.Collections.singleton;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;

 public class ExceptionSerializationTests extends ESTestCase {

@@ -160,10 +162,10 @@ public class ExceptionSerializationTests extends ESTestCase {
         if (isEsException(clazz) == false) {
             return;
         }
-        if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class)) == false
+        if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class), Version.CURRENT) == false
             && ElasticsearchException.class.equals(clazz.getEnclosingClass()) == false) {
             notRegistered.add(clazz);
-        } else if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class))) {
+        } else if (ElasticsearchException.isRegistered(clazz.asSubclass(Throwable.class), Version.CURRENT)) {
             registered.add(clazz);
             try {
                 if (clazz.getMethod("writeTo", StreamOutput.class) != null) {

@@ -218,10 +220,17 @@ public class ExceptionSerializationTests extends ESTestCase {
     }

     private <T extends Exception> T serialize(T exception) throws IOException {
-        ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersion(random()), exception);
+        return serialize(exception, VersionUtils.randomVersion(random()));
+    }
+
+    private <T extends Exception> T serialize(T exception, Version version) throws IOException {
+        ElasticsearchAssertions.assertVersionSerializable(version, exception);
         BytesStreamOutput out = new BytesStreamOutput();
+        out.setVersion(version);
         out.writeException(exception);

         StreamInput in = out.bytes().streamInput();
+        in.setVersion(version);
         return in.readException();
     }

@@ -769,6 +778,7 @@ public class ExceptionSerializationTests extends ESTestCase {
         ids.put(144, org.elasticsearch.cluster.NotMasterException.class);
         ids.put(145, org.elasticsearch.ElasticsearchStatusException.class);
         ids.put(146, org.elasticsearch.tasks.TaskCancelledException.class);
+        ids.put(147, org.elasticsearch.env.ShardLockObtainFailedException.class);

         Map<Class<? extends ElasticsearchException>, Integer> reverse = new HashMap<>();
         for (Map.Entry<Integer, Class<? extends ElasticsearchException>> entry : ids.entrySet()) {

@@ -826,4 +836,28 @@ public class ExceptionSerializationTests extends ESTestCase {
         assertEquals(ex.status(), e.status());
         assertEquals(RestStatus.TOO_MANY_REQUESTS, e.status());
     }

+    public void testShardLockObtainFailedException() throws IOException {
+        ShardId shardId = new ShardId("foo", "_na_", 1);
+        ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom");
+        Version version = VersionUtils.randomVersionBetween(random(),
+                Version.V_5_0_0, Version.CURRENT);
+        if (version.before(ElasticsearchException.V_5_1_0_UNRELEASED)) {
+            // remove this once 5_1_0 is released randomVersionBetween asserts that this version is in the constant table..
+            version = ElasticsearchException.V_5_1_0_UNRELEASED;
+        }
+        ShardLockObtainFailedException ex = serialize(orig, version);
+        assertEquals(orig.getMessage(), ex.getMessage());
+        assertEquals(orig.getShardId(), ex.getShardId());
+    }
+
+    public void testBWCShardLockObtainFailedException() throws IOException {
+        ShardId shardId = new ShardId("foo", "_na_", 1);
+        ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom");
+        Exception ex = serialize((Exception)orig, Version.V_5_0_0);
+        assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class));
+        assertEquals("shard_lock_obtain_failed_exception: [foo][1]: boom", ex.getMessage());
+    }
+
 }

@@ -284,6 +284,7 @@ public class VersionTests extends ESTestCase {
         assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant
         assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED);
         assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED);
+        assertUnknownVersion(ElasticsearchException.V_5_1_0_UNRELEASED);
         // once we released 5.0.0 and it's added to Version.java we need to remove this constant
         assertUnknownVersion(Script.V_5_1_0_UNRELEASED);
         // once we released 5.0.0 and it's added to Version.java we need to remove this constant

@@ -20,6 +20,7 @@
 package org.elasticsearch.cluster;

 import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexGraveyard;

@@ -33,6 +34,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.TestCustomMetaData;

 import java.util.ArrayList;
 import java.util.Arrays;

@@ -222,6 +224,128 @@ public class ClusterChangedEventTests extends ESTestCase {
         assertTrue("index routing table should not be the same object", event.indexRoutingTableChanged(initialIndices.get(0).getName()));
     }

+    /**
+     * Test custom metadata change checks
+     */
+    public void testChangedCustomMetaDataSet() {
+        final int numNodesInCluster = 3;
+
+        final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
+        CustomMetaData1 customMetaData1 = new CustomMetaData1("data");
+        final ClusterState stateWithCustomMetaData = nextState(originalState, Collections.singletonList(customMetaData1));
+
+        // no custom metadata present in any state
+        ClusterState nextState = ClusterState.builder(originalState).build();
+        ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, nextState);
+        assertTrue(event.changedCustomMetaDataSet().isEmpty());
+
+        // next state has new custom metadata
+        nextState = nextState(originalState, Collections.singletonList(customMetaData1));
+        event = new ClusterChangedEvent("_na_", originalState, nextState);
+        Set<String> changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 1);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type()));
+
+        // next state has same custom metadata
+        nextState = nextState(originalState, Collections.singletonList(customMetaData1));
+        event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.isEmpty());
+
+        // next state has equivalent custom metadata
+        nextState = nextState(originalState, Collections.singletonList(new CustomMetaData1("data")));
+        event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.isEmpty());
+
+        // next state removes custom metadata
+        nextState = originalState;
+        event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 1);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type()));
+
+        // next state updates custom metadata
+        nextState = nextState(stateWithCustomMetaData, Collections.singletonList(new CustomMetaData1("data1")));
+        event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 1);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type()));
+
+        // next state adds new custom metadata type
+        CustomMetaData2 customMetaData2 = new CustomMetaData2("data2");
+        nextState = nextState(stateWithCustomMetaData, Arrays.asList(customMetaData1, customMetaData2));
+        event = new ClusterChangedEvent("_na_", stateWithCustomMetaData, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 1);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type()));
+
+        // next state adds two custom metadata type
+        nextState = nextState(originalState, Arrays.asList(customMetaData1, customMetaData2));
+        event = new ClusterChangedEvent("_na_", originalState, nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 2);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type()));
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type()));
+
+        // next state removes two custom metadata type
+        nextState = originalState;
+        event = new ClusterChangedEvent("_na_",
+                nextState(originalState, Arrays.asList(customMetaData1, customMetaData2)), nextState);
+        changedCustomMetaDataTypeSet = event.changedCustomMetaDataSet();
+        assertTrue(changedCustomMetaDataTypeSet.size() == 2);
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData2.type()));
+        assertTrue(changedCustomMetaDataTypeSet.contains(customMetaData1.type()));
+    }
+
+    private static class CustomMetaData2 extends TestCustomMetaData {
+        static {
+            MetaData.registerPrototype("2", new CustomMetaData2(""));
+        }
+        protected CustomMetaData2(String data) {
+            super(data);
+        }
+
+        @Override
+        protected TestCustomMetaData newTestCustomMetaData(String data) {
+            return new CustomMetaData2(data);
+        }
+
+        @Override
+        public String type() {
+            return "2";
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return EnumSet.of(MetaData.XContentContext.GATEWAY);
+        }
+    }
+
+    private static class CustomMetaData1 extends TestCustomMetaData {
+        static {
+            MetaData.registerPrototype("1", new CustomMetaData1(""));
+        }
+        protected CustomMetaData1(String data) {
+            super(data);
+        }
+
+        @Override
+        protected TestCustomMetaData newTestCustomMetaData(String data) {
+            return new CustomMetaData1(data);
+        }
+
+        @Override
+        public String type() {
+            return "1";
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return EnumSet.of(MetaData.XContentContext.GATEWAY);
+        }
+    }
+
     private static ClusterState createSimpleClusterState() {
         return ClusterState.builder(TEST_CLUSTER_NAME).build();
     }

@@ -244,6 +368,22 @@ public class ClusterChangedEventTests extends ESTestCase {
             .build();
     }

+    private static ClusterState nextState(final ClusterState previousState, List<TestCustomMetaData> customMetaDataList) {
+        final ClusterState.Builder builder = ClusterState.builder(previousState);
+        builder.stateUUID(UUIDs.randomBase64UUID());
+        MetaData.Builder metaDataBuilder = new MetaData.Builder(previousState.metaData());
+        for (ObjectObjectCursor<String, MetaData.Custom> customMetaData : previousState.metaData().customs()) {
+            if (customMetaData.value instanceof TestCustomMetaData) {
+                metaDataBuilder.removeCustom(customMetaData.key);
+            }
+        }
+        for (TestCustomMetaData testCustomMetaData : customMetaDataList) {
+            metaDataBuilder.putCustom(testCustomMetaData.type(), testCustomMetaData);
+        }
+        builder.metaData(metaDataBuilder);
+        return builder.build();
+    }
+
     // Create a modified cluster state from another one, but with some number of indices added and deleted.
     private static ClusterState nextState(final ClusterState previousState, final boolean changeClusterUUID,
             final List<Index> addedIndices, final List<Index> deletedIndices, final int numNodesToRemove) {

@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.routing.allocation;

-import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
-import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.test.ESTestCase;

@@ -32,27 +31,27 @@ import java.util.Map;
 import java.util.stream.Collectors;

 /**
- * Unit tests for the {@link ShardAllocationDecision} class.
+ * Unit tests for the {@link AllocateUnassignedDecision} class.
  */
-public class ShardAllocationDecisionTests extends ESTestCase {
+public class AllocateUnassignedDecisionTests extends ESTestCase {

     public void testDecisionNotTaken() {
-        ShardAllocationDecision shardAllocationDecision = ShardAllocationDecision.DECISION_NOT_TAKEN;
-        assertFalse(shardAllocationDecision.isDecisionTaken());
-        assertNull(shardAllocationDecision.getFinalDecisionType());
-        assertNull(shardAllocationDecision.getAllocationStatus());
-        assertNull(shardAllocationDecision.getAllocationId());
-        assertNull(shardAllocationDecision.getAssignedNodeId());
-        assertNull(shardAllocationDecision.getFinalExplanation());
-        assertNull(shardAllocationDecision.getNodeDecisions());
-        expectThrows(IllegalArgumentException.class, () -> shardAllocationDecision.getFinalDecisionSafe());
+        AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN;
+        assertFalse(allocateUnassignedDecision.isDecisionTaken());
+        assertNull(allocateUnassignedDecision.getFinalDecisionType());
+        assertNull(allocateUnassignedDecision.getAllocationStatus());
+        assertNull(allocateUnassignedDecision.getAllocationId());
+        assertNull(allocateUnassignedDecision.getAssignedNodeId());
+        assertNull(allocateUnassignedDecision.getFinalExplanation());
+        assertNull(allocateUnassignedDecision.getNodeDecisions());
+        expectThrows(IllegalArgumentException.class, () -> allocateUnassignedDecision.getFinalDecisionSafe());
     }

     public void testNoDecision() {
         final AllocationStatus allocationStatus = randomFrom(
             AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA
         );
-        ShardAllocationDecision noDecision = ShardAllocationDecision.no(allocationStatus, "something is wrong");
+        AllocateUnassignedDecision noDecision = AllocateUnassignedDecision.no(allocationStatus, "something is wrong");
         assertTrue(noDecision.isDecisionTaken());
         assertEquals(Decision.Type.NO, noDecision.getFinalDecisionType());
         assertEquals(allocationStatus, noDecision.getAllocationStatus());

@@ -61,10 +60,10 @@ public class ShardAllocationDecisionTests extends ESTestCase {
         assertNull(noDecision.getAssignedNodeId());
         assertNull(noDecision.getAllocationId());

-        Map<String, ShardAllocationDecision.WeightedDecision> nodeDecisions = new HashMap<>();
-        nodeDecisions.put("node1", new ShardAllocationDecision.WeightedDecision(Decision.NO));
-        nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.NO));
-        noDecision = ShardAllocationDecision.no(AllocationStatus.DECIDERS_NO, "something is wrong",
+        Map<String, NodeAllocationResult> nodeDecisions = new HashMap<>();
+        nodeDecisions.put("node1", new NodeAllocationResult(Decision.NO));
+        nodeDecisions.put("node2", new NodeAllocationResult(Decision.NO));
+        noDecision = AllocateUnassignedDecision.no(AllocationStatus.DECIDERS_NO, "something is wrong",
             nodeDecisions.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision()))
         );
         assertTrue(noDecision.isDecisionTaken());

@@ -76,14 +75,14 @@ public class ShardAllocationDecisionTests extends ESTestCase {
         assertNull(noDecision.getAllocationId());

         // test bad values
-        expectThrows(NullPointerException.class, () -> ShardAllocationDecision.no((AllocationStatus)null, "a"));
+        expectThrows(NullPointerException.class, () -> AllocateUnassignedDecision.no((AllocationStatus)null, "a"));
     }

     public void testThrottleDecision() {
-        Map<String, WeightedDecision> nodeDecisions = new HashMap<>();
-        nodeDecisions.put("node1", new ShardAllocationDecision.WeightedDecision(Decision.NO));
-        nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.THROTTLE));
-        ShardAllocationDecision throttleDecision = ShardAllocationDecision.throttle("too much happening",
+        Map<String, NodeAllocationResult> nodeDecisions = new HashMap<>();
+        nodeDecisions.put("node1", new NodeAllocationResult(Decision.NO));
+        nodeDecisions.put("node2", new NodeAllocationResult(Decision.THROTTLE));
+        AllocateUnassignedDecision throttleDecision = AllocateUnassignedDecision.throttle("too much happening",
             nodeDecisions.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision()))
         );
         assertTrue(throttleDecision.isDecisionTaken());

@@ -96,11 +95,11 @@ public class ShardAllocationDecisionTests extends ESTestCase {
     }

     public void testYesDecision() {
-        Map<String, ShardAllocationDecision.WeightedDecision> nodeDecisions = new HashMap<>();
-        nodeDecisions.put("node1", new ShardAllocationDecision.WeightedDecision(Decision.YES));
-        nodeDecisions.put("node2", new ShardAllocationDecision.WeightedDecision(Decision.NO));
+        Map<String, NodeAllocationResult> nodeDecisions = new HashMap<>();
+        nodeDecisions.put("node1", new NodeAllocationResult(Decision.YES));
+        nodeDecisions.put("node2", new NodeAllocationResult(Decision.NO));
         String allocId = randomBoolean() ? "allocId" : null;
-        ShardAllocationDecision yesDecision = ShardAllocationDecision.yes(
+        AllocateUnassignedDecision yesDecision = AllocateUnassignedDecision.yes(
             "node1", "node was very kind", allocId, nodeDecisions.entrySet().stream().collect(
                 Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getDecision())
             )

@@ -119,27 +118,27 @@ public class ShardAllocationDecisionTests extends ESTestCase {
             AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA, AllocationStatus.DELAYED_ALLOCATION);
         for (AllocationStatus allocationStatus : cachableStatuses) {
             if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) {
-                ShardAllocationDecision cached = ShardAllocationDecision.throttle(null, null);
-                ShardAllocationDecision another = ShardAllocationDecision.throttle(null, null);
+                AllocateUnassignedDecision cached = AllocateUnassignedDecision.throttle(null, null);
+                AllocateUnassignedDecision another = AllocateUnassignedDecision.throttle(null, null);
                 assertSame(cached, another);
-                ShardAllocationDecision notCached = ShardAllocationDecision.throttle("abc", null);
-                another = ShardAllocationDecision.throttle("abc", null);
+                AllocateUnassignedDecision notCached = AllocateUnassignedDecision.throttle("abc", null);
+                another = AllocateUnassignedDecision.throttle("abc", null);
                 assertNotSame(notCached, another);
             } else {
-                ShardAllocationDecision cached = ShardAllocationDecision.no(allocationStatus, null);
-                ShardAllocationDecision another = ShardAllocationDecision.no(allocationStatus, null);
+                AllocateUnassignedDecision cached = AllocateUnassignedDecision.no(allocationStatus, null);
+                AllocateUnassignedDecision another = AllocateUnassignedDecision.no(allocationStatus, null);
                 assertSame(cached, another);
-                ShardAllocationDecision notCached = ShardAllocationDecision.no(allocationStatus, "abc");
-                another = ShardAllocationDecision.no(allocationStatus, "abc");
+                AllocateUnassignedDecision notCached = AllocateUnassignedDecision.no(allocationStatus, "abc");
+                another = AllocateUnassignedDecision.no(allocationStatus, "abc");
                 assertNotSame(notCached, another);
             }
         }

         // yes decisions are not precomputed and cached
         Map<String, Decision> dummyMap = Collections.emptyMap();
-        ShardAllocationDecision first = ShardAllocationDecision.yes("node1", "abc", "alloc1", dummyMap);
-        ShardAllocationDecision second = ShardAllocationDecision.yes("node1", "abc", "alloc1", dummyMap);
-        // same fields for the ShardAllocationDecision, but should be different instances
+        AllocateUnassignedDecision first = AllocateUnassignedDecision.yes("node1", "abc", "alloc1", dummyMap);
+        AllocateUnassignedDecision second = AllocateUnassignedDecision.yes("node1", "abc", "alloc1", dummyMap);
+        // same fields for the AllocateUnassignedDecision, but should be different instances
         assertNotSame(first, second);
     }

@@ -31,8 +31,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer;
-import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.NodeRebalanceDecision;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.RebalanceDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;

@@ -222,8 +220,8 @@ public class BalancedSingleShardTests extends ESAllocationTestCase {
         assertEquals(shardToRebalance.relocatingNodeId(), rebalanceDecision.getAssignedNodeId());
         // make sure all excluded nodes returned a NO decision
         for (String exludedNode : excludeNodes) {
-            NodeRebalanceDecision nodeRebalanceDecision = rebalanceDecision.getNodeDecisions().get(exludedNode);
-            assertEquals(Type.NO, nodeRebalanceDecision.getCanAllocateDecision().type());
+            NodeRebalanceResult nodeRebalanceResult = rebalanceDecision.getNodeDecisions().get(exludedNode);
+            assertEquals(Type.NO, nodeRebalanceResult.getCanAllocateDecision().type());
         }
     }

@@ -19,8 +19,6 @@
 package org.elasticsearch.cluster.routing.allocation;

-import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision.WeightedDecision;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.MoveDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
 import org.elasticsearch.test.ESTestCase;

@@ -79,9 +77,9 @@ public class MoveDecisionTests extends ESTestCase {
     }

     public void testDecisionWithExplain() {
-        Map<String, WeightedDecision> nodeDecisions = new HashMap<>();
-        nodeDecisions.put("node1", new WeightedDecision(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat()));
-        nodeDecisions.put("node2", new WeightedDecision(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat()));
+        Map<String, NodeAllocationResult> nodeDecisions = new HashMap<>();
+        nodeDecisions.put("node1", new NodeAllocationResult(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat()));
+        nodeDecisions.put("node2", new NodeAllocationResult(randomFrom(Decision.NO, Decision.THROTTLE, Decision.YES), randomFloat()));
         MoveDecision decision = MoveDecision.decision(Decision.NO, Type.NO, true, "node1", null, nodeDecisions);
         assertNotNull(decision.getFinalDecisionType());
         assertNotNull(decision.getFinalExplanation());

@@ -150,6 +150,7 @@ public class ClusterServiceTests extends ESTestCase {

     public void testTimedOutUpdateTaskCleanedUp() throws Exception {
         final CountDownLatch block = new CountDownLatch(1);
+        final CountDownLatch blockCompleted = new CountDownLatch(1);
         clusterService.submitStateUpdateTask("block-task", new ClusterStateUpdateTask() {
             @Override
             public ClusterState execute(ClusterState currentState) {

@@ -158,6 +159,7 @@ public class ClusterServiceTests extends ESTestCase {
                 } catch (InterruptedException e) {
                     throw new RuntimeException(e);
                 }
+                blockCompleted.countDown();
                 return currentState;
             }

@@ -187,6 +189,7 @@ public class ClusterServiceTests extends ESTestCase {
         });
         block.countDown();
         block2.await();
+        blockCompleted.await();
         synchronized (clusterService.updateTasksPerExecutor) {
             assertTrue("expected empty map but was " + clusterService.updateTasksPerExecutor,
                 clusterService.updateTasksPerExecutor.isEmpty());

@ -0,0 +1,146 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.concurrent.atomic.AtomicInteger;

public class NumbersTests extends ESTestCase {

    public void testToLongExact() {
        assertEquals(3L, Numbers.toLongExact(Long.valueOf(3L)));
        assertEquals(3L, Numbers.toLongExact(Integer.valueOf(3)));
        assertEquals(3L, Numbers.toLongExact(Short.valueOf((short) 3)));
        assertEquals(3L, Numbers.toLongExact(Byte.valueOf((byte) 3)));
        assertEquals(3L, Numbers.toLongExact(3d));
        assertEquals(3L, Numbers.toLongExact(3f));
        assertEquals(3L, Numbers.toLongExact(BigInteger.valueOf(3L)));
        assertEquals(3L, Numbers.toLongExact(BigDecimal.valueOf(3L)));

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toLongExact(3.1d));
        assertEquals("3.1 is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toLongExact(Double.NaN));
        assertEquals("NaN is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toLongExact(Double.POSITIVE_INFINITY));
        assertEquals("Infinity is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toLongExact(3.1f));
        assertEquals("3.1 is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toLongExact(new AtomicInteger(3))); // not supported
        assertEquals("Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long", e.getMessage());
    }

    public void testToIntExact() {
        assertEquals(3L, Numbers.toIntExact(Long.valueOf(3L)));
        assertEquals(3L, Numbers.toIntExact(Integer.valueOf(3)));
        assertEquals(3L, Numbers.toIntExact(Short.valueOf((short) 3)));
        assertEquals(3L, Numbers.toIntExact(Byte.valueOf((byte) 3)));
        assertEquals(3L, Numbers.toIntExact(3d));
        assertEquals(3L, Numbers.toIntExact(3f));
        assertEquals(3L, Numbers.toIntExact(BigInteger.valueOf(3L)));
        assertEquals(3L, Numbers.toIntExact(BigDecimal.valueOf(3L)));

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toIntExact(3.1d));
        assertEquals("3.1 is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toIntExact(Double.NaN));
        assertEquals("NaN is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toIntExact(Double.POSITIVE_INFINITY));
        assertEquals("Infinity is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toIntExact(3.1f));
        assertEquals("3.1 is not an integer value", e.getMessage());
        ArithmeticException ae = expectThrows(ArithmeticException.class,
                () -> Numbers.toIntExact(1L << 40));
        assertEquals("integer overflow", ae.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toIntExact(new AtomicInteger(3))); // not supported
        assertEquals("Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long", e.getMessage());
    }

    public void testToShortExact() {
        assertEquals(3L, Numbers.toShortExact(Long.valueOf(3L)));
        assertEquals(3L, Numbers.toShortExact(Integer.valueOf(3)));
        assertEquals(3L, Numbers.toShortExact(Short.valueOf((short) 3)));
        assertEquals(3L, Numbers.toShortExact(Byte.valueOf((byte) 3)));
        assertEquals(3L, Numbers.toShortExact(3d));
        assertEquals(3L, Numbers.toShortExact(3f));
        assertEquals(3L, Numbers.toShortExact(BigInteger.valueOf(3L)));
        assertEquals(3L, Numbers.toShortExact(BigDecimal.valueOf(3L)));

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toShortExact(3.1d));
        assertEquals("3.1 is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toShortExact(Double.NaN));
        assertEquals("NaN is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toShortExact(Double.POSITIVE_INFINITY));
        assertEquals("Infinity is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toShortExact(3.1f));
        assertEquals("3.1 is not an integer value", e.getMessage());
        ArithmeticException ae = expectThrows(ArithmeticException.class,
                () -> Numbers.toShortExact(100000));
        assertEquals("short overflow: " + 100000, ae.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toShortExact(new AtomicInteger(3))); // not supported
        assertEquals("Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long", e.getMessage());
    }

    public void testToByteExact() {
        assertEquals(3L, Numbers.toByteExact(Long.valueOf(3L)));
        assertEquals(3L, Numbers.toByteExact(Integer.valueOf(3)));
        assertEquals(3L, Numbers.toByteExact(Short.valueOf((short) 3)));
        assertEquals(3L, Numbers.toByteExact(Byte.valueOf((byte) 3)));
        assertEquals(3L, Numbers.toByteExact(3d));
        assertEquals(3L, Numbers.toByteExact(3f));
        assertEquals(3L, Numbers.toByteExact(BigInteger.valueOf(3L)));
        assertEquals(3L, Numbers.toByteExact(BigDecimal.valueOf(3L)));

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toByteExact(3.1d));
        assertEquals("3.1 is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toByteExact(Double.NaN));
        assertEquals("NaN is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toByteExact(Double.POSITIVE_INFINITY));
        assertEquals("Infinity is not an integer value", e.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toByteExact(3.1f));
        assertEquals("3.1 is not an integer value", e.getMessage());
        ArithmeticException ae = expectThrows(ArithmeticException.class,
                () -> Numbers.toByteExact(300));
        assertEquals("byte overflow: " + 300, ae.getMessage());
        e = expectThrows(IllegalArgumentException.class,
                () -> Numbers.toByteExact(new AtomicInteger(3))); // not supported
        assertEquals("Cannot check whether [3] of class [java.util.concurrent.atomic.AtomicInteger] is actually a long", e.getMessage());
    }
}
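
Taken together, these assertions pin down one coercion rule. The following is a minimal sketch of that rule for illustration only; the real org.elasticsearch.common.Numbers implementation differs in details such as overflow handling and float message formatting:

// Illustrative sketch of the conversion rule the tests above exercise.
static long toLongExactSketch(Number n) {
    if (n instanceof Long || n instanceof Integer || n instanceof Short || n instanceof Byte) {
        return n.longValue(); // integral boxed types always fit a long
    } else if (n instanceof Double || n instanceof Float) {
        double d = n.doubleValue();
        // rejects 3.1 ("3.1 is not an integer value"), NaN and Infinity alike
        if (Double.isNaN(d) || Double.isInfinite(d) || d != Math.rint(d)) {
            throw new IllegalArgumentException(n + " is not an integer value");
        }
        return (long) d;
    } else if (n instanceof java.math.BigInteger) {
        return ((java.math.BigInteger) n).longValueExact();
    } else if (n instanceof java.math.BigDecimal) {
        return ((java.math.BigDecimal) n).longValueExact();
    }
    // anything else (e.g. AtomicInteger) is rejected outright
    throw new IllegalArgumentException("Cannot check whether [" + n + "] of class ["
            + n.getClass().getName() + "] is actually a long");
}

The narrower variants then only layer a range check on top, e.g. Math.toIntExact(toLongExactSketch(n)) for the int case, which is where the "integer overflow", "short overflow" and "byte overflow" messages in the tests come from.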

@@ -19,7 +19,9 @@

package org.elasticsearch.common.io.stream;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;

@@ -28,6 +30,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.test.ESTestCase;
import org.joda.time.DateTimeZone;

import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

@@ -657,4 +660,106 @@ public class BytesStreamsTests extends ESTestCase {
        IntStream.range(0, size).forEach(i -> map.put(keyGenerator.get(), valueGenerator.get()));
        return map;
    }

    public void testWriteRandomStrings() throws IOException {
        final int iters = scaledRandomIntBetween(5, 20);
        for (int iter = 0; iter < iters; iter++) {
            List<String> strings = new ArrayList<>();
            int numStrings = randomIntBetween(100, 1000);
            BytesStreamOutput output = new BytesStreamOutput(0);
            for (int i = 0; i < numStrings; i++) {
                String s = randomRealisticUnicodeOfLengthBetween(0, 2048);
                strings.add(s);
                output.writeString(s);
            }

            try (StreamInput streamInput = output.bytes().streamInput()) {
                for (int i = 0; i < numStrings; i++) {
                    String s = streamInput.readString();
                    assertEquals(strings.get(i), s);
                }
            }
        }
    }

    /*
     * tests the extreme case where characters use more than 2 bytes
     */
    public void testWriteLargeSurrogateOnlyString() throws IOException {
        String deseretLetter = "\uD801\uDC00";
        assertEquals(2, deseretLetter.length());
        String largeString = IntStream.range(0, 2048).mapToObj(s -> deseretLetter).collect(Collectors.joining("")).trim();
        assertEquals("expands to 4 bytes", 4, new BytesRef(deseretLetter).length);
        try (BytesStreamOutput output = new BytesStreamOutput(0)) {
            output.writeString(largeString);
            try (StreamInput streamInput = output.bytes().streamInput()) {
                assertEquals(largeString, streamInput.readString());
            }
        }
    }

    public void testReadTooLargeArraySize() throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput(0)) {
            output.writeVInt(10);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }

            output.writeVInt(Integer.MAX_VALUE);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }
            try (StreamInput streamInput = output.bytes().streamInput()) {
                int[] ints = streamInput.readIntArray();
                for (int i = 0; i < 10; i++) {
                    assertEquals(i, ints[i]);
                }
                expectThrows(IllegalStateException.class, () -> streamInput.readIntArray());
            }
        }
    }

    public void testReadCorruptedArraySize() throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput(0)) {
            output.writeVInt(10);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }

            output.writeVInt(100);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }
            try (StreamInput streamInput = output.bytes().streamInput()) {
                int[] ints = streamInput.readIntArray();
                for (int i = 0; i < 10; i++) {
                    assertEquals(i, ints[i]);
                }
                EOFException eofException = expectThrows(EOFException.class, () -> streamInput.readIntArray());
                assertEquals("tried to read: 100 bytes but only 40 remaining", eofException.getMessage());
            }
        }
    }

    public void testReadNegativeArraySize() throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput(0)) {
            output.writeVInt(10);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }

            output.writeVInt(Integer.MIN_VALUE);
            for (int i = 0; i < 10; i++) {
                output.writeInt(i);
            }
            try (StreamInput streamInput = output.bytes().streamInput()) {
                int[] ints = streamInput.readIntArray();
                for (int i = 0; i < 10; i++) {
                    assertEquals(i, ints[i]);
                }
                NegativeArraySizeException exception = expectThrows(NegativeArraySizeException.class, () -> streamInput.readIntArray());
                assertEquals("array size must be positive but was: -2147483648", exception.getMessage());
            }
        }
    }
}
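
The three negative tests above all forge the same framing: readIntArray first reads a vint element count and only then the elements, so a corrupted count desynchronizes the reader. A sketch of the writer side, using only the stream calls already shown in the tests:

// Sketch of the framing the tests above corrupt: a vint length prefix followed
// by 4-byte elements. Writing Integer.MAX_VALUE, 100, or Integer.MIN_VALUE as
// the prefix produces the three failure modes asserted above.
static void writeIntArraySketch(BytesStreamOutput output, int[] values) throws IOException {
    output.writeVInt(values.length);
    for (int value : values) {
        output.writeInt(value);
    }
}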

@@ -45,6 +45,59 @@ import static org.hamcrest.CoreMatchers.equalTo;

public class ScopedSettingsTests extends ESTestCase {

    public void testResetSetting() {
        Setting<Integer> dynamicSetting = Setting.intSetting("some.dyn.setting", 1, Property.Dynamic, Property.NodeScope);
        Setting<Integer> staticSetting = Setting.intSetting("some.static.setting", 1, Property.NodeScope);
        Settings currentSettings = Settings.builder().put("some.dyn.setting", 5).put("some.static.setting", 6).put("archived.foo.bar", 9)
                .build();
        ClusterSettings service = new ClusterSettings(currentSettings, new HashSet<>(Arrays.asList(dynamicSetting, staticSetting)));

        expectThrows(IllegalArgumentException.class, () ->
                service.updateDynamicSettings(Settings.builder().put("some.dyn.setting", 8).putNull("some.static.setting").build(),
                        Settings.builder().put(currentSettings), Settings.builder(), "node"));

        Settings.Builder target = Settings.builder().put(currentSettings);
        Settings.Builder update = Settings.builder();
        assertTrue(service.updateDynamicSettings(Settings.builder().put("some.dyn.setting", 8).build(),
                target, update, "node"));
        assertEquals(8, dynamicSetting.get(target.build()).intValue());
        assertEquals(6, staticSetting.get(target.build()).intValue());
        assertEquals(9, target.build().getAsInt("archived.foo.bar", null).intValue());

        target = Settings.builder().put(currentSettings);
        update = Settings.builder();
        assertTrue(service.updateDynamicSettings(Settings.builder().putNull("some.dyn.setting").build(),
                target, update, "node"));
        assertEquals(1, dynamicSetting.get(target.build()).intValue());
        assertEquals(6, staticSetting.get(target.build()).intValue());
        assertEquals(9, target.build().getAsInt("archived.foo.bar", null).intValue());

        target = Settings.builder().put(currentSettings);
        update = Settings.builder();
        assertTrue(service.updateDynamicSettings(Settings.builder().putNull("archived.foo.bar").build(),
                target, update, "node"));
        assertEquals(5, dynamicSetting.get(target.build()).intValue());
        assertEquals(6, staticSetting.get(target.build()).intValue());
        assertNull(target.build().getAsInt("archived.foo.bar", null));

        target = Settings.builder().put(currentSettings);
        update = Settings.builder();
        assertTrue(service.updateDynamicSettings(Settings.builder().putNull("some.*").build(),
                target, update, "node"));
        assertEquals(1, dynamicSetting.get(target.build()).intValue());
        assertEquals(6, staticSetting.get(target.build()).intValue());
        assertEquals(9, target.build().getAsInt("archived.foo.bar", null).intValue());

        target = Settings.builder().put(currentSettings);
        update = Settings.builder();
        assertTrue(service.updateDynamicSettings(Settings.builder().putNull("*").build(),
                target, update, "node"));
        assertEquals(1, dynamicSetting.get(target.build()).intValue());
        assertEquals(6, staticSetting.get(target.build()).intValue());
        assertNull(target.build().getAsInt("archived.foo.bar", null));
    }
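
In short, the test establishes that putNull on a concrete dynamic key restores its default, putNull on a pattern such as some.* resets every matching dynamic setting while leaving archived settings alone, and only "*" (or the exact archived key) also clears archived settings; static settings can never be changed through updateDynamicSettings. A caller-side sketch of the reset idiom used above:

// Sketch: a null value is the reset signal understood by updateDynamicSettings.
Settings resetOne = Settings.builder().putNull("some.dyn.setting").build(); // back to default (1)
Settings resetAll = Settings.builder().putNull("*").build();                // defaults, archived cleared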

    public void testAddConsumer() {
        Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
        Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope);

@@ -32,17 +32,9 @@ import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.contains;

public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder> {
    /**
     * Check that parser throws exception on missing values field.
     */
    public void testIdsNotProvided() throws IOException {
        String noIdsFieldQuery = "{\"ids\" : { \"type\" : \"my_type\" }";
        ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(noIdsFieldQuery));
        assertThat(e.getMessage(), containsString("no ids values provided"));
    }

    @Override
    protected IdsQueryBuilder doCreateTestQueryBuilder() {

@@ -71,7 +63,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder>
        }
        IdsQueryBuilder query;
        if (types.length > 0 || randomBoolean()) {
            query = new IdsQueryBuilder(types);
            query = new IdsQueryBuilder().types(types);
            query.addIds(ids);
        } else {
            query = new IdsQueryBuilder();

@@ -90,11 +82,11 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder>
    }

    public void testIllegalArguments() {
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder((String[]) null));
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IdsQueryBuilder().types((String[]) null));
        assertEquals("[ids] types cannot be null", e.getMessage());

        IdsQueryBuilder idsQueryBuilder = new IdsQueryBuilder();
        e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[])null));
        e = expectThrows(IllegalArgumentException.class, () -> idsQueryBuilder.addIds((String[]) null));
        assertEquals("[ids] ids cannot be null", e.getMessage());
    }

@@ -102,7 +94,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder>
    public void testIdsQueryWithInvalidValues() throws Exception {
        String query = "{ \"ids\": { \"values\": [[1]] } }";
        ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query));
        assertEquals("Illegal value for id, expecting a string or number, got: START_ARRAY", e.getMessage());
        assertEquals("[ids] failed to parse field [values]", e.getMessage());
    }

    public void testFromJson() throws IOException {

@@ -116,44 +108,82 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder>
                "}";
        IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(json);
        checkGeneratedJson(json, parsed);
        assertEquals(json, 3, parsed.ids().size());
        assertThat(parsed.ids(), contains("1","100","4"));
        assertEquals(json, "my_type", parsed.types()[0]);

        // check that type that is not an array and also ids that are numbers are parsed
        json =
                "{\n" +
                " \"ids\" : {\n" +
                " \"type\" : \"my_type\",\n" +
                " \"values\" : [ 1, 100, 4 ],\n" +
                " \"boost\" : 1.0\n" +
                " }\n" +
                "}";
        parsed = (IdsQueryBuilder) parseQuery(json);
        assertThat(parsed.ids(), contains("1","100","4"));
        assertEquals(json, "my_type", parsed.types()[0]);

        // check with empty type array
        json =
                "{\n" +
                " \"ids\" : {\n" +
                " \"type\" : [ ],\n" +
                " \"values\" : [ \"1\", \"100\", \"4\" ],\n" +
                " \"boost\" : 1.0\n" +
                " }\n" +
                "}";
        parsed = (IdsQueryBuilder) parseQuery(json);
        assertThat(parsed.ids(), contains("1","100","4"));
        assertEquals(json, 0, parsed.types().length);

        // check without type
        json =
                "{\n" +
                " \"ids\" : {\n" +
                " \"values\" : [ \"1\", \"100\", \"4\" ],\n" +
                " \"boost\" : 1.0\n" +
                " }\n" +
                "}";
        parsed = (IdsQueryBuilder) parseQuery(json);
        assertThat(parsed.ids(), contains("1","100","4"));
        assertEquals(json, 0, parsed.types().length);
    }

    public void testFromJsonDeprecatedSyntax() throws IOException {
        IdsQueryBuilder tempQuery = createTestQueryBuilder();
        assumeTrue("test requires at least one type", tempQuery.types() != null && tempQuery.types().length > 0);

        String type = tempQuery.types()[0];
        IdsQueryBuilder testQuery = new IdsQueryBuilder(type);
        IdsQueryBuilder testQuery = new IdsQueryBuilder().types("my_type");

        //single value type can also be called _type
        final String contentString = "{\n" +
                " \"ids\" : {\n" +
                " \"_type\" : \"" + type + "\",\n" +
                " \"values\" : []\n" +
                " \"_type\" : \"my_type\",\n" +
                " \"values\" : [ ]\n" +
                " }\n" +
                "}";

        IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY);
        assertEquals(testQuery, parsed);

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString));
        assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage());
        ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(contentString));
        checkWarningHeaders("Deprecated field [_type] used, expected [type] instead");
        assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage());
        assertEquals(3, e.getLineNumber());
        assertEquals(19, e.getColumnNumber());

        //array of types can also be called type rather than types
        //array of types can also be called types rather than type
        final String contentString2 = "{\n" +
                " \"ids\" : {\n" +
                " \"types\" : [\"" + type + "\"],\n" +
                " \"values\" : []\n" +
                " \"types\" : [\"my_type\"],\n" +
                " \"values\" : [ ]\n" +
                " }\n" +
                "}";
        parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY);
        parsed = (IdsQueryBuilder) parseQuery(contentString2, ParseFieldMatcher.EMPTY);
        assertEquals(testQuery, parsed);

        e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString2));
        e = expectThrows(ParsingException.class, () -> parseQuery(contentString2));
        checkWarningHeaders("Deprecated field [types] used, expected [type] instead");
        assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage());
        checkWarningHeaders("Deprecated field [_type] used, expected [type] instead");
        assertEquals(3, e.getLineNumber());
        assertEquals(19, e.getColumnNumber());
    }
}
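
For reference, the JSON forms parsed above correspond to the following builder calls, using the types() setter that this change introduces in place of the removed IdsQueryBuilder(String...) constructor:

// Programmatic equivalent of the parsed JSON above.
IdsQueryBuilder query = new IdsQueryBuilder().types("my_type");
query.addIds("1", "100", "4");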

@@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.queries.ExtendedCommonTermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;

@@ -29,11 +30,15 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.search.MatchQuery.Type;
import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;

@@ -458,4 +463,35 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
                "}";
        expectThrows(IllegalStateException.class, () -> parseQuery(json2));
    }

    @Override
    protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
        mapperService.merge("t_boost", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("t_boost",
                "string_boost", "type=text,boost=4").string()), MapperService.MergeReason.MAPPING_UPDATE, false);
    }

    public void testMatchPhrasePrefixWithBoost() throws Exception {
        assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
        QueryShardContext context = createShardContext();
        assumeTrue("test runs only when the index version is on or after V_5_0_0_alpha1",
                context.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1));

        {
            // field boost is applied on a single term query
            MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder("string_boost", "foo");
            Query query = builder.toQuery(context);
            assertThat(query, instanceOf(BoostQuery.class));
            assertThat(((BoostQuery) query).getBoost(), equalTo(4f));
            Query innerQuery = ((BoostQuery) query).getQuery();
            assertThat(innerQuery, instanceOf(MultiPhrasePrefixQuery.class));
        }

        {
            // field boost is ignored on phrase query
            MatchPhrasePrefixQueryBuilder builder = new MatchPhrasePrefixQueryBuilder("string_boost", "foo bar");
            Query query = builder.toQuery(context);
            assertThat(query, instanceOf(MultiPhrasePrefixQuery.class));
        }
    }
}

@@ -24,7 +24,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoPoint;

@@ -41,7 +40,6 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.indices.IndicesRequestCache;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;

@@ -106,6 +104,29 @@ public class HighlighterSearchIT extends ESIntegTestCase {
        return Collections.singletonList(InternalSettingsPlugin.class);
    }

    public void testHighlightingWithStoredKeyword() throws IOException {
        XContentBuilder mappings = jsonBuilder();
        mappings.startObject();
        mappings.startObject("type")
                .startObject("properties")
                .startObject("text")
                .field("type", "keyword")
                .field("store", true)
                .endObject()
                .endObject()
                .endObject();
        mappings.endObject();
        assertAcked(prepareCreate("test")
                .addMapping("type", mappings));
        client().prepareIndex("test", "type", "1")
                .setSource(jsonBuilder().startObject().field("text", "foo").endObject())
                .get();
        refresh();
        SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "foo"))
                .highlighter(new HighlightBuilder().field(new Field("text"))).get();
        assertHighlight(search, 0, "text", 0, equalTo("<em>foo</em>"));
    }

    public void testHighlightingWithWildcardName() throws IOException {
        // test the kibana case with * as fieldname that will try highlight all fields including meta fields
        XContentBuilder mappings = jsonBuilder();

@@ -1053,8 +1053,6 @@ public class SimpleNestedIT extends ESIntegTestCase {
        assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
    }

    /**
     */
    private void assertDocumentCount(String index, long numdocs) {
        IndicesStatsResponse stats = admin().indices().prepareStats(index).clear().setDocs(true).get();
        assertNoFailures(stats);

@@ -1062,5 +1060,4 @@ public class SimpleNestedIT extends ESIntegTestCase {

    }


}

@@ -255,6 +255,14 @@ public class QueryStringIT extends ESIntegTestCase {
                containsString("Can't parse boolean value [foo], expected [true] or [false]"));
    }

    public void testAllFieldsWithSpecifiedLeniency() throws IOException {
        Exception e = expectThrows(Exception.class, () ->
                client().prepareSearch("test").setQuery(
                        queryStringQuery("f_date:[now-2D TO now]").lenient(false)).get());
        assertThat(ExceptionsHelper.detailedMessage(e),
                containsString("unit [D] not supported for date math [-2D]"));
    }

    private void assertHits(SearchHits hits, String... ids) {
        assertThat(hits.totalHits(), equalTo((long) ids.length));
        Set<String> hitIds = new HashSet<>();

@@ -263,5 +271,4 @@ public class QueryStringIT extends ESIntegTestCase {
        }
        assertThat(hitIds, containsInAnyOrder(ids));
    }

}

@@ -564,6 +564,18 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
        assertHitCount(resp, 1L);
    }

    public void testAllFieldsWithSpecifiedLeniency() throws IOException {
        String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json");
        prepareCreate("test").setSource(indexBody).get();
        ensureGreen("test");

        Exception e = expectThrows(Exception.class, () ->
                client().prepareSearch("test").setQuery(
                        simpleQueryStringQuery("foo123").lenient(false)).get());
        assertThat(ExceptionsHelper.detailedMessage(e),
                containsString("NumberFormatException[For input string: \"foo123\"]"));
    }

    private void assertHits(SearchHits hits, String... ids) {
        assertThat(hits.totalHits(), equalTo((long) ids.length));
        Set<String> hitIds = new HashSet<>();

@@ -22,12 +22,15 @@ package org.elasticsearch.tribe;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.lease.Releasable;

@@ -41,7 +44,10 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.NodeConfigurationSource;
import org.elasticsearch.test.TestCustomMetaData;
import org.elasticsearch.transport.MockTcpTransportPlugin;
import org.elasticsearch.tribe.TribeServiceTests.MergableCustomMetaData1;
import org.elasticsearch.tribe.TribeServiceTests.MergableCustomMetaData2;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;

@@ -52,9 +58,12 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;


@@ -446,6 +455,132 @@ public class TribeIT extends ESIntegTestCase {
        }
    }

    public void testMergingRemovedCustomMetaData() throws Exception {
        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
        removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
        removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
        MergableCustomMetaData1 customMetaData1 = new MergableCustomMetaData1("a");
        MergableCustomMetaData1 customMetaData2 = new MergableCustomMetaData1("b");
        try (Releasable tribeNode = startTribeNode()) {
            assertNodes(ALL);
            putCustomMetaData(cluster1, customMetaData1);
            putCustomMetaData(cluster2, customMetaData2);
            assertCustomMetaDataUpdated(internalCluster(), customMetaData2);
            removeCustomMetaData(cluster2, customMetaData2.type());
            assertCustomMetaDataUpdated(internalCluster(), customMetaData1);
        }
    }

    public void testMergingCustomMetaData() throws Exception {
        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
        removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
        removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
        MergableCustomMetaData1 customMetaData1 = new MergableCustomMetaData1(randomAsciiOfLength(10));
        MergableCustomMetaData1 customMetaData2 = new MergableCustomMetaData1(randomAsciiOfLength(10));
        List<MergableCustomMetaData1> customMetaDatas = Arrays.asList(customMetaData1, customMetaData2);
        Collections.sort(customMetaDatas, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
        final MergableCustomMetaData1 tribeNodeCustomMetaData = customMetaDatas.get(0);
        try (Releasable tribeNode = startTribeNode()) {
            assertNodes(ALL);
            putCustomMetaData(cluster1, customMetaData1);
            assertCustomMetaDataUpdated(internalCluster(), customMetaData1);
            putCustomMetaData(cluster2, customMetaData2);
            assertCustomMetaDataUpdated(internalCluster(), tribeNodeCustomMetaData);
        }
    }

    public void testMergingMultipleCustomMetaData() throws Exception {
        MetaData.registerPrototype(MergableCustomMetaData1.TYPE, new MergableCustomMetaData1(""));
        MetaData.registerPrototype(MergableCustomMetaData2.TYPE, new MergableCustomMetaData2(""));
        removeCustomMetaData(cluster1, MergableCustomMetaData1.TYPE);
        removeCustomMetaData(cluster2, MergableCustomMetaData1.TYPE);
        MergableCustomMetaData1 firstCustomMetaDataType1 = new MergableCustomMetaData1(randomAsciiOfLength(10));
        MergableCustomMetaData1 secondCustomMetaDataType1 = new MergableCustomMetaData1(randomAsciiOfLength(10));
        MergableCustomMetaData2 firstCustomMetaDataType2 = new MergableCustomMetaData2(randomAsciiOfLength(10));
        MergableCustomMetaData2 secondCustomMetaDataType2 = new MergableCustomMetaData2(randomAsciiOfLength(10));
        List<MergableCustomMetaData1> mergedCustomMetaDataType1 = Arrays.asList(firstCustomMetaDataType1, secondCustomMetaDataType1);
        List<MergableCustomMetaData2> mergedCustomMetaDataType2 = Arrays.asList(firstCustomMetaDataType2, secondCustomMetaDataType2);
        Collections.sort(mergedCustomMetaDataType1, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
        Collections.sort(mergedCustomMetaDataType2, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
        try (Releasable tribeNode = startTribeNode()) {
            assertNodes(ALL);
            // test putting multiple custom md types propagates to tribe
            putCustomMetaData(cluster1, firstCustomMetaDataType1);
            putCustomMetaData(cluster1, firstCustomMetaDataType2);
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType1);
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType2);

            // test multiple same type custom md is merged and propagates to tribe
            putCustomMetaData(cluster2, secondCustomMetaDataType1);
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType2);
            assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType1.get(0));

            // test multiple same type custom md is merged and propagates to tribe
            putCustomMetaData(cluster2, secondCustomMetaDataType2);
            assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType1.get(0));
            assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType2.get(0));

            // test removing custom md propagates to tribe
            removeCustomMetaData(cluster2, secondCustomMetaDataType1.type());
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType1);
            assertCustomMetaDataUpdated(internalCluster(), mergedCustomMetaDataType2.get(0));
            removeCustomMetaData(cluster2, secondCustomMetaDataType2.type());
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType1);
            assertCustomMetaDataUpdated(internalCluster(), firstCustomMetaDataType2);
        }
    }

    private static void assertCustomMetaDataUpdated(InternalTestCluster cluster,
                                                    TestCustomMetaData expectedCustomMetaData) throws Exception {
        assertBusy(() -> {
            ClusterState tribeState = cluster.getInstance(ClusterService.class, cluster.getNodeNames()[0]).state();
            MetaData.Custom custom = tribeState.metaData().custom(expectedCustomMetaData.type());
            assertNotNull(custom);
            assertThat(custom, equalTo(expectedCustomMetaData));
        });
    }

    private void removeCustomMetaData(InternalTestCluster cluster, final String customMetaDataType) {
        logger.info("removing custom_md type [{}] from [{}]", customMetaDataType, cluster.getClusterName());
        updateMetaData(cluster, builder -> builder.removeCustom(customMetaDataType));
    }

    private void putCustomMetaData(InternalTestCluster cluster, final TestCustomMetaData customMetaData) {
        logger.info("putting custom_md type [{}] with data[{}] from [{}]", customMetaData.type(),
                customMetaData.getData(), cluster.getClusterName());
        updateMetaData(cluster, builder -> builder.putCustom(customMetaData.type(), customMetaData));
    }

    private static void updateMetaData(InternalTestCluster cluster, UnaryOperator<MetaData.Builder> addCustoms) {
        ClusterService clusterService = cluster.getInstance(ClusterService.class, cluster.getMasterName());
        final CountDownLatch latch = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("update customMetaData", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
            @Override
            public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
                latch.countDown();
            }

            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                MetaData.Builder builder = MetaData.builder(currentState.metaData());
                builder = addCustoms.apply(builder);
                return new ClusterState.Builder(currentState).metaData(builder).build();
            }

            @Override
            public void onFailure(String source, Exception e) {
                fail("failed to apply cluster state from [" + source + "] with " + e.getMessage());
            }
        });
        try {
            latch.await(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            fail("latch waiting on publishing custom md interrupted [" + e.getMessage() + "]");
        }
        assertThat("timed out trying to add custom metadata to " + cluster.getClusterName(), latch.getCount(), equalTo(0L));
    }

    private void assertIndicesExist(Client client, String... indices) throws Exception {
        assertBusy(() -> {
            ClusterState state = client.admin().cluster().prepareState().setRoutingTable(true).setMetaData(true).get().getState();

@@ -19,9 +19,22 @@

package org.elasticsearch.tribe;

import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestCustomMetaData;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.instanceOf;

public class TribeServiceTests extends ESTestCase {
    public void testMinimalSettings() {

@@ -96,4 +109,128 @@ public class TribeServiceTests extends ESTestCase {
        assertEquals("7.7.7.7", clientSettings.get("transport.bind_host"));
        assertEquals("8.8.8.8", clientSettings.get("transport.publish_host"));
    }

    public void testMergeCustomMetaDataSimple() {
        Map<String, MetaData.Custom> mergedCustoms =
                TribeService.mergeChangedCustomMetaData(Collections.singleton(MergableCustomMetaData1.TYPE),
                        s -> Collections.singletonList(new MergableCustomMetaData1("data1")));
        TestCustomMetaData mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData1.TYPE);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData1.class));
        assertNotNull(mergedCustom);
        assertEquals(mergedCustom.getData(), "data1");
    }

    public void testMergeCustomMetaData() {
        Map<String, MetaData.Custom> mergedCustoms =
                TribeService.mergeChangedCustomMetaData(Collections.singleton(MergableCustomMetaData1.TYPE),
                        s -> Arrays.asList(new MergableCustomMetaData1("data1"), new MergableCustomMetaData1("data2")));
        TestCustomMetaData mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData1.TYPE);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData1.class));
        assertNotNull(mergedCustom);
        assertEquals(mergedCustom.getData(), "data2");
    }

    public void testMergeMultipleCustomMetaData() {
        Map<String, List<TribeService.MergableCustomMetaData>> inputMap = new HashMap<>();
        inputMap.put(MergableCustomMetaData1.TYPE,
                Arrays.asList(new MergableCustomMetaData1("data10"), new MergableCustomMetaData1("data11")));
        inputMap.put(MergableCustomMetaData2.TYPE,
                Arrays.asList(new MergableCustomMetaData2("data21"), new MergableCustomMetaData2("data20")));
        Map<String, MetaData.Custom> mergedCustoms = TribeService.mergeChangedCustomMetaData(
                Sets.newHashSet(MergableCustomMetaData1.TYPE, MergableCustomMetaData2.TYPE), inputMap::get);
        TestCustomMetaData mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData1.TYPE);
        assertNotNull(mergedCustom);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData1.class));
        assertEquals(mergedCustom.getData(), "data11");
        mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData2.TYPE);
        assertNotNull(mergedCustom);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData2.class));
        assertEquals(mergedCustom.getData(), "data21");
    }

    public void testMergeCustomMetaDataFromMany() {
        Map<String, List<TribeService.MergableCustomMetaData>> inputMap = new HashMap<>();
        int n = randomIntBetween(3, 5);
        List<TribeService.MergableCustomMetaData> customList1 = new ArrayList<>();
        for (int i = 0; i <= n; i++) {
            customList1.add(new MergableCustomMetaData1("data1" + String.valueOf(i)));
        }
        Collections.shuffle(customList1, random());
        inputMap.put(MergableCustomMetaData1.TYPE, customList1);
        List<TribeService.MergableCustomMetaData> customList2 = new ArrayList<>();
        for (int i = 0; i <= n; i++) {
            customList2.add(new MergableCustomMetaData2("data2" + String.valueOf(i)));
        }
        Collections.shuffle(customList2, random());
        inputMap.put(MergableCustomMetaData2.TYPE, customList2);

        Map<String, MetaData.Custom> mergedCustoms = TribeService.mergeChangedCustomMetaData(
                Sets.newHashSet(MergableCustomMetaData1.TYPE, MergableCustomMetaData2.TYPE), inputMap::get);
        TestCustomMetaData mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData1.TYPE);
        assertNotNull(mergedCustom);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData1.class));
        assertEquals(mergedCustom.getData(), "data1" + String.valueOf(n));
        mergedCustom = (TestCustomMetaData) mergedCustoms.get(MergableCustomMetaData2.TYPE);
        assertNotNull(mergedCustom);
        assertThat(mergedCustom, instanceOf(MergableCustomMetaData2.class));
        assertEquals(mergedCustom.getData(), "data2" + String.valueOf(n));
    }

    static class MergableCustomMetaData1 extends TestCustomMetaData
            implements TribeService.MergableCustomMetaData<MergableCustomMetaData1> {
        public static final String TYPE = "custom_md_1";

        protected MergableCustomMetaData1(String data) {
            super(data);
        }

        @Override
        protected TestCustomMetaData newTestCustomMetaData(String data) {
            return new MergableCustomMetaData1(data);
        }

        @Override
        public String type() {
            return TYPE;
        }

        @Override
        public EnumSet<MetaData.XContentContext> context() {
            return EnumSet.of(MetaData.XContentContext.GATEWAY);
        }

        @Override
        public MergableCustomMetaData1 merge(MergableCustomMetaData1 other) {
            return (getData().compareTo(other.getData()) >= 0) ? this : other;
        }
    }

    static class MergableCustomMetaData2 extends TestCustomMetaData
            implements TribeService.MergableCustomMetaData<MergableCustomMetaData2> {
        public static final String TYPE = "custom_md_2";

        protected MergableCustomMetaData2(String data) {
            super(data);
        }

        @Override
        protected TestCustomMetaData newTestCustomMetaData(String data) {
            return new MergableCustomMetaData2(data);
        }

        @Override
        public String type() {
            return TYPE;
        }

        @Override
        public EnumSet<MetaData.XContentContext> context() {
            return EnumSet.of(MetaData.XContentContext.GATEWAY);
        }

        @Override
        public MergableCustomMetaData2 merge(MergableCustomMetaData2 other) {
            return (getData().compareTo(other.getData()) >= 0) ? this : other;
        }
    }
}
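
The merge rule above makes every assertion in these tests predictable: merge keeps the lexicographically greater data, so reducing any shuffled list converges on the maximum, which is why testMergeCustomMetaDataFromMany always expects "data1" + n. A worked example:

// Worked example of the merge resolution used throughout this class.
MergableCustomMetaData1 merged = new MergableCustomMetaData1("data10")
        .merge(new MergableCustomMetaData1("data11"));
assert merged.getData().equals("data11"); // the greater value wins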

@@ -69,8 +69,6 @@ DEFAULT_PLUGINS = ["analysis-icu",
                   "ingest-attachment",
                   "ingest-geoip",
                   "ingest-user-agent",
                   "lang-javascript",
                   "lang-python",
                   "mapper-attachments",
                   "mapper-murmur3",
                   "mapper-size",

@@ -297,6 +295,3 @@ if __name__ == "__main__":
  else:
    download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
  download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)

@@ -71,7 +71,7 @@ BulkProcessor bulkProcessor = BulkProcessor.builder(
            Throwable failure) { ... } <4>
        })
        .setBulkActions(10000) <5>
        .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) <6>
        .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) <6>
        .setFlushInterval(TimeValue.timeValueSeconds(5)) <7>
        .setConcurrentRequests(1) <8>
        .setBackoffPolicy(

@@ -85,7 +85,7 @@ BulkProcessor bulkProcessor = BulkProcessor.builder(
with `response.hasFailures()`
<4> This method is called when the bulk failed and raised a `Throwable`
<5> We want to execute the bulk every 10 000 requests
<6> We want to flush the bulk every 1gb
<6> We want to flush the bulk every 5mb
<7> We want to flush the bulk every 5 seconds whatever the number of requests
<8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be
executed. A value of 1 means 1 concurrent request is allowed to be executed while accumulating new bulk requests.
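
A short usage sketch may help here (the index and document names are illustrative, not part of this change): once built, the processor buffers whatever is added and flushes when any of the thresholds above is crossed.

[source,java]
--------------------------------------------------
bulkProcessor.add(new IndexRequest("twitter", "tweet", "1").source(/* your document */));
bulkProcessor.add(new DeleteRequest("twitter", "tweet", "2"));
bulkProcessor.close(); // flushes outstanding requests and releases resources
--------------------------------------------------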

@@ -250,9 +250,20 @@ name - highly undesirable). Tagging elasticsearch ec2 nodes and then filtering b
[[discovery-ec2-attributes]]
===== Automatic Node Attributes

Though not dependent on actually using `ec2` as discovery (but still requires the cloud aws plugin installed), the
plugin can automatically add node attributes relating to ec2 (for example, availability zone, that can be used with
the awareness allocation feature). In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings.
Though not dependent on actually using `ec2` as discovery (but still requires the `discovery-ec2` plugin installed), the
plugin can automatically add node attributes relating to ec2. In the future this may support other attributes, but this will
currently only add an `aws_availability_zone` node attribute, which is the availability zone of the current node. Attributes
can be used to isolate primary and replica shards across availability zones by using the
{ref}/allocation-awareness.html[Allocation Awareness] feature.

In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings. For example:

[source,yaml]
----
cloud.node.auto_attributes: true

cluster.routing.allocation.awareness.attributes: aws_availability_zone
----

[[discovery-ec2-endpoint]]
===== Using other EC2 endpoint

@@ -1,7 +1,7 @@
[[discovery-file]]
=== File-Based Discovery Plugin

The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file
in the `config/discovery-file` directory for unicast discovery.

[[discovery-file-install]]

@@ -20,6 +20,10 @@ be restarted after installation. Note that installing the plugin will add a
`discovery-file` directory to the `config` folder, and a default `unicast_hosts.txt`
file that must be edited with the correct unicast hosts list before starting the node.

This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
{plugin_url}/discovery-file/discovery-file-{version}.zip.


[[discovery-file-remove]]
[float]
==== Removal

@@ -37,7 +41,7 @@ The node must be stopped before removing the plugin.
[float]
==== Using the file-based discovery plugin

The file-based discovery plugin provides the ability to specify the
unicast hosts list through a simple `unicast_hosts.txt` file that can
be dynamically updated at any time. To enable, add the following in `elasticsearch.yml`:

@@ -47,12 +51,12 @@ discovery.zen.hosts_provider: file
----

This plugin simply provides a facility to supply the unicast hosts list for
zen discovery through an external file that can be updated at any time by a side process.

For example, this gives a convenient mechanism for an Elasticsearch instance
that is run in docker containers to be dynamically supplied a list of IP
addresses to connect to for zen discovery when those IP addresses may not be
known at node startup.

Note that the file-based discovery plugin is meant to augment the unicast
hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore,

@@ -73,11 +77,11 @@ cannot start in the middle of a line).
[float]
==== unicast_hosts.txt file format

The format of the file is to specify one unicast host entry per line.
Each unicast host entry consists of the host (host name or IP address) and
an optional transport port number. If the port number is specified, it must
come immediately after the host (on the same line) separated by a `:`.
If the port number is not specified, a default value of 9300 is used.

For example, here is a `unicast_hosts.txt` file for a cluster with
four nodes that participate in unicast discovery, some of which are not

@@ -92,6 +96,6 @@ running on the default port:
[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301
----------------------------------------------------------------

Host names are allowed instead of IP addresses (similar to
`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be
specified in brackets with the port coming after the brackets.

@@ -56,8 +56,6 @@ include::management.asciidoc[]

include::mapper.asciidoc[]

include::scripting.asciidoc[]

include::security.asciidoc[]

include::repository.asciidoc[]

@@ -27,7 +27,10 @@ Integrations are not plugins, but are external tools or modules that make it eas
Tiki has native support for Elasticsearch. This provides faster & better
search (facets, etc), along with some Natural Language Processing features
(e.g. More like this).

* http://extensions.xwiki.org/xwiki/bin/view/Extension/Elastic+Search+Macro/[XWiki Next Generation Wiki]:
XWiki has an Elasticsearch and Kibana macro that runs Elasticsearch queries and displays the results in XWiki pages using XWiki's scripting language, and that can also embed Kibana widgets in XWiki pages.

[float]
[[data-integrations]]
=== Data import/export and validation

@@ -1,189 +0,0 @@
[[lang-javascript]]
=== JavaScript Language Plugin

deprecated[5.0.0,JavaScript will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]]

The JavaScript language plugin enables the use of JavaScript in Elasticsearch
scripts, via Mozilla's
https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino[Rhino JavaScript] engine.

[[lang-javascript-install]]
[float]
==== Installation

This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install lang-javascript
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.

This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
{plugin_url}/lang-javascript/lang-javascript-{version}.zip.

[[lang-javascript-remove]]
[float]
==== Removal

The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove lang-javascript
----------------------------------------------------------------

The node must be stopped before removing the plugin.

[[lang-javascript-usage]]
==== Using JavaScript in Elasticsearch

Once the plugin has been installed, JavaScript can be used as a scripting
language by setting the `lang` parameter to `javascript`.

Scripting is available in many APIs, but we will use an example with the
`function_score` for demonstration purposes:

[[lang-javascript-inline]]
[float]
=== Inline scripts

WARNING: Enabling inline scripting on an unprotected Elasticsearch cluster is dangerous.
See <<lang-javascript-file>> for a safer option.

If you have enabled {ref}/modules-scripting-security.html#enable-dynamic-scripting[inline scripts],
you can use JavaScript as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "inline": "doc[\"num\"].value * factor",
          "lang": "javascript",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

[[lang-javascript-stored]]
[float]
=== Stored scripts

WARNING: Enabling stored scripts on an unprotected Elasticsearch cluster is dangerous.
See <<lang-javascript-file>> for a safer option.

If you have enabled {ref}/modules-scripting-security.html#enable-dynamic-scripting[stored scripts],
you can use JavaScript as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

POST _scripts/javascript/my_script <1>
{
  "script": "doc[\"num\"].value * factor"
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "stored": "my_script", <2>
          "lang": "javascript",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

<1> We store the script under the id `my_script`.
<2> The function score query retrieves the script with id `my_script`.


[[lang-javascript-file]]
[float]
=== File scripts

You can save your scripts to a file in the `config/scripts/` directory on
every node. The `.javascript` file suffix identifies the script as containing
JavaScript:

First, save this file as `config/scripts/my_script.js` on every node
in the cluster:

[source,painless]
----
doc["num"].value * factor
----

then use the script as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "file": "my_script", <1>
          "lang": "javascript",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

<1> The function score query retrieves the script with filename `my_script.javascript`.
@ -1,189 +0,0 @@
|
|||
[[lang-python]]
=== Python Language Plugin

deprecated[5.0.0,Python will be replaced by the new scripting language {ref}/modules-scripting-painless.html[`Painless`]]

The Python language plugin enables the use of Python in Elasticsearch
scripts, via the http://www.jython.org/[Jython] Java implementation of Python.

[[lang-python-install]]
[float]
==== Installation

This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/elasticsearch-plugin install lang-python
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must
be restarted after installation.

This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from
{plugin_url}/lang-python/lang-python-{version}.zip.

[[lang-python-remove]]
[float]
==== Removal

The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/elasticsearch-plugin remove lang-python
----------------------------------------------------------------

The node must be stopped before removing the plugin.

[[lang-python-usage]]
==== Using Python in Elasticsearch

Once the plugin has been installed, Python can be used as a scripting
language by setting the `lang` parameter to `python`.

Scripting is available in many APIs, but we will use an example with the
`function_score` for demonstration purposes:

[[lang-python-inline]]
[float]
=== Inline scripts

WARNING: Enabling inline scripting on an unprotected Elasticsearch cluster is dangerous.
See <<lang-python-file>> for a safer option.

If you have enabled {ref}/modules-scripting-security.html#enable-dynamic-scripting[inline scripts],
you can use Python as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "inline": "doc[\"num\"].value * factor",
          "lang": "python",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

[[lang-python-stored]]
[float]
=== Stored scripts

WARNING: Enabling stored scripts on an unprotected Elasticsearch cluster is dangerous.
See <<lang-python-file>> for a safer option.

If you have enabled {ref}/modules-scripting-security.html#enable-dynamic-scripting[stored scripts],
you can use Python as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

POST _scripts/python/my_script <1>
{
  "script": "doc[\"num\"].value * factor"
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "stored": "my_script", <2>
          "lang": "python",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

<1> We store the script under the id `my_script`.
<2> The function score query retrieves the script with id `my_script`.


[[lang-python-file]]
[float]
=== File scripts

You can save your scripts to a file in the `config/scripts/` directory on
every node. The `.py` file suffix identifies the script as containing
Python:

First, save this file as `config/scripts/my_script.py` on every node
in the cluster:

[source,python]
----
doc["num"].value * factor
----

then use the script as follows:

[source,js]
----
PUT test/doc/1
{
  "num": 1.0
}

PUT test/doc/2
{
  "num": 2.0
}

GET test/_search
{
  "query": {
    "function_score": {
      "script_score": {
        "script": {
          "file": "my_script", <1>
          "lang": "python",
          "params": {
            "factor": 2
          }
        }
      }
    }
  }
}
----
// CONSOLE

<1> The function score query retrieves the script with filename `my_script.py`.

@ -1,25 +0,0 @@
[[scripting]]
== Scripting Plugins

Scripting plugins extend the scripting functionality in Elasticsearch to allow
the use of other scripting languages.

[float]
=== Core scripting plugins

The core scripting plugins are:

<<lang-javascript,JavaScript Language>>::

The JavaScript language plugin enables the use of JavaScript in Elasticsearch
scripts, via Mozilla's
https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino[Rhino JavaScript] engine.

<<lang-python,Python Language>>::

The Python language plugin enables the use of Python in Elasticsearch
scripts, via the http://www.jython.org/[Jython] Java implementation of Python.

include::lang-javascript.asciidoc[]

include::lang-python.asciidoc[]

@ -107,7 +107,7 @@ request. The response for this example would be:
{
    "doc_count" : 34,
    "monthly" : {
        "buckets : [
        "buckets" : [
            ... // the histogram monthly breakdown
        ]
    }

@ -115,7 +115,7 @@ request. The response for this example would be:
{
    "doc_count" : 439,
    "monthly" : {
        "buckets : [
        "buckets" : [
            ... // the histogram monthly breakdown
        ]
    }

@ -28,8 +28,6 @@ U7321H6 ingest-attachment {version} Ingest processor that uses Apache Tika
U7321H6 ingest-geoip {version} Ingest processor that looks up geo data based on IP addresses using the Maxmind geo database
U7321H6 ingest-user-agent {version} Ingest processor that extracts information from a user agent
U7321H6 jvm-example {version} Demonstrates all the pluggable Java entry points in Elasticsearch
U7321H6 lang-javascript {version} The JavaScript language plugin allows to have javascript as the language of scripts to execute.
U7321H6 lang-python {version} The Python language plugin allows to have python as the language of scripts to execute.
U7321H6 mapper-murmur3 {version} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index.
U7321H6 mapper-size {version} The Mapper Size plugin allows documents to record their uncompressed size at index time.
U7321H6 store-smb {version} The Store SMB plugin adds support for SMB stores.

@ -201,7 +201,7 @@ Let's start with a basic health check, which we can use to see how our cluster i
To check the cluster health, we will be using the <<cat,`_cat` API>>. You can
run the command below in https://www.elastic.co/guide/en/kibana/{branch}/console-kibana.html[Kibana's Console]
by clicking "VIEW IN CONSOLE" or with `curl` by clicking the "COPY AS CURL"
link below and pasting the into a terminal.
link below and pasting it into a terminal.

[source,js]
--------------------------------------------------

@ -11,6 +11,35 @@ for workloads that fall into the database domain, such as retrieving all
documents that match a particular query. If you need to do this, make sure to
use the <<search-request-scroll,Scroll>> API.

[float]
[[maximum-document-size]]
=== Avoid large documents

Given that the default <<modules-http,`http.max_content_length`>> is set to
100MB, Elasticsearch will refuse to index any document that is larger than
that. You might decide to increase that particular setting, but Lucene still
has a limit of about 2GB.

Even without considering hard limits, large documents are usually not
practical. Large documents put more stress on network, memory usage and disk,
even for search requests that do not request the `_source` since Elasticsearch
needs to fetch the `_id` of the document in all cases, and the cost of getting
this field is bigger for large documents due to how the filesystem cache works.
Indexing such a document can use an amount of memory that is several times the
original size of the document. Proximity search (phrase queries for instance)
and <<search-request-highlighting,highlighting>> also become more expensive
since their cost directly depends on the size of the original document.

It is sometimes useful to reconsider what the unit of information should be.
For instance, the fact you want to make books searchable doesn't necessarily
mean that a document should consist of a whole book. It might be a better idea
to use chapters or even paragraphs as documents, and then have a property in
these documents that identifies which book they belong to. This not only
avoids the issues with large documents, it also makes the search experience
better. For instance if a user searches for two words `foo` and `bar`, a match
across different chapters is probably very poor, while a match within the same
paragraph is likely good.
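
A minimal sketch of that approach (the index, type, and field names here are
hypothetical, not taken from the original text): each paragraph becomes its
own document carrying a property that identifies its book.

[source,js]
----
PUT library/paragraph/1
{
  "book_id": "moby-dick",
  "chapter": 1,
  "text": "Call me Ishmael."
}
----
// CONSOLE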

[float]
[[sparsity]]
=== Avoid sparsity

@ -7,7 +7,7 @@ index is considered to be too large or too old.
The API accepts a single alias name and a list of `conditions`. The alias
must point to a single index only. If the index satisfies the specified
conditions then a new index is created and the alias is switched to point to
the new alias.
the new index.
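
As a hedged usage sketch (the alias name and condition values are
hypothetical), rolling over an alias once its index is too old or too large
could look like:

[source,js]
----
POST /logs_write/_rollover
{
  "conditions": {
    "max_age": "7d",
    "max_docs": 10000
  }
}
----
// CONSOLE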


[source,js]

@ -41,11 +41,11 @@ PUT _template/template_1
// TESTSETUP

NOTE: Index templates provide C-style /* */ block comments. Comments are allowed
everywhere in the JSON document except before to the initial opening curly bracket.
everywhere in the JSON document except before the initial opening curly bracket.

Defines a template named template_1, with a template pattern of `te*`.
Defines a template named `template_1`, with a template pattern of `te*`.
The settings and mappings will be applied to any index name that matches
the `te*` template.
the `te*` pattern.

It is also possible to include aliases in an index template as follows:

@ -72,8 +72,8 @@ PUT _template/template_1
// CONSOLE
// TEST[s/^/DELETE _template\/template_1\n/]

<1> the `{index}` placeholder within the alias name will be replaced with the
actual index name that the template gets applied to during index creation.
<1> the `{index}` placeholder in the alias name will be replaced with the
actual index name that the template gets applied to, during index creation.
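
A minimal sketch of such a templated alias (the template body is hypothetical;
only the `{index}` placeholder comes from the callout above):

[source,js]
----
PUT _template/template_1
{
  "template": "te*",
  "aliases": {
    "{index}-alias": {}
  }
}
----
// CONSOLE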

[float]
[[delete]]

@ -120,7 +120,7 @@ GET /_template

[float]
[[indices-templates-exists]]
=== Templates exists
=== Template exists

Used to check if the template exists or not. For example:

@ -131,11 +131,12 @@ HEAD _template/template_1
// CONSOLE

The HTTP status code indicates if the template with the given name
exists or not. A status code `200` means it exists, a `404` it does not.
exists or not. Status code `200` means it exists and `404` means
it does not.

[float]
[[multiple-templates]]
=== Multiple Template Matching
=== Multiple Templates Matching

Multiple index templates can potentially match an index, in this case,
both the settings and mappings are merged into the final configuration

@ -177,7 +178,7 @@ PUT /_template/template_2
// TEST[s/^/DELETE _template\/template_1\n/]

The above will disable storing the `_source` on all `type1` types, but
for indices of that start with `te*`, source will still be enabled.
for indices that start with `te*`, `_source` will still be enabled.
Note, for mappings, the merging is "deep", meaning that specific
object/property based mappings can easily be added/overridden on higher
order templates, with lower order templates providing the basis.

@ -206,7 +207,7 @@ PUT /_template/template_1
--------------------------------------------------
// CONSOLE

To check for the `version`, you can
To check the `version`, you can
<<common-options-response-filtering, filter responses>>
using `filter_path` to limit the response to just the `version`:

@ -25,6 +25,7 @@ way to reindex old indices is to use the `reindex` API.
=== Also see:

* <<breaking_60_cat_changes>>
* <<breaking_60_stats_changes>>
* <<breaking_60_rest_changes>>
* <<breaking_60_search_changes>>
* <<breaking_60_docs_changes>>

@ -35,6 +36,8 @@ way to reindex old indices is to use the `reindex` API.

include::migrate_6_0/cat.asciidoc[]

include::migrate_6_0/stats.asciidoc[]

include::migrate_6_0/rest.asciidoc[]

include::migrate_6_0/search.asciidoc[]

@ -7,4 +7,10 @@ The internal setting `cluster.routing.allocation.snapshot.relocation_enabled` that allowed shards with running snapshots to be reallocated to
different nodes has been removed. Enabling this setting could cause allocation issues if a shard got allocated off a node and then
reallocated back to this node while a snapshot was running.

==== Store throttling settings

Store throttling has been removed. As a consequence, the
`indices.store.throttle.type` and `indices.store.throttle.max_bytes_per_sec`
cluster settings and the `index.store.throttle.type` and
`index.store.throttle.max_bytes_per_sec` index settings are not
recognized anymore.
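
A hedged migration sketch (assuming the throttle settings were previously
applied as transient cluster settings): setting them to `null` clears them
before upgrading.

[source,js]
----
PUT _cluster/settings
{
  "transient": {
    "indices.store.throttle.type": null,
    "indices.store.throttle.max_bytes_per_sec": null
  }
}
----
// CONSOLE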

@ -0,0 +1,7 @@
[[breaking_60_stats_changes]]
=== Stats and info changes

==== Removal of `throttle_time` in the `store` stats

Given that store throttling has been removed, the `store` stats do not report
`throttle_time` anymore.
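
As a small verification sketch (the `filter_path` response filtering used here
is described in the common options docs; the exact filter is an assumption),
the remaining `store` stats can be inspected with:

[source,js]
----
GET /_stats/store?filter_path=_all.total.store
----
// CONSOLE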

@ -45,10 +45,10 @@ The modules in this section are:
<<modules-node,Node client>>::

A Java node client joins the cluster, but doesn't hold data or act as a master node.


<<modules-scripting-painless,Painless>>::

A built-in scripting language for Elasticsearch that's designed to be as secure as possible.
A built-in scripting language for Elasticsearch that's designed to be as secure as possible.

<<modules-plugins,Plugins>>::

@ -56,8 +56,8 @@ The modules in this section are:

<<modules-scripting,Scripting>>::

Custom scripting available in Lucene Expressions, Groovy, Python, and
Javascript. You can also write scripts in the built-in scripting language,
Custom scripting available in Lucene Expressions and Groovy. You can also
write scripts in the built-in scripting language,
<<modules-scripting-painless, Painless>>.

<<modules-snapshots,Snapshot/Restore>>::

@ -105,7 +105,3 @@ include::modules/threadpool.asciidoc[]
include::modules/transport.asciidoc[]

include::modules/tribe.asciidoc[]




@ -30,14 +30,6 @@ and give the most flexibility.
|<<modules-scripting-security, no>>
|built-in

|{plugins}/lang-javascript.html[`javascript`]
|<<modules-scripting-security, no>>
|{plugins}/lang-javascript.html[`lang-javascript`]

|{plugins}/lang-python.html[`python`]
|<<modules-scripting-security, no>>
|{plugins}/lang-python.html[`lang-python`]

|=======================================================================

[float]

@ -98,4 +90,3 @@ include::scripting/expression.asciidoc[]
include::scripting/native.asciidoc[]

include::scripting/advanced-scripting.asciidoc[]

@ -13,8 +13,8 @@ to construct the script. The actual script will extend either
`AbstractExecutableScript` or `AbstractSearchScript`. The second one is likely
the most useful and has several helpful subclasses you can extend like
`AbstractLongSearchScript` and `AbstractDoubleSearchScript`.
Finally, your plugin should register the native
script by declaring the `onModule(ScriptModule)` method.
Finally, your plugin should register the native script by implementing the
`ScriptPlugin` interface.

If you squashed the whole thing into one class it'd look like:

@ -175,6 +175,21 @@ There are only a few minor differences and add-ons:
* `=~` true if a portion of the text matches a pattern (e.g. `x =~ /b/`)
* `==~` true if the entire text matches a pattern (e.g. `x ==~ /[Bb]ob/`)

The `?:` (aka Elvis) operator coalesces null values. So `x ?: 0` is `0` if `x`
is `null` and whatever value `x` has otherwise. It is a convenient way to write
default values like `doc['x'].value ?: 0` which is 0 if `x` is not in the
document being processed. It can also work with null safe dereferences to
efficiently handle null in chains. For example,
`doc['foo.keyword'].value?.length() ?: 0` is 0 if the document being processed
doesn't have a `foo.keyword` field but is the length of that field if it does.
Lastly, `?:` is lazy so the right hand side is not evaluated at all if the left
hand side isn't null.

NOTE: Unlike Groovy, Painless' `?:` operator only coalesces `null`, not `false`
or http://groovy-lang.org/semantics.html#Groovy-Truth[falsy] values. Strictly
speaking Painless' `?:` is more like Kotlin's `?:` than Groovy's `?:`.
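
A small sketch restating the examples above (the `foo.keyword` field is
hypothetical):

[source,painless]
----
// Null-safe dereference plus Elvis: 0 when `foo.keyword` is absent,
// otherwise the length of its value.
int len = doc['foo.keyword'].value?.length() ?: 0;
return len;
----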


[float]
[[painless-control-flow]]
=== Control flow

@ -150,7 +150,7 @@ The benefit of doing this is that it severely limits the attack vectors
available to a hacker.

Restricting permissions is particularly important with scripting languages
like Groovy and Javascript which are designed to do anything that can be done
like Groovy which is designed to do anything that can be done
in Java itself, including writing to the file system, opening sockets to
remote servers, etc.

@ -287,4 +287,3 @@ doing so.
======================================

See http://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html for more information.

@ -69,7 +69,7 @@ GET /_search
"person.location" : {
    "points" : [
        [-70, 40],
        [-80, 30],
        [-80, 30],
        [-90, 20]
    ]
}

@ -106,12 +106,7 @@ POST _suggest
--------------------------------------------------
// CONSOLE

The response contains suggestions scored by the most likely spell
correction first. In this case we received the expected correction
`xorr the god jewel` first while the second correction is less
conservative where only one of the errors is corrected. Note, the
request is executed with `max_errors` set to `0.5` so 50% of the terms
can contain misspellings (See parameter descriptions below).
The response contains suggestions scored by the most likely spell correction first. In this case we received the expected correction "nobel prize".

[source,js]
--------------------------------------------------

@ -320,7 +315,7 @@ The direct generators support the following parameters:
filtered out using `confidence`. Three possible values can be specified:
** `missing`: Only generate suggestions for terms that are not in the
shard. This is the default.
** `popular`: Only suggest terms that occur in more docs on the shard then
** `popular`: Only suggest terms that occur in more docs on the shard than
the original term.
** `always`: Suggest any matching suggestions based on terms in the
suggest text.
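
A minimal request sketch using the `popular` mode (the index and field names
are hypothetical):

[source,js]
----
POST _suggest
{
  "text": "nobel prize",
  "simple_phrase": {
    "phrase": {
      "field": "title.trigram",
      "direct_generator": [ {
        "field": "title.trigram",
        "suggest_mode": "popular"
      } ]
    }
  }
}
----
// CONSOLE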

@ -87,7 +87,7 @@ due to dynamic mapping, and 'foo' does not correctly parse into a date:

[source,js]
--------------------------------------------------
GET twitter/tweet/_validate/query?q=post_date:foo
GET twitter/tweet/_validate/query?q=post_date:foo%5d
--------------------------------------------------
// CONSOLE

@ -102,7 +102,7 @@ about why a query failed:

[source,js]
--------------------------------------------------
GET twitter/tweet/_validate/query?q=post_date:foo&explain=true
GET twitter/tweet/_validate/query?q=post_date:foo%5d&explain=true
--------------------------------------------------
// CONSOLE

@ -158,16 +158,15 @@ discovery.zen.ping.unicast.hosts:
=== `discovery.zen.minimum_master_nodes`

To prevent data loss, it is vital to configure the
`discovery.zen.minimum_master_nodes setting` so that each master-eligible node
`discovery.zen.minimum_master_nodes` setting so that each master-eligible node
knows the _minimum number of master-eligible nodes_ that must be visible in
order to form a cluster.

Without this setting, a cluster that suffers a network failure is at risk of
having the cluster split into two independent clusters -- a split brain --
which will lead to data loss. A more detailed explanation is provided
which will lead to data loss. A more detailed explanation is provided
in <<split-brain>>.

To avoid a split brain, this setting should be set to a _quorum_ of master-
eligible nodes:

@ -181,6 +180,3 @@ nodes should be set to `(3 / 2) + 1` or `2`:
discovery.zen.minimum_master_nodes: 2
--------------------------------------------------

IMPORTANT: If `discovery.zen.minimum_master_nodes` is not set when
Elasticsearch is running in <<dev-vs-prod,production mode>>, an exception will
be thrown which will prevent the node from starting.

@ -28,6 +28,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorer;
import org.codehaus.groovy.GroovyBugError;
import org.codehaus.groovy.ast.ClassCodeExpressionTransformer;
import org.codehaus.groovy.ast.ClassNode;
import org.codehaus.groovy.ast.expr.ConstantExpression;

@ -67,6 +68,7 @@ import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ -302,20 +304,24 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
                // NOTE: we truncate the stack because IndyInterface has security issue (needs getClassLoader)
                // we don't do a security check just as a tradeoff, it cannot really escalate to anything.
                return AccessController.doPrivileged((PrivilegedAction<Object>) script::run);
            } catch (AssertionError ae) {
            } catch (final AssertionError ae) {
                if (ae instanceof GroovyBugError) {
                    // we encountered a bug in Groovy; we wrap this so it does not go to the uncaught exception handler and tear us down
                    final String message = "encountered bug in Groovy while executing script [" + compiledScript.name() + "]";
                    throw new ScriptException(message, ae, Collections.emptyList(), compiledScript.toString(), compiledScript.lang());
                }
                // Groovy asserts are not java asserts, and cannot be disabled, so we do a best-effort trying to determine if this is a
                // Groovy assert (in which case we wrap it and throw), or a real Java assert, in which case we rethrow it as-is, likely
                // resulting in the uncaughtExceptionHandler handling it.
                final StackTraceElement[] elements = ae.getStackTrace();
                if (elements.length > 0 && "org.codehaus.groovy.runtime.InvokerHelper".equals(elements[0].getClassName())) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), ae);
                    throw new ScriptException("Error evaluating " + compiledScript.name(),
                        ae, emptyList(), "", compiledScript.lang());
                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), ae);
                    throw new ScriptException("error evaluating " + compiledScript.name(), ae, emptyList(), "", compiledScript.lang());
                }
                throw ae;
            } catch (Exception | NoClassDefFoundError e) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to run {}", compiledScript), e);
                throw new ScriptException("Error evaluating " + compiledScript.name(), e, emptyList(), "", compiledScript.lang());
                throw new ScriptException("error evaluating " + compiledScript.name(), e, emptyList(), "", compiledScript.lang());
            }
        }

@ -130,6 +130,14 @@ public class GroovySecurityTests extends ESTestCase {
        assertFailure("def foo=false; assert foo, \"msg2\";", NoClassDefFoundError.class);
    }

    public void testGroovyBugError() {
        // this script throws a GroovyBugError because our security manager permissions prevent Groovy from accessing this private field
        // and Groovy does not handle it gracefully; this test will likely start failing if the bug is fixed upstream so that a
        // GroovyBugError no longer surfaces here in which case the script should be replaced with another script that intentionally
        // surfaces a GroovyBugError
        assertFailure("[1, 2].size", AssertionError.class);
    }

    /** runs a script */
    private void doTest(String script) {
        Map<String, Object> vars = new HashMap<String, Object>();

@ -36,7 +36,8 @@ RP: ')';
// between shortcuts and decimal values. Without the mode switch
// shortcuts such as id.0.0 will fail because 0.0 will be interpreted
// as a decimal value instead of two individual list-style shortcuts.
DOT: '.' -> mode(AFTER_DOT);
DOT: '.' -> mode(AFTER_DOT);
NSDOT: '?.' -> mode(AFTER_DOT);
COMMA: ',';
SEMICOLON: ';';
IF: 'if';

@ -80,6 +81,7 @@ BOOLAND: '&&';
BOOLOR: '||';
COND: '?';
COLON: ':';
ELVIS: '?:';
REF: '::';
ARROW: '->';
FIND: '=~';

@ -7,79 +7,81 @@ RBRACE=6
LP=7
RP=8
DOT=9
COMMA=10
SEMICOLON=11
IF=12
IN=13
ELSE=14
WHILE=15
DO=16
FOR=17
CONTINUE=18
BREAK=19
RETURN=20
NEW=21
TRY=22
CATCH=23
THROW=24
THIS=25
INSTANCEOF=26
BOOLNOT=27
BWNOT=28
MUL=29
DIV=30
REM=31
ADD=32
SUB=33
LSH=34
RSH=35
USH=36
LT=37
LTE=38
GT=39
GTE=40
EQ=41
EQR=42
NE=43
NER=44
BWAND=45
XOR=46
BWOR=47
BOOLAND=48
BOOLOR=49
COND=50
COLON=51
REF=52
ARROW=53
FIND=54
MATCH=55
INCR=56
DECR=57
ASSIGN=58
AADD=59
ASUB=60
AMUL=61
ADIV=62
AREM=63
AAND=64
AXOR=65
AOR=66
ALSH=67
ARSH=68
AUSH=69
OCTAL=70
HEX=71
INTEGER=72
DECIMAL=73
STRING=74
REGEX=75
TRUE=76
FALSE=77
NULL=78
TYPE=79
ID=80
DOTINTEGER=81
DOTID=82
NSDOT=10
COMMA=11
SEMICOLON=12
IF=13
IN=14
ELSE=15
WHILE=16
DO=17
FOR=18
CONTINUE=19
BREAK=20
RETURN=21
NEW=22
TRY=23
CATCH=24
THROW=25
THIS=26
INSTANCEOF=27
BOOLNOT=28
BWNOT=29
MUL=30
DIV=31
REM=32
ADD=33
SUB=34
LSH=35
RSH=36
USH=37
LT=38
LTE=39
GT=40
GTE=41
EQ=42
EQR=43
NE=44
NER=45
BWAND=46
XOR=47
BWOR=48
BOOLAND=49
BOOLOR=50
COND=51
COLON=52
ELVIS=53
REF=54
ARROW=55
FIND=56
MATCH=57
INCR=58
DECR=59
ASSIGN=60
AADD=61
ASUB=62
AMUL=63
ADIV=64
AREM=65
AAND=66
AXOR=67
AOR=68
ALSH=69
ARSH=70
AUSH=71
OCTAL=72
HEX=73
INTEGER=74
DECIMAL=75
STRING=76
REGEX=77
TRUE=78
FALSE=79
NULL=80
TYPE=81
ID=82
DOTINTEGER=83
DOTID=84
'{'=3
'}'=4
'['=5

@ -87,66 +89,68 @@ DOTID=82
'('=7
')'=8
'.'=9
','=10
';'=11
'if'=12
'in'=13
'else'=14
'while'=15
'do'=16
'for'=17
'continue'=18
'break'=19
'return'=20
'new'=21
'try'=22
'catch'=23
'throw'=24
'this'=25
'instanceof'=26
'!'=27
'~'=28
'*'=29
'/'=30
'%'=31
'+'=32
'-'=33
'<<'=34
'>>'=35
'>>>'=36
'<'=37
'<='=38
'>'=39
'>='=40
'=='=41
'==='=42
'!='=43
'!=='=44
'&'=45
'^'=46
'|'=47
'&&'=48
'||'=49
'?'=50
':'=51
'::'=52
'->'=53
'=~'=54
'==~'=55
'++'=56
'--'=57
'='=58
'+='=59
'-='=60
'*='=61
'/='=62
'%='=63
'&='=64
'^='=65
'|='=66
'<<='=67
'>>='=68
'>>>='=69
'true'=76
'false'=77
'null'=78
'?.'=10
','=11
';'=12
'if'=13
'in'=14
'else'=15
'while'=16
'do'=17
'for'=18
'continue'=19
'break'=20
'return'=21
'new'=22
'try'=23
'catch'=24
'throw'=25
'this'=26
'instanceof'=27
'!'=28
'~'=29
'*'=30
'/'=31
'%'=32
'+'=33
'-'=34
'<<'=35
'>>'=36
'>>>'=37
'<'=38
'<='=39
'>'=40
'>='=41
'=='=42
'==='=43
'!='=44
'!=='=45
'&'=46
'^'=47
'|'=48
'&&'=49
'||'=50
'?'=51
':'=52
'?:'=53
'::'=54
'->'=55
'=~'=56
'==~'=57
'++'=58
'--'=59
'='=60
'+='=61
'-='=62
'*='=63
'/='=64
'%='=65
'&='=66
'^='=67
'|='=68
'<<='=69
'>>='=70
'>>>='=71
'true'=78
'false'=79
'null'=80

@ -110,6 +110,7 @@ expression
    | expression BOOLAND expression # bool
    | expression BOOLOR expression # bool
    | <assoc=right> expression COND expression COLON expression # conditional
    | <assoc=right> expression ELVIS expression # elvis
    | <assoc=right> expression ( ASSIGN | AADD | ASUB | AMUL |
                                 ADIV  | AREM | AAND | AXOR |
                                 AOR   | ALSH | ARSH | AUSH ) expression # assignment

@ -156,11 +157,11 @@ postdot
    ;

callinvoke
    : COND? DOT DOTID arguments
    : ( DOT | NSDOT ) DOTID arguments
    ;

fieldaccess
    : COND? DOT ( DOTID | DOTINTEGER )
    : ( DOT | NSDOT ) ( DOTID | DOTINTEGER )
    ;

braceaccess

@ -7,79 +7,81 @@ RBRACE=6
LP=7
RP=8
DOT=9
COMMA=10
SEMICOLON=11
IF=12
IN=13
ELSE=14
WHILE=15
DO=16
FOR=17
CONTINUE=18
BREAK=19
RETURN=20
NEW=21
TRY=22
CATCH=23
THROW=24
THIS=25
INSTANCEOF=26
BOOLNOT=27
BWNOT=28
MUL=29
DIV=30
REM=31
ADD=32
SUB=33
LSH=34
RSH=35
USH=36
LT=37
LTE=38
GT=39
GTE=40
EQ=41
EQR=42
NE=43
NER=44
BWAND=45
XOR=46
BWOR=47
BOOLAND=48
BOOLOR=49
COND=50
COLON=51
REF=52
ARROW=53
FIND=54
MATCH=55
INCR=56
DECR=57
ASSIGN=58
AADD=59
ASUB=60
AMUL=61
ADIV=62
AREM=63
AAND=64
AXOR=65
AOR=66
ALSH=67
ARSH=68
AUSH=69
OCTAL=70
HEX=71
INTEGER=72
DECIMAL=73
STRING=74
REGEX=75
TRUE=76
FALSE=77
NULL=78
TYPE=79
ID=80
DOTINTEGER=81
DOTID=82
NSDOT=10
COMMA=11
SEMICOLON=12
IF=13
IN=14
ELSE=15
WHILE=16
DO=17
FOR=18
CONTINUE=19
BREAK=20
RETURN=21
NEW=22
TRY=23
CATCH=24
THROW=25
THIS=26
INSTANCEOF=27
BOOLNOT=28
BWNOT=29
MUL=30
DIV=31
REM=32
ADD=33
SUB=34
LSH=35
RSH=36
USH=37
LT=38
LTE=39
GT=40
GTE=41
EQ=42
EQR=43
NE=44
NER=45
BWAND=46
XOR=47
BWOR=48
BOOLAND=49
BOOLOR=50
COND=51
COLON=52
ELVIS=53
REF=54
ARROW=55
FIND=56
MATCH=57
INCR=58
DECR=59
ASSIGN=60
AADD=61
ASUB=62
AMUL=63
ADIV=64
AREM=65
AAND=66
AXOR=67
AOR=68
ALSH=69
ARSH=70
AUSH=71
OCTAL=72
HEX=73
INTEGER=74
DECIMAL=75
STRING=76
REGEX=77
TRUE=78
FALSE=79
NULL=80
TYPE=81
ID=82
DOTINTEGER=83
DOTID=84
'{'=3
'}'=4
'['=5

@ -87,66 +89,68 @@ DOTID=82
'('=7
')'=8
'.'=9
','=10
';'=11
'if'=12
'in'=13
'else'=14
'while'=15
'do'=16
'for'=17
'continue'=18
'break'=19
'return'=20
'new'=21
'try'=22
'catch'=23
'throw'=24
'this'=25
'instanceof'=26
'!'=27
'~'=28
'*'=29
'/'=30
'%'=31
'+'=32
'-'=33
'<<'=34
'>>'=35
'>>>'=36
'<'=37
'<='=38
'>'=39
'>='=40
'=='=41
'==='=42
'!='=43
'!=='=44
'&'=45
'^'=46
'|'=47
'&&'=48
'||'=49
'?'=50
':'=51
'::'=52
'->'=53
'=~'=54
'==~'=55
'++'=56
'--'=57
'='=58
'+='=59
'-='=60
'*='=61
'/='=62
'%='=63
'&='=64
'^='=65
'|='=66
'<<='=67
'>>='=68
'>>>='=69
'true'=76
'false'=77
'null'=78
'?.'=10
','=11
';'=12
'if'=13
'in'=14
'else'=15
'while'=16
'do'=17
'for'=18
'continue'=19
'break'=20
'return'=21
'new'=22
'try'=23
'catch'=24
'throw'=25
'this'=26
'instanceof'=27
'!'=28
'~'=29
'*'=30
'/'=31
'%'=32
'+'=33
'-'=34
'<<'=35
'>>'=36
'>>>'=37
'<'=38
'<='=39
'>'=40
'>='=41
'=='=42
'==='=43
'!='=44
'!=='=45
'&'=46
'^'=47
'|'=48
'&&'=49
'||'=50
'?'=51
':'=52
'?:'=53
'::'=54
'->'=55
'=~'=56
'==~'=57
'++'=58
'--'=59
'='=60
'+='=61
'-='=62
'*='=63
'/='=64
'%='=65
'&='=66
'^='=67
'|='=68
'<<='=69
'>>='=70
'>>>='=71
'true'=78
'false'=79
'null'=80

@ -36,7 +36,7 @@ import java.util.Set;
 * Tracks user defined methods and variables across compilation phases.
 */
public final class Locals {


    /** Reserved word: params map parameter */
    public static final String PARAMS = "params";
    /** Reserved word: Lucene scorer parameter */

@ -53,25 +53,35 @@ public final class Locals {
    public static final String THIS = "#this";
    /** Reserved word: unused */
    public static final String DOC = "doc";

    /** Map of always reserved keywords */
    public static final Set<String> KEYWORDS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        THIS,PARAMS,SCORER,DOC,VALUE,SCORE,CTX,LOOP

    /** Map of always reserved keywords for the main scope */
    public static final Set<String> MAIN_KEYWORDS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        THIS,PARAMS,SCORER,DOC,VALUE,SCORE,CTX,LOOP
    )));


    /** Map of always reserved keywords for a function scope */
    public static final Set<String> FUNCTION_KEYWORDS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        THIS,LOOP
    )));

    /** Map of always reserved keywords for a lambda scope */
    public static final Set<String> LAMBDA_KEYWORDS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        THIS,LOOP
    )));

    /** Creates a new local variable scope (e.g. loop) inside the current scope */
    public static Locals newLocalScope(Locals currentScope) {
        return new Locals(currentScope);
    }

    /**

    /**
     * Creates a new lambda scope inside the current scope
     * <p>
     * This is just like {@link #newFunctionScope}, except the captured parameters are made read-only.
     */
    public static Locals newLambdaScope(Locals programScope, Type returnType, List<Parameter> parameters,
    public static Locals newLambdaScope(Locals programScope, Type returnType, List<Parameter> parameters,
                                        int captureCount, int maxLoopCounter) {
        Locals locals = new Locals(programScope, returnType);
        Locals locals = new Locals(programScope, returnType, LAMBDA_KEYWORDS);
        for (int i = 0; i < parameters.size(); i++) {
            Parameter parameter = parameters.get(i);
            // TODO: allow non-captures to be r/w:

@ -87,10 +97,10 @@ public final class Locals {
        }
        return locals;
    }


    /** Creates a new function scope inside the current scope */
    public static Locals newFunctionScope(Locals programScope, Type returnType, List<Parameter> parameters, int maxLoopCounter) {
        Locals locals = new Locals(programScope, returnType);
        Locals locals = new Locals(programScope, returnType, FUNCTION_KEYWORDS);
        for (Parameter parameter : parameters) {
            locals.addVariable(parameter.location, parameter.type, parameter.name, false);
        }

@ -100,10 +110,10 @@ public final class Locals {
        }
        return locals;
    }


    /** Creates a new main method scope */
    public static Locals newMainMethodScope(Locals programScope, boolean usesScore, boolean usesCtx, int maxLoopCounter) {
        Locals locals = new Locals(programScope, Definition.OBJECT_TYPE);
        Locals locals = new Locals(programScope, Definition.OBJECT_TYPE, MAIN_KEYWORDS);
        // This reference. Internal use only.
        locals.defineVariable(null, Definition.getType("Object"), THIS, true);

@ -137,16 +147,16 @@ public final class Locals {
        }
        return locals;
    }


    /** Creates a new program scope: the list of methods. It is the parent for all methods */
    public static Locals newProgramScope(Collection<Method> methods) {
        Locals locals = new Locals(null, null);
        Locals locals = new Locals(null, null, null);
        for (Method method : methods) {
            locals.addMethod(method);
        }
        return locals;
    }


    /** Checks if a variable exists or not, in this scope or any parents. */
    public boolean hasVariable(String name) {
        Variable variable = lookupVariable(null, name);

@ -158,7 +168,7 @@ public final class Locals {
        }
        return false;
    }


    /** Accesses a variable. This will throw IAE if the variable does not exist */
    public Variable getVariable(Location location, String name) {
        Variable variable = lookupVariable(location, name);

@ -170,7 +180,7 @@ public final class Locals {
        }
        throw location.createError(new IllegalArgumentException("Variable [" + name + "] is not defined."));
    }


    /** Looks up a method. Returns null if the method does not exist. */
    public Method getMethod(MethodKey key) {
        Method method = lookupMethod(key);

@ -182,23 +192,23 @@ public final class Locals {
        }
        return null;
    }


    /** Creates a new variable.  Throws IAE if the variable has already been defined (even in a parent) or reserved. */
    public Variable addVariable(Location location, Type type, String name, boolean readonly) {
        if (hasVariable(name)) {
            throw location.createError(new IllegalArgumentException("Variable [" + name + "] is already defined."));
        }
        if (KEYWORDS.contains(name)) {
        if (keywords.contains(name)) {
            throw location.createError(new IllegalArgumentException("Variable [" + name + "] is reserved."));
        }
        return defineVariable(location, type, name, readonly);
    }


    /** Return type of this scope (e.g. int, if inside a function that returns int) */
    public Type getReturnType() {
        return returnType;
    }


    /** Returns the top-level program scope. */
    public Locals getProgramScope() {
        Locals locals = this;

@ -207,13 +217,15 @@ public final class Locals {
        }
        return locals;
    }


    ///// private impl

    // parent scope
    private final Locals parent;
    // return type of this scope
    private final Type returnType;
    // keywords for this scope
    private final Set<String> keywords;
    // next slot number to assign
    private int nextSlotNumber;
    // variable name -> variable

@ -225,15 +237,16 @@ public final class Locals {
     * Create a new Locals
     */
    private Locals(Locals parent) {
        this(parent, parent.getReturnType());
        this(parent, parent.returnType, parent.keywords);
    }


    /**
     * Create a new Locals with specified return type
     */
    private Locals(Locals parent, Type returnType) {
    private Locals(Locals parent, Type returnType, Set<String> keywords) {
        this.parent = parent;
        this.returnType = returnType;
        this.keywords = keywords;
        if (parent == null) {
            this.nextSlotNumber = 0;
        } else {

@ -262,7 +275,7 @@ public final class Locals {
        return methods.get(key);
    }



    /** Defines a variable at this scope internally. */
    private Variable defineVariable(Location location, Type type, String name, boolean readonly) {
        if (variables == null) {

@ -273,7 +286,7 @@ public final class Locals {
        nextSlotNumber += type.type.getSize();
        return variable;
    }


    private void addMethod(Method method) {
        if (methods == null) {
            methods = new HashMap<>();

@ -293,7 +306,7 @@ public final class Locals {
        public final Type type;
        public final boolean readonly;
        private final int slot;


        public Variable(Location location, String name, Type type, int slot, boolean readonly) {
            this.location = location;
            this.name = name;

@ -301,12 +314,12 @@ public final class Locals {
            this.slot = slot;
            this.readonly = readonly;
        }


        public int getSlot() {
            return slot;
        }
    }


    public static final class Parameter {
        public final Location location;
        public final String name;

@ -27,7 +27,7 @@ package org.elasticsearch.painless;
 */
public enum Operation {

    MUL ( "+" ),
    MUL ( "*" ),
    DIV ( "/" ),
    REM ( "%" ),
    ADD ( "+" ),

@ -21,16 +21,16 @@ class PainlessLexer extends Lexer {
    new PredictionContextCache();
  public static final int
    WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9,
    COMMA=10, SEMICOLON=11, IF=12, IN=13, ELSE=14, WHILE=15, DO=16, FOR=17,
    CONTINUE=18, BREAK=19, RETURN=20, NEW=21, TRY=22, CATCH=23, THROW=24,
    THIS=25, INSTANCEOF=26, BOOLNOT=27, BWNOT=28, MUL=29, DIV=30, REM=31,
    ADD=32, SUB=33, LSH=34, RSH=35, USH=36, LT=37, LTE=38, GT=39, GTE=40,
    EQ=41, EQR=42, NE=43, NER=44, BWAND=45, XOR=46, BWOR=47, BOOLAND=48, BOOLOR=49,
    COND=50, COLON=51, REF=52, ARROW=53, FIND=54, MATCH=55, INCR=56, DECR=57,
    ASSIGN=58, AADD=59, ASUB=60, AMUL=61, ADIV=62, AREM=63, AAND=64, AXOR=65,
    AOR=66, ALSH=67, ARSH=68, AUSH=69, OCTAL=70, HEX=71, INTEGER=72, DECIMAL=73,
    STRING=74, REGEX=75, TRUE=76, FALSE=77, NULL=78, TYPE=79, ID=80, DOTINTEGER=81,
    DOTID=82;
    NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17,
    FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25,
    THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32,
    ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41,
    EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50,
    COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58,
    DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66,
    AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74,
    DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81,
    ID=82, DOTINTEGER=83, DOTID=84;
  public static final int AFTER_DOT = 1;
  public static String[] modeNames = {
    "DEFAULT_MODE", "AFTER_DOT"

@ -38,39 +38,39 @@ class PainlessLexer extends Lexer {

  public static final String[] ruleNames = {
    "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT",
    "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", "CONTINUE",
    "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", "INSTANCEOF",
    "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH",
    "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "XOR", "BWOR",
    "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND", "MATCH",
    "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM", "AAND",
    "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER", "DECIMAL",
    "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER",
    "DOTID"
  };

  private static final String[] _LITERAL_NAMES = {
    null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "','",
    "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'",
    "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'",
    "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'",
    "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='",
    "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'::'", "'->'",
    "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='",
    "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null,
    null, null, null, null, "'true'", "'false'", "'null'"
  };
  private static final String[] _SYMBOLIC_NAMES = {
    null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP",
    "DOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR",
    "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR",
    "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS",
    "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH",
    "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND",
    "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND",
    "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM",
    "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER",
    "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER",
    "DOTID"
    "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW",
    "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV",
    "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX",
    "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE",
    "ID", "DOTINTEGER", "DOTID"
  };

  private static final String[] _LITERAL_NAMES = {
    null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'",
    "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'",
    "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'",
    "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'",
    "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='",
    "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'",
    "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='",
    "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null,
    null, null, null, null, null, "'true'", "'false'", "'null'"
  };
  private static final String[] _SYMBOLIC_NAMES = {
    null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP",
    "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO",
    "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW",
    "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD",
    "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE",
    "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS",
    "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB",
    "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH",
    "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE",
    "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID"
  };
  public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);

@ -129,11 +129,11 @@ class PainlessLexer extends Lexer {
  @Override
  public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
    switch (ruleIndex) {
    case 29:
    case 30:
      return DIV_sempred((RuleContext)_localctx, predIndex);
    case 74:
    case 76:
      return REGEX_sempred((RuleContext)_localctx, predIndex);
    case 78:
    case 80:
      return TYPE_sempred((RuleContext)_localctx, predIndex);
    }
    return true;
@ -161,7 +161,7 @@ class PainlessLexer extends Lexer {
|
|||
}
|
||||
|
||||
public static final String _serializedATN =
|
||||
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2T\u024b\b\1\b\1\4"+
|
||||
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2V\u0257\b\1\b\1\4"+
|
||||
"\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+
|
||||
"\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+
|
||||
"\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+
|
||||
|
@ -170,202 +170,206 @@ class PainlessLexer extends Lexer {
|
|||
"+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64"+
|
||||
"\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t"+
|
||||
"=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4"+
|
||||
"I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\3\2\6"+
|
||||
"\2\u00aa\n\2\r\2\16\2\u00ab\3\2\3\2\3\3\3\3\3\3\3\3\7\3\u00b4\n\3\f\3"+
|
||||
"\16\3\u00b7\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00be\n\3\f\3\16\3\u00c1\13\3"+
|
||||
"\3\3\3\3\5\3\u00c5\n\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b"+
|
||||
"\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16"+
|
||||
"\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21"+
|
||||
"\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24"+
|
||||
"\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26"+
|
||||
"\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31"+
|
||||
"\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33"+
|
||||
"\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37"+
|
||||
"\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3#\3$\3$\3$\3%\3%\3%\3%\3&\3&\3\'\3\'"+
|
||||
"\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3+\3+\3,\3,\3,\3-\3-\3-\3-\3.\3.\3"+
|
||||
"/\3/\3\60\3\60\3\61\3\61\3\61\3\62\3\62\3\62\3\63\3\63\3\64\3\64\3\65"+
|
||||
"\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\38\38\38\38\39\39\39\3:\3:\3"+
|
||||
":\3;\3;\3<\3<\3<\3=\3=\3=\3>\3>\3>\3?\3?\3?\3@\3@\3@\3A\3A\3A\3B\3B\3"+
|
||||
"B\3C\3C\3C\3D\3D\3D\3D\3E\3E\3E\3E\3F\3F\3F\3F\3F\3G\3G\6G\u01ac\nG\r"+
|
||||
"G\16G\u01ad\3G\5G\u01b1\nG\3H\3H\3H\6H\u01b6\nH\rH\16H\u01b7\3H\5H\u01bb"+
|
||||
"\nH\3I\3I\3I\7I\u01c0\nI\fI\16I\u01c3\13I\5I\u01c5\nI\3I\5I\u01c8\nI\3"+
|
||||
"J\3J\3J\7J\u01cd\nJ\fJ\16J\u01d0\13J\5J\u01d2\nJ\3J\3J\6J\u01d6\nJ\rJ"+
|
||||
"\16J\u01d7\5J\u01da\nJ\3J\3J\5J\u01de\nJ\3J\6J\u01e1\nJ\rJ\16J\u01e2\5"+
|
||||
"J\u01e5\nJ\3J\5J\u01e8\nJ\3K\3K\3K\3K\3K\3K\7K\u01f0\nK\fK\16K\u01f3\13"+
|
||||
"K\3K\3K\3K\3K\3K\3K\3K\7K\u01fc\nK\fK\16K\u01ff\13K\3K\5K\u0202\nK\3L"+
|
||||
"\3L\3L\3L\6L\u0208\nL\rL\16L\u0209\3L\3L\7L\u020e\nL\fL\16L\u0211\13L"+
|
||||
"\3L\3L\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3P\3P\3P\3P\7P"+
|
||||
"\u0229\nP\fP\16P\u022c\13P\3P\3P\3Q\3Q\7Q\u0232\nQ\fQ\16Q\u0235\13Q\3"+
|
||||
"R\3R\3R\7R\u023a\nR\fR\16R\u023d\13R\5R\u023f\nR\3R\3R\3S\3S\7S\u0245"+
|
||||
"\nS\fS\16S\u0248\13S\3S\3S\6\u00b5\u00bf\u01f1\u01fd\2T\4\3\6\4\b\5\n"+
|
||||
"\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&"+
|
||||
"\24(\25*\26,\27.\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J"+
|
||||
"&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v<x=z>|?"+
|
||||
"~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092"+
|
||||
"J\u0094K\u0096L\u0098M\u009aN\u009cO\u009eP\u00a0Q\u00a2R\u00a4S\u00a6"+
|
||||
"T\4\2\3\24\5\2\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5"+
|
||||
"\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffh"+
|
||||
"h\4\2$$^^\4\2\f\f\61\61\3\2\f\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C"+
|
||||
"\\aac|\u026b\2\4\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2"+
|
||||
"\2\2\16\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2"+
|
||||
"\30\3\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2"+
|
||||
"\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2"+
|
||||
"\2\60\3\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2"+
"\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2"+
"\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T"+
"\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3"+
"\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2"+
"\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2"+
"z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084"+
"\3\2\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2"+
"\2\2\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096"+
"\3\2\2\2\2\u0098\3\2\2\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2"+
"\2\2\u00a0\3\2\2\2\2\u00a2\3\2\2\2\3\u00a4\3\2\2\2\3\u00a6\3\2\2\2\4\u00a9"+
"\3\2\2\2\6\u00c4\3\2\2\2\b\u00c8\3\2\2\2\n\u00ca\3\2\2\2\f\u00cc\3\2\2"+
"\2\16\u00ce\3\2\2\2\20\u00d0\3\2\2\2\22\u00d2\3\2\2\2\24\u00d4\3\2\2\2"+
"\26\u00d8\3\2\2\2\30\u00da\3\2\2\2\32\u00dc\3\2\2\2\34\u00df\3\2\2\2\36"+
"\u00e2\3\2\2\2 \u00e7\3\2\2\2\"\u00ed\3\2\2\2$\u00f0\3\2\2\2&\u00f4\3"+
"\2\2\2(\u00fd\3\2\2\2*\u0103\3\2\2\2,\u010a\3\2\2\2.\u010e\3\2\2\2\60"+
"\u0112\3\2\2\2\62\u0118\3\2\2\2\64\u011e\3\2\2\2\66\u0123\3\2\2\28\u012e"+
"\3\2\2\2:\u0130\3\2\2\2<\u0132\3\2\2\2>\u0134\3\2\2\2@\u0137\3\2\2\2B"+
"\u0139\3\2\2\2D\u013b\3\2\2\2F\u013d\3\2\2\2H\u0140\3\2\2\2J\u0143\3\2"+
"\2\2L\u0147\3\2\2\2N\u0149\3\2\2\2P\u014c\3\2\2\2R\u014e\3\2\2\2T\u0151"+
"\3\2\2\2V\u0154\3\2\2\2X\u0158\3\2\2\2Z\u015b\3\2\2\2\\\u015f\3\2\2\2"+
"^\u0161\3\2\2\2`\u0163\3\2\2\2b\u0165\3\2\2\2d\u0168\3\2\2\2f\u016b\3"+
"\2\2\2h\u016d\3\2\2\2j\u016f\3\2\2\2l\u0172\3\2\2\2n\u0175\3\2\2\2p\u0178"+
"\3\2\2\2r\u017c\3\2\2\2t\u017f\3\2\2\2v\u0182\3\2\2\2x\u0184\3\2\2\2z"+
"\u0187\3\2\2\2|\u018a\3\2\2\2~\u018d\3\2\2\2\u0080\u0190\3\2\2\2\u0082"+
"\u0193\3\2\2\2\u0084\u0196\3\2\2\2\u0086\u0199\3\2\2\2\u0088\u019c\3\2"+
"\2\2\u008a\u01a0\3\2\2\2\u008c\u01a4\3\2\2\2\u008e\u01a9\3\2\2\2\u0090"+
"\u01b2\3\2\2\2\u0092\u01c4\3\2\2\2\u0094\u01d1\3\2\2\2\u0096\u0201\3\2"+
"\2\2\u0098\u0203\3\2\2\2\u009a\u0214\3\2\2\2\u009c\u0219\3\2\2\2\u009e"+
"\u021f\3\2\2\2\u00a0\u0224\3\2\2\2\u00a2\u022f\3\2\2\2\u00a4\u023e\3\2"+
"\2\2\u00a6\u0242\3\2\2\2\u00a8\u00aa\t\2\2\2\u00a9\u00a8\3\2\2\2\u00aa"+
"\u00ab\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ac\u00ad\3\2"+
"\2\2\u00ad\u00ae\b\2\2\2\u00ae\5\3\2\2\2\u00af\u00b0\7\61\2\2\u00b0\u00b1"+
"\7\61\2\2\u00b1\u00b5\3\2\2\2\u00b2\u00b4\13\2\2\2\u00b3\u00b2\3\2\2\2"+
"\u00b4\u00b7\3\2\2\2\u00b5\u00b6\3\2\2\2\u00b5\u00b3\3\2\2\2\u00b6\u00b8"+
"\3\2\2\2\u00b7\u00b5\3\2\2\2\u00b8\u00c5\t\3\2\2\u00b9\u00ba\7\61\2\2"+
"\u00ba\u00bb\7,\2\2\u00bb\u00bf\3\2\2\2\u00bc\u00be\13\2\2\2\u00bd\u00bc"+
"\3\2\2\2\u00be\u00c1\3\2\2\2\u00bf\u00c0\3\2\2\2\u00bf\u00bd\3\2\2\2\u00c0"+
"\u00c2\3\2\2\2\u00c1\u00bf\3\2\2\2\u00c2\u00c3\7,\2\2\u00c3\u00c5\7\61"+
"\2\2\u00c4\u00af\3\2\2\2\u00c4\u00b9\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6"+
"\u00c7\b\3\2\2\u00c7\7\3\2\2\2\u00c8\u00c9\7}\2\2\u00c9\t\3\2\2\2\u00ca"+
"\u00cb\7\177\2\2\u00cb\13\3\2\2\2\u00cc\u00cd\7]\2\2\u00cd\r\3\2\2\2\u00ce"+
"\u00cf\7_\2\2\u00cf\17\3\2\2\2\u00d0\u00d1\7*\2\2\u00d1\21\3\2\2\2\u00d2"+
"\u00d3\7+\2\2\u00d3\23\3\2\2\2\u00d4\u00d5\7\60\2\2\u00d5\u00d6\3\2\2"+
"\2\u00d6\u00d7\b\n\3\2\u00d7\25\3\2\2\2\u00d8\u00d9\7.\2\2\u00d9\27\3"+
"\2\2\2\u00da\u00db\7=\2\2\u00db\31\3\2\2\2\u00dc\u00dd\7k\2\2\u00dd\u00de"+
"\7h\2\2\u00de\33\3\2\2\2\u00df\u00e0\7k\2\2\u00e0\u00e1\7p\2\2\u00e1\35"+
"\3\2\2\2\u00e2\u00e3\7g\2\2\u00e3\u00e4\7n\2\2\u00e4\u00e5\7u\2\2\u00e5"+
"\u00e6\7g\2\2\u00e6\37\3\2\2\2\u00e7\u00e8\7y\2\2\u00e8\u00e9\7j\2\2\u00e9"+
"\u00ea\7k\2\2\u00ea\u00eb\7n\2\2\u00eb\u00ec\7g\2\2\u00ec!\3\2\2\2\u00ed"+
"\u00ee\7f\2\2\u00ee\u00ef\7q\2\2\u00ef#\3\2\2\2\u00f0\u00f1\7h\2\2\u00f1"+
"\u00f2\7q\2\2\u00f2\u00f3\7t\2\2\u00f3%\3\2\2\2\u00f4\u00f5\7e\2\2\u00f5"+
"\u00f6\7q\2\2\u00f6\u00f7\7p\2\2\u00f7\u00f8\7v\2\2\u00f8\u00f9\7k\2\2"+
"\u00f9\u00fa\7p\2\2\u00fa\u00fb\7w\2\2\u00fb\u00fc\7g\2\2\u00fc\'\3\2"+
"\2\2\u00fd\u00fe\7d\2\2\u00fe\u00ff\7t\2\2\u00ff\u0100\7g\2\2\u0100\u0101"+
"\7c\2\2\u0101\u0102\7m\2\2\u0102)\3\2\2\2\u0103\u0104\7t\2\2\u0104\u0105"+
"\7g\2\2\u0105\u0106\7v\2\2\u0106\u0107\7w\2\2\u0107\u0108\7t\2\2\u0108"+
"\u0109\7p\2\2\u0109+\3\2\2\2\u010a\u010b\7p\2\2\u010b\u010c\7g\2\2\u010c"+
"\u010d\7y\2\2\u010d-\3\2\2\2\u010e\u010f\7v\2\2\u010f\u0110\7t\2\2\u0110"+
"\u0111\7{\2\2\u0111/\3\2\2\2\u0112\u0113\7e\2\2\u0113\u0114\7c\2\2\u0114"+
"\u0115\7v\2\2\u0115\u0116\7e\2\2\u0116\u0117\7j\2\2\u0117\61\3\2\2\2\u0118"+
"\u0119\7v\2\2\u0119\u011a\7j\2\2\u011a\u011b\7t\2\2\u011b\u011c\7q\2\2"+
"\u011c\u011d\7y\2\2\u011d\63\3\2\2\2\u011e\u011f\7v\2\2\u011f\u0120\7"+
"j\2\2\u0120\u0121\7k\2\2\u0121\u0122\7u\2\2\u0122\65\3\2\2\2\u0123\u0124"+
"\7k\2\2\u0124\u0125\7p\2\2\u0125\u0126\7u\2\2\u0126\u0127\7v\2\2\u0127"+
"\u0128\7c\2\2\u0128\u0129\7p\2\2\u0129\u012a\7e\2\2\u012a\u012b\7g\2\2"+
"\u012b\u012c\7q\2\2\u012c\u012d\7h\2\2\u012d\67\3\2\2\2\u012e\u012f\7"+
"#\2\2\u012f9\3\2\2\2\u0130\u0131\7\u0080\2\2\u0131;\3\2\2\2\u0132\u0133"+
"\7,\2\2\u0133=\3\2\2\2\u0134\u0135\7\61\2\2\u0135\u0136\6\37\2\2\u0136"+
"?\3\2\2\2\u0137\u0138\7\'\2\2\u0138A\3\2\2\2\u0139\u013a\7-\2\2\u013a"+
"C\3\2\2\2\u013b\u013c\7/\2\2\u013cE\3\2\2\2\u013d\u013e\7>\2\2\u013e\u013f"+
"\7>\2\2\u013fG\3\2\2\2\u0140\u0141\7@\2\2\u0141\u0142\7@\2\2\u0142I\3"+
"\2\2\2\u0143\u0144\7@\2\2\u0144\u0145\7@\2\2\u0145\u0146\7@\2\2\u0146"+
"K\3\2\2\2\u0147\u0148\7>\2\2\u0148M\3\2\2\2\u0149\u014a\7>\2\2\u014a\u014b"+
"\7?\2\2\u014bO\3\2\2\2\u014c\u014d\7@\2\2\u014dQ\3\2\2\2\u014e\u014f\7"+
"@\2\2\u014f\u0150\7?\2\2\u0150S\3\2\2\2\u0151\u0152\7?\2\2\u0152\u0153"+
"\7?\2\2\u0153U\3\2\2\2\u0154\u0155\7?\2\2\u0155\u0156\7?\2\2\u0156\u0157"+
"\7?\2\2\u0157W\3\2\2\2\u0158\u0159\7#\2\2\u0159\u015a\7?\2\2\u015aY\3"+
"\2\2\2\u015b\u015c\7#\2\2\u015c\u015d\7?\2\2\u015d\u015e\7?\2\2\u015e"+
"[\3\2\2\2\u015f\u0160\7(\2\2\u0160]\3\2\2\2\u0161\u0162\7`\2\2\u0162_"+
"\3\2\2\2\u0163\u0164\7~\2\2\u0164a\3\2\2\2\u0165\u0166\7(\2\2\u0166\u0167"+
"\7(\2\2\u0167c\3\2\2\2\u0168\u0169\7~\2\2\u0169\u016a\7~\2\2\u016ae\3"+
"\2\2\2\u016b\u016c\7A\2\2\u016cg\3\2\2\2\u016d\u016e\7<\2\2\u016ei\3\2"+
"\2\2\u016f\u0170\7<\2\2\u0170\u0171\7<\2\2\u0171k\3\2\2\2\u0172\u0173"+
"\7/\2\2\u0173\u0174\7@\2\2\u0174m\3\2\2\2\u0175\u0176\7?\2\2\u0176\u0177"+
"\7\u0080\2\2\u0177o\3\2\2\2\u0178\u0179\7?\2\2\u0179\u017a\7?\2\2\u017a"+
"\u017b\7\u0080\2\2\u017bq\3\2\2\2\u017c\u017d\7-\2\2\u017d\u017e\7-\2"+
"\2\u017es\3\2\2\2\u017f\u0180\7/\2\2\u0180\u0181\7/\2\2\u0181u\3\2\2\2"+
"\u0182\u0183\7?\2\2\u0183w\3\2\2\2\u0184\u0185\7-\2\2\u0185\u0186\7?\2"+
"\2\u0186y\3\2\2\2\u0187\u0188\7/\2\2\u0188\u0189\7?\2\2\u0189{\3\2\2\2"+
"\u018a\u018b\7,\2\2\u018b\u018c\7?\2\2\u018c}\3\2\2\2\u018d\u018e\7\61"+
"\2\2\u018e\u018f\7?\2\2\u018f\177\3\2\2\2\u0190\u0191\7\'\2\2\u0191\u0192"+
"\7?\2\2\u0192\u0081\3\2\2\2\u0193\u0194\7(\2\2\u0194\u0195\7?\2\2\u0195"+
"\u0083\3\2\2\2\u0196\u0197\7`\2\2\u0197\u0198\7?\2\2\u0198\u0085\3\2\2"+
"\2\u0199\u019a\7~\2\2\u019a\u019b\7?\2\2\u019b\u0087\3\2\2\2\u019c\u019d"+
"\7>\2\2\u019d\u019e\7>\2\2\u019e\u019f\7?\2\2\u019f\u0089\3\2\2\2\u01a0"+
"\u01a1\7@\2\2\u01a1\u01a2\7@\2\2\u01a2\u01a3\7?\2\2\u01a3\u008b\3\2\2"+
"\2\u01a4\u01a5\7@\2\2\u01a5\u01a6\7@\2\2\u01a6\u01a7\7@\2\2\u01a7\u01a8"+
"\7?\2\2\u01a8\u008d\3\2\2\2\u01a9\u01ab\7\62\2\2\u01aa\u01ac\t\4\2\2\u01ab"+
"\u01aa\3\2\2\2\u01ac\u01ad\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ad\u01ae\3\2"+
"\2\2\u01ae\u01b0\3\2\2\2\u01af\u01b1\t\5\2\2\u01b0\u01af\3\2\2\2\u01b0"+
"\u01b1\3\2\2\2\u01b1\u008f\3\2\2\2\u01b2\u01b3\7\62\2\2\u01b3\u01b5\t"+
"\6\2\2\u01b4\u01b6\t\7\2\2\u01b5\u01b4\3\2\2\2\u01b6\u01b7\3\2\2\2\u01b7"+
"\u01b5\3\2\2\2\u01b7\u01b8\3\2\2\2\u01b8\u01ba\3\2\2\2\u01b9\u01bb\t\5"+
"\2\2\u01ba\u01b9\3\2\2\2\u01ba\u01bb\3\2\2\2\u01bb\u0091\3\2\2\2\u01bc"+
"\u01c5\7\62\2\2\u01bd\u01c1\t\b\2\2\u01be\u01c0\t\t\2\2\u01bf\u01be\3"+
"\2\2\2\u01c0\u01c3\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2"+
"\u01c5\3\2\2\2\u01c3\u01c1\3\2\2\2\u01c4\u01bc\3\2\2\2\u01c4\u01bd\3\2"+
"\2\2\u01c5\u01c7\3\2\2\2\u01c6\u01c8\t\n\2\2\u01c7\u01c6\3\2\2\2\u01c7"+
"\u01c8\3\2\2\2\u01c8\u0093\3\2\2\2\u01c9\u01d2\7\62\2\2\u01ca\u01ce\t"+
"\b\2\2\u01cb\u01cd\t\t\2\2\u01cc\u01cb\3\2\2\2\u01cd\u01d0\3\2\2\2\u01ce"+
"\u01cc\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01d2\3\2\2\2\u01d0\u01ce\3\2"+
"\2\2\u01d1\u01c9\3\2\2\2\u01d1\u01ca\3\2\2\2\u01d2\u01d9\3\2\2\2\u01d3"+
"\u01d5\5\24\n\2\u01d4\u01d6\t\t\2\2\u01d5\u01d4\3\2\2\2\u01d6\u01d7\3"+
"\2\2\2\u01d7\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8\u01da\3\2\2\2\u01d9"+
"\u01d3\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\u01e4\3\2\2\2\u01db\u01dd\t\13"+
"\2\2\u01dc\u01de\t\f\2\2\u01dd\u01dc\3\2\2\2\u01dd\u01de\3\2\2\2\u01de"+
"\u01e0\3\2\2\2\u01df\u01e1\t\t\2\2\u01e0\u01df\3\2\2\2\u01e1\u01e2\3\2"+
"\2\2\u01e2\u01e0\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3\u01e5\3\2\2\2\u01e4"+
"\u01db\3\2\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e7\3\2\2\2\u01e6\u01e8\t\r"+
"\2\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u0095\3\2\2\2\u01e9"+
"\u01f1\7$\2\2\u01ea\u01eb\7^\2\2\u01eb\u01f0\7$\2\2\u01ec\u01ed\7^\2\2"+
"\u01ed\u01f0\7^\2\2\u01ee\u01f0\n\16\2\2\u01ef\u01ea\3\2\2\2\u01ef\u01ec"+
"\3\2\2\2\u01ef\u01ee\3\2\2\2\u01f0\u01f3\3\2\2\2\u01f1\u01f2\3\2\2\2\u01f1"+
"\u01ef\3\2\2\2\u01f2\u01f4\3\2\2\2\u01f3\u01f1\3\2\2\2\u01f4\u0202\7$"+
"\2\2\u01f5\u01fd\7)\2\2\u01f6\u01f7\7^\2\2\u01f7\u01fc\7)\2\2\u01f8\u01f9"+
"\7^\2\2\u01f9\u01fc\7^\2\2\u01fa\u01fc\n\16\2\2\u01fb\u01f6\3\2\2\2\u01fb"+
"\u01f8\3\2\2\2\u01fb\u01fa\3\2\2\2\u01fc\u01ff\3\2\2\2\u01fd\u01fe\3\2"+
"\2\2\u01fd\u01fb\3\2\2\2\u01fe\u0200\3\2\2\2\u01ff\u01fd\3\2\2\2\u0200"+
"\u0202\7)\2\2\u0201\u01e9\3\2\2\2\u0201\u01f5\3\2\2\2\u0202\u0097\3\2"+
"\2\2\u0203\u0207\7\61\2\2\u0204\u0208\n\17\2\2\u0205\u0206\7^\2\2\u0206"+
"\u0208\n\20\2\2\u0207\u0204\3\2\2\2\u0207\u0205\3\2\2\2\u0208\u0209\3"+
"\2\2\2\u0209\u0207\3\2\2\2\u0209\u020a\3\2\2\2\u020a\u020b\3\2\2\2\u020b"+
"\u020f\7\61\2\2\u020c\u020e\t\21\2\2\u020d\u020c\3\2\2\2\u020e\u0211\3"+
"\2\2\2\u020f\u020d\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0212\3\2\2\2\u0211"+
"\u020f\3\2\2\2\u0212\u0213\6L\3\2\u0213\u0099\3\2\2\2\u0214\u0215\7v\2"+
"\2\u0215\u0216\7t\2\2\u0216\u0217\7w\2\2\u0217\u0218\7g\2\2\u0218\u009b"+
"\3\2\2\2\u0219\u021a\7h\2\2\u021a\u021b\7c\2\2\u021b\u021c\7n\2\2\u021c"+
"\u021d\7u\2\2\u021d\u021e\7g\2\2\u021e\u009d\3\2\2\2\u021f\u0220\7p\2"+
"\2\u0220\u0221\7w\2\2\u0221\u0222\7n\2\2\u0222\u0223\7n\2\2\u0223\u009f"+
"\3\2\2\2\u0224\u022a\5\u00a2Q\2\u0225\u0226\5\24\n\2\u0226\u0227\5\u00a2"+
"Q\2\u0227\u0229\3\2\2\2\u0228\u0225\3\2\2\2\u0229\u022c\3\2\2\2\u022a"+
"\u0228\3\2\2\2\u022a\u022b\3\2\2\2\u022b\u022d\3\2\2\2\u022c\u022a\3\2"+
"\2\2\u022d\u022e\6P\4\2\u022e\u00a1\3\2\2\2\u022f\u0233\t\22\2\2\u0230"+
"\u0232\t\23\2\2\u0231\u0230\3\2\2\2\u0232\u0235\3\2\2\2\u0233\u0231\3"+
"\2\2\2\u0233\u0234\3\2\2\2\u0234\u00a3\3\2\2\2\u0235\u0233\3\2\2\2\u0236"+
"\u023f\7\62\2\2\u0237\u023b\t\b\2\2\u0238\u023a\t\t\2\2\u0239\u0238\3"+
"\2\2\2\u023a\u023d\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023c\3\2\2\2\u023c"+
"\u023f\3\2\2\2\u023d\u023b\3\2\2\2\u023e\u0236\3\2\2\2\u023e\u0237\3\2"+
"\2\2\u023f\u0240\3\2\2\2\u0240\u0241\bR\4\2\u0241\u00a5\3\2\2\2\u0242"+
"\u0246\t\22\2\2\u0243\u0245\t\23\2\2\u0244\u0243\3\2\2\2\u0245\u0248\3"+
"\2\2\2\u0246\u0244\3\2\2\2\u0246\u0247\3\2\2\2\u0247\u0249\3\2\2\2\u0248"+
"\u0246\3\2\2\2\u0249\u024a\bS\4\2\u024a\u00a7\3\2\2\2$\2\3\u00ab\u00b5"+
"\u00bf\u00c4\u01ad\u01b0\u01b7\u01ba\u01c1\u01c4\u01c7\u01ce\u01d1\u01d7"+
"\u01d9\u01dd\u01e2\u01e4\u01e7\u01ef\u01f1\u01fb\u01fd\u0201\u0207\u0209"+
"\u020f\u022a\u0233\u023b\u023e\u0246\5\b\2\2\4\3\2\4\2\2";
"I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\t"+
"T\4U\tU\3\2\6\2\u00ae\n\2\r\2\16\2\u00af\3\2\3\2\3\3\3\3\3\3\3\3\7\3\u00b8"+
"\n\3\f\3\16\3\u00bb\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00c2\n\3\f\3\16\3\u00c5"+
"\13\3\3\3\3\3\5\3\u00c9\n\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3"+
"\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3\r\3"+
"\r\3\16\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3"+
"\21\3\21\3\21\3\21\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3"+
"\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3"+
"\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3"+
"\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3"+
"\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3"+
"\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3$\3%\3%\3"+
"%\3&\3&\3&\3&\3\'\3\'\3(\3(\3(\3)\3)\3*\3*\3*\3+\3+\3+\3,\3,\3,\3,\3-"+
"\3-\3-\3.\3.\3.\3.\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62\3\62\3\63\3\63"+
"\3\63\3\64\3\64\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\38\38\38\39\3"+
"9\39\3:\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3>\3>\3>\3?\3?\3?\3@\3@\3@\3"+
"A\3A\3A\3B\3B\3B\3C\3C\3C\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F\3G\3G\3G\3G\3"+
"H\3H\3H\3H\3H\3I\3I\6I\u01b8\nI\rI\16I\u01b9\3I\5I\u01bd\nI\3J\3J\3J\6"+
"J\u01c2\nJ\rJ\16J\u01c3\3J\5J\u01c7\nJ\3K\3K\3K\7K\u01cc\nK\fK\16K\u01cf"+
"\13K\5K\u01d1\nK\3K\5K\u01d4\nK\3L\3L\3L\7L\u01d9\nL\fL\16L\u01dc\13L"+
"\5L\u01de\nL\3L\3L\6L\u01e2\nL\rL\16L\u01e3\5L\u01e6\nL\3L\3L\5L\u01ea"+
"\nL\3L\6L\u01ed\nL\rL\16L\u01ee\5L\u01f1\nL\3L\5L\u01f4\nL\3M\3M\3M\3"+
"M\3M\3M\7M\u01fc\nM\fM\16M\u01ff\13M\3M\3M\3M\3M\3M\3M\3M\7M\u0208\nM"+
"\fM\16M\u020b\13M\3M\5M\u020e\nM\3N\3N\3N\3N\6N\u0214\nN\rN\16N\u0215"+
"\3N\3N\7N\u021a\nN\fN\16N\u021d\13N\3N\3N\3O\3O\3O\3O\3O\3P\3P\3P\3P\3"+
"P\3P\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\7R\u0235\nR\fR\16R\u0238\13R\3R\3R\3S"+
"\3S\7S\u023e\nS\fS\16S\u0241\13S\3T\3T\3T\7T\u0246\nT\fT\16T\u0249\13"+
"T\5T\u024b\nT\3T\3T\3U\3U\7U\u0251\nU\fU\16U\u0254\13U\3U\3U\6\u00b9\u00c3"+
"\u01fd\u0209\2V\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32"+
"\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33\66"+
"\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64"+
"h\65j\66l\67n8p9r:t;v<x=z>|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008a"+
"F\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009aN\u009cO\u009e"+
"P\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\4\2\3\24\5\2\13\f\17\17\""+
"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b"+
"\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2\f\f\61\61\3\2\f"+
"\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0277\2\4\3\2\2\2\2\6\3"+
"\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2"+
"\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3"+
"\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3"+
"\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64"+
"\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3"+
"\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2"+
"\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2"+
"Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3"+
"\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2"+
"\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2"+
"\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088"+
"\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2"+
"\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098\3\2\2\2\2\u009a"+
"\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2\2\2\u00a2\3\2\2"+
"\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\3\u00a8\3\2\2\2\3\u00aa\3\2\2\2\4\u00ad"+
"\3\2\2\2\6\u00c8\3\2\2\2\b\u00cc\3\2\2\2\n\u00ce\3\2\2\2\f\u00d0\3\2\2"+
"\2\16\u00d2\3\2\2\2\20\u00d4\3\2\2\2\22\u00d6\3\2\2\2\24\u00d8\3\2\2\2"+
"\26\u00dc\3\2\2\2\30\u00e1\3\2\2\2\32\u00e3\3\2\2\2\34\u00e5\3\2\2\2\36"+
"\u00e8\3\2\2\2 \u00eb\3\2\2\2\"\u00f0\3\2\2\2$\u00f6\3\2\2\2&\u00f9\3"+
"\2\2\2(\u00fd\3\2\2\2*\u0106\3\2\2\2,\u010c\3\2\2\2.\u0113\3\2\2\2\60"+
"\u0117\3\2\2\2\62\u011b\3\2\2\2\64\u0121\3\2\2\2\66\u0127\3\2\2\28\u012c"+
"\3\2\2\2:\u0137\3\2\2\2<\u0139\3\2\2\2>\u013b\3\2\2\2@\u013d\3\2\2\2B"+
"\u0140\3\2\2\2D\u0142\3\2\2\2F\u0144\3\2\2\2H\u0146\3\2\2\2J\u0149\3\2"+
"\2\2L\u014c\3\2\2\2N\u0150\3\2\2\2P\u0152\3\2\2\2R\u0155\3\2\2\2T\u0157"+
"\3\2\2\2V\u015a\3\2\2\2X\u015d\3\2\2\2Z\u0161\3\2\2\2\\\u0164\3\2\2\2"+
"^\u0168\3\2\2\2`\u016a\3\2\2\2b\u016c\3\2\2\2d\u016e\3\2\2\2f\u0171\3"+
"\2\2\2h\u0174\3\2\2\2j\u0176\3\2\2\2l\u0178\3\2\2\2n\u017b\3\2\2\2p\u017e"+
"\3\2\2\2r\u0181\3\2\2\2t\u0184\3\2\2\2v\u0188\3\2\2\2x\u018b\3\2\2\2z"+
"\u018e\3\2\2\2|\u0190\3\2\2\2~\u0193\3\2\2\2\u0080\u0196\3\2\2\2\u0082"+
"\u0199\3\2\2\2\u0084\u019c\3\2\2\2\u0086\u019f\3\2\2\2\u0088\u01a2\3\2"+
"\2\2\u008a\u01a5\3\2\2\2\u008c\u01a8\3\2\2\2\u008e\u01ac\3\2\2\2\u0090"+
"\u01b0\3\2\2\2\u0092\u01b5\3\2\2\2\u0094\u01be\3\2\2\2\u0096\u01d0\3\2"+
"\2\2\u0098\u01dd\3\2\2\2\u009a\u020d\3\2\2\2\u009c\u020f\3\2\2\2\u009e"+
"\u0220\3\2\2\2\u00a0\u0225\3\2\2\2\u00a2\u022b\3\2\2\2\u00a4\u0230\3\2"+
"\2\2\u00a6\u023b\3\2\2\2\u00a8\u024a\3\2\2\2\u00aa\u024e\3\2\2\2\u00ac"+
"\u00ae\t\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad\3\2"+
"\2\2\u00af\u00b0\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00b2\b\2\2\2\u00b2"+
"\5\3\2\2\2\u00b3\u00b4\7\61\2\2\u00b4\u00b5\7\61\2\2\u00b5\u00b9\3\2\2"+
"\2\u00b6\u00b8\13\2\2\2\u00b7\u00b6\3\2\2\2\u00b8\u00bb\3\2\2\2\u00b9"+
"\u00ba\3\2\2\2\u00b9\u00b7\3\2\2\2\u00ba\u00bc\3\2\2\2\u00bb\u00b9\3\2"+
"\2\2\u00bc\u00c9\t\3\2\2\u00bd\u00be\7\61\2\2\u00be\u00bf\7,\2\2\u00bf"+
"\u00c3\3\2\2\2\u00c0\u00c2\13\2\2\2\u00c1\u00c0\3\2\2\2\u00c2\u00c5\3"+
"\2\2\2\u00c3\u00c4\3\2\2\2\u00c3\u00c1\3\2\2\2\u00c4\u00c6\3\2\2\2\u00c5"+
"\u00c3\3\2\2\2\u00c6\u00c7\7,\2\2\u00c7\u00c9\7\61\2\2\u00c8\u00b3\3\2"+
"\2\2\u00c8\u00bd\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00cb\b\3\2\2\u00cb"+
"\7\3\2\2\2\u00cc\u00cd\7}\2\2\u00cd\t\3\2\2\2\u00ce\u00cf\7\177\2\2\u00cf"+
"\13\3\2\2\2\u00d0\u00d1\7]\2\2\u00d1\r\3\2\2\2\u00d2\u00d3\7_\2\2\u00d3"+
"\17\3\2\2\2\u00d4\u00d5\7*\2\2\u00d5\21\3\2\2\2\u00d6\u00d7\7+\2\2\u00d7"+
"\23\3\2\2\2\u00d8\u00d9\7\60\2\2\u00d9\u00da\3\2\2\2\u00da\u00db\b\n\3"+
"\2\u00db\25\3\2\2\2\u00dc\u00dd\7A\2\2\u00dd\u00de\7\60\2\2\u00de\u00df"+
"\3\2\2\2\u00df\u00e0\b\13\3\2\u00e0\27\3\2\2\2\u00e1\u00e2\7.\2\2\u00e2"+
"\31\3\2\2\2\u00e3\u00e4\7=\2\2\u00e4\33\3\2\2\2\u00e5\u00e6\7k\2\2\u00e6"+
"\u00e7\7h\2\2\u00e7\35\3\2\2\2\u00e8\u00e9\7k\2\2\u00e9\u00ea\7p\2\2\u00ea"+
"\37\3\2\2\2\u00eb\u00ec\7g\2\2\u00ec\u00ed\7n\2\2\u00ed\u00ee\7u\2\2\u00ee"+
"\u00ef\7g\2\2\u00ef!\3\2\2\2\u00f0\u00f1\7y\2\2\u00f1\u00f2\7j\2\2\u00f2"+
"\u00f3\7k\2\2\u00f3\u00f4\7n\2\2\u00f4\u00f5\7g\2\2\u00f5#\3\2\2\2\u00f6"+
"\u00f7\7f\2\2\u00f7\u00f8\7q\2\2\u00f8%\3\2\2\2\u00f9\u00fa\7h\2\2\u00fa"+
"\u00fb\7q\2\2\u00fb\u00fc\7t\2\2\u00fc\'\3\2\2\2\u00fd\u00fe\7e\2\2\u00fe"+
"\u00ff\7q\2\2\u00ff\u0100\7p\2\2\u0100\u0101\7v\2\2\u0101\u0102\7k\2\2"+
"\u0102\u0103\7p\2\2\u0103\u0104\7w\2\2\u0104\u0105\7g\2\2\u0105)\3\2\2"+
"\2\u0106\u0107\7d\2\2\u0107\u0108\7t\2\2\u0108\u0109\7g\2\2\u0109\u010a"+
"\7c\2\2\u010a\u010b\7m\2\2\u010b+\3\2\2\2\u010c\u010d\7t\2\2\u010d\u010e"+
"\7g\2\2\u010e\u010f\7v\2\2\u010f\u0110\7w\2\2\u0110\u0111\7t\2\2\u0111"+
"\u0112\7p\2\2\u0112-\3\2\2\2\u0113\u0114\7p\2\2\u0114\u0115\7g\2\2\u0115"+
"\u0116\7y\2\2\u0116/\3\2\2\2\u0117\u0118\7v\2\2\u0118\u0119\7t\2\2\u0119"+
"\u011a\7{\2\2\u011a\61\3\2\2\2\u011b\u011c\7e\2\2\u011c\u011d\7c\2\2\u011d"+
"\u011e\7v\2\2\u011e\u011f\7e\2\2\u011f\u0120\7j\2\2\u0120\63\3\2\2\2\u0121"+
"\u0122\7v\2\2\u0122\u0123\7j\2\2\u0123\u0124\7t\2\2\u0124\u0125\7q\2\2"+
"\u0125\u0126\7y\2\2\u0126\65\3\2\2\2\u0127\u0128\7v\2\2\u0128\u0129\7"+
"j\2\2\u0129\u012a\7k\2\2\u012a\u012b\7u\2\2\u012b\67\3\2\2\2\u012c\u012d"+
"\7k\2\2\u012d\u012e\7p\2\2\u012e\u012f\7u\2\2\u012f\u0130\7v\2\2\u0130"+
"\u0131\7c\2\2\u0131\u0132\7p\2\2\u0132\u0133\7e\2\2\u0133\u0134\7g\2\2"+
"\u0134\u0135\7q\2\2\u0135\u0136\7h\2\2\u01369\3\2\2\2\u0137\u0138\7#\2"+
"\2\u0138;\3\2\2\2\u0139\u013a\7\u0080\2\2\u013a=\3\2\2\2\u013b\u013c\7"+
",\2\2\u013c?\3\2\2\2\u013d\u013e\7\61\2\2\u013e\u013f\6 \2\2\u013fA\3"+
"\2\2\2\u0140\u0141\7\'\2\2\u0141C\3\2\2\2\u0142\u0143\7-\2\2\u0143E\3"+
"\2\2\2\u0144\u0145\7/\2\2\u0145G\3\2\2\2\u0146\u0147\7>\2\2\u0147\u0148"+
"\7>\2\2\u0148I\3\2\2\2\u0149\u014a\7@\2\2\u014a\u014b\7@\2\2\u014bK\3"+
"\2\2\2\u014c\u014d\7@\2\2\u014d\u014e\7@\2\2\u014e\u014f\7@\2\2\u014f"+
"M\3\2\2\2\u0150\u0151\7>\2\2\u0151O\3\2\2\2\u0152\u0153\7>\2\2\u0153\u0154"+
"\7?\2\2\u0154Q\3\2\2\2\u0155\u0156\7@\2\2\u0156S\3\2\2\2\u0157\u0158\7"+
"@\2\2\u0158\u0159\7?\2\2\u0159U\3\2\2\2\u015a\u015b\7?\2\2\u015b\u015c"+
"\7?\2\2\u015cW\3\2\2\2\u015d\u015e\7?\2\2\u015e\u015f\7?\2\2\u015f\u0160"+
"\7?\2\2\u0160Y\3\2\2\2\u0161\u0162\7#\2\2\u0162\u0163\7?\2\2\u0163[\3"+
"\2\2\2\u0164\u0165\7#\2\2\u0165\u0166\7?\2\2\u0166\u0167\7?\2\2\u0167"+
"]\3\2\2\2\u0168\u0169\7(\2\2\u0169_\3\2\2\2\u016a\u016b\7`\2\2\u016ba"+
"\3\2\2\2\u016c\u016d\7~\2\2\u016dc\3\2\2\2\u016e\u016f\7(\2\2\u016f\u0170"+
"\7(\2\2\u0170e\3\2\2\2\u0171\u0172\7~\2\2\u0172\u0173\7~\2\2\u0173g\3"+
"\2\2\2\u0174\u0175\7A\2\2\u0175i\3\2\2\2\u0176\u0177\7<\2\2\u0177k\3\2"+
"\2\2\u0178\u0179\7A\2\2\u0179\u017a\7<\2\2\u017am\3\2\2\2\u017b\u017c"+
"\7<\2\2\u017c\u017d\7<\2\2\u017do\3\2\2\2\u017e\u017f\7/\2\2\u017f\u0180"+
"\7@\2\2\u0180q\3\2\2\2\u0181\u0182\7?\2\2\u0182\u0183\7\u0080\2\2\u0183"+
"s\3\2\2\2\u0184\u0185\7?\2\2\u0185\u0186\7?\2\2\u0186\u0187\7\u0080\2"+
"\2\u0187u\3\2\2\2\u0188\u0189\7-\2\2\u0189\u018a\7-\2\2\u018aw\3\2\2\2"+
"\u018b\u018c\7/\2\2\u018c\u018d\7/\2\2\u018dy\3\2\2\2\u018e\u018f\7?\2"+
"\2\u018f{\3\2\2\2\u0190\u0191\7-\2\2\u0191\u0192\7?\2\2\u0192}\3\2\2\2"+
"\u0193\u0194\7/\2\2\u0194\u0195\7?\2\2\u0195\177\3\2\2\2\u0196\u0197\7"+
",\2\2\u0197\u0198\7?\2\2\u0198\u0081\3\2\2\2\u0199\u019a\7\61\2\2\u019a"+
"\u019b\7?\2\2\u019b\u0083\3\2\2\2\u019c\u019d\7\'\2\2\u019d\u019e\7?\2"+
"\2\u019e\u0085\3\2\2\2\u019f\u01a0\7(\2\2\u01a0\u01a1\7?\2\2\u01a1\u0087"+
"\3\2\2\2\u01a2\u01a3\7`\2\2\u01a3\u01a4\7?\2\2\u01a4\u0089\3\2\2\2\u01a5"+
"\u01a6\7~\2\2\u01a6\u01a7\7?\2\2\u01a7\u008b\3\2\2\2\u01a8\u01a9\7>\2"+
"\2\u01a9\u01aa\7>\2\2\u01aa\u01ab\7?\2\2\u01ab\u008d\3\2\2\2\u01ac\u01ad"+
"\7@\2\2\u01ad\u01ae\7@\2\2\u01ae\u01af\7?\2\2\u01af\u008f\3\2\2\2\u01b0"+
"\u01b1\7@\2\2\u01b1\u01b2\7@\2\2\u01b2\u01b3\7@\2\2\u01b3\u01b4\7?\2\2"+
"\u01b4\u0091\3\2\2\2\u01b5\u01b7\7\62\2\2\u01b6\u01b8\t\4\2\2\u01b7\u01b6"+
"\3\2\2\2\u01b8\u01b9\3\2\2\2\u01b9\u01b7\3\2\2\2\u01b9\u01ba\3\2\2\2\u01ba"+
"\u01bc\3\2\2\2\u01bb\u01bd\t\5\2\2\u01bc\u01bb\3\2\2\2\u01bc\u01bd\3\2"+
"\2\2\u01bd\u0093\3\2\2\2\u01be\u01bf\7\62\2\2\u01bf\u01c1\t\6\2\2\u01c0"+
"\u01c2\t\7\2\2\u01c1\u01c0\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3\u01c1\3\2"+
"\2\2\u01c3\u01c4\3\2\2\2\u01c4\u01c6\3\2\2\2\u01c5\u01c7\t\5\2\2\u01c6"+
"\u01c5\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7\u0095\3\2\2\2\u01c8\u01d1\7\62"+
"\2\2\u01c9\u01cd\t\b\2\2\u01ca\u01cc\t\t\2\2\u01cb\u01ca\3\2\2\2\u01cc"+
"\u01cf\3\2\2\2\u01cd\u01cb\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce\u01d1\3\2"+
"\2\2\u01cf\u01cd\3\2\2\2\u01d0\u01c8\3\2\2\2\u01d0\u01c9\3\2\2\2\u01d1"+
"\u01d3\3\2\2\2\u01d2\u01d4\t\n\2\2\u01d3\u01d2\3\2\2\2\u01d3\u01d4\3\2"+
"\2\2\u01d4\u0097\3\2\2\2\u01d5\u01de\7\62\2\2\u01d6\u01da\t\b\2\2\u01d7"+
"\u01d9\t\t\2\2\u01d8\u01d7\3\2\2\2\u01d9\u01dc\3\2\2\2\u01da\u01d8\3\2"+
"\2\2\u01da\u01db\3\2\2\2\u01db\u01de\3\2\2\2\u01dc\u01da\3\2\2\2\u01dd"+
"\u01d5\3\2\2\2\u01dd\u01d6\3\2\2\2\u01de\u01e5\3\2\2\2\u01df\u01e1\5\24"+
"\n\2\u01e0\u01e2\t\t\2\2\u01e1\u01e0\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3"+
"\u01e1\3\2\2\2\u01e3\u01e4\3\2\2\2\u01e4\u01e6\3\2\2\2\u01e5\u01df\3\2"+
"\2\2\u01e5\u01e6\3\2\2\2\u01e6\u01f0\3\2\2\2\u01e7\u01e9\t\13\2\2\u01e8"+
"\u01ea\t\f\2\2\u01e9\u01e8\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01ec\3\2"+
"\2\2\u01eb\u01ed\t\t\2\2\u01ec\u01eb\3\2\2\2\u01ed\u01ee\3\2\2\2\u01ee"+
"\u01ec\3\2\2\2\u01ee\u01ef\3\2\2\2\u01ef\u01f1\3\2\2\2\u01f0\u01e7\3\2"+
"\2\2\u01f0\u01f1\3\2\2\2\u01f1\u01f3\3\2\2\2\u01f2\u01f4\t\r\2\2\u01f3"+
"\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2\u01f4\u0099\3\2\2\2\u01f5\u01fd\7$"+
"\2\2\u01f6\u01f7\7^\2\2\u01f7\u01fc\7$\2\2\u01f8\u01f9\7^\2\2\u01f9\u01fc"+
"\7^\2\2\u01fa\u01fc\n\16\2\2\u01fb\u01f6\3\2\2\2\u01fb\u01f8\3\2\2\2\u01fb"+
"\u01fa\3\2\2\2\u01fc\u01ff\3\2\2\2\u01fd\u01fe\3\2\2\2\u01fd\u01fb\3\2"+
"\2\2\u01fe\u0200\3\2\2\2\u01ff\u01fd\3\2\2\2\u0200\u020e\7$\2\2\u0201"+
"\u0209\7)\2\2\u0202\u0203\7^\2\2\u0203\u0208\7)\2\2\u0204\u0205\7^\2\2"+
"\u0205\u0208\7^\2\2\u0206\u0208\n\16\2\2\u0207\u0202\3\2\2\2\u0207\u0204"+
"\3\2\2\2\u0207\u0206\3\2\2\2\u0208\u020b\3\2\2\2\u0209\u020a\3\2\2\2\u0209"+
"\u0207\3\2\2\2\u020a\u020c\3\2\2\2\u020b\u0209\3\2\2\2\u020c\u020e\7)"+
"\2\2\u020d\u01f5\3\2\2\2\u020d\u0201\3\2\2\2\u020e\u009b\3\2\2\2\u020f"+
"\u0213\7\61\2\2\u0210\u0214\n\17\2\2\u0211\u0212\7^\2\2\u0212\u0214\n"+
"\20\2\2\u0213\u0210\3\2\2\2\u0213\u0211\3\2\2\2\u0214\u0215\3\2\2\2\u0215"+
"\u0213\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u021b\7\61"+
"\2\2\u0218\u021a\t\21\2\2\u0219\u0218\3\2\2\2\u021a\u021d\3\2\2\2\u021b"+
"\u0219\3\2\2\2\u021b\u021c\3\2\2\2\u021c\u021e\3\2\2\2\u021d\u021b\3\2"+
"\2\2\u021e\u021f\6N\3\2\u021f\u009d\3\2\2\2\u0220\u0221\7v\2\2\u0221\u0222"+
"\7t\2\2\u0222\u0223\7w\2\2\u0223\u0224\7g\2\2\u0224\u009f\3\2\2\2\u0225"+
"\u0226\7h\2\2\u0226\u0227\7c\2\2\u0227\u0228\7n\2\2\u0228\u0229\7u\2\2"+
"\u0229\u022a\7g\2\2\u022a\u00a1\3\2\2\2\u022b\u022c\7p\2\2\u022c\u022d"+
"\7w\2\2\u022d\u022e\7n\2\2\u022e\u022f\7n\2\2\u022f\u00a3\3\2\2\2\u0230"+
"\u0236\5\u00a6S\2\u0231\u0232\5\24\n\2\u0232\u0233\5\u00a6S\2\u0233\u0235"+
"\3\2\2\2\u0234\u0231\3\2\2\2\u0235\u0238\3\2\2\2\u0236\u0234\3\2\2\2\u0236"+
"\u0237\3\2\2\2\u0237\u0239\3\2\2\2\u0238\u0236\3\2\2\2\u0239\u023a\6R"+
"\4\2\u023a\u00a5\3\2\2\2\u023b\u023f\t\22\2\2\u023c\u023e\t\23\2\2\u023d"+
"\u023c\3\2\2\2\u023e\u0241\3\2\2\2\u023f\u023d\3\2\2\2\u023f\u0240\3\2"+
"\2\2\u0240\u00a7\3\2\2\2\u0241\u023f\3\2\2\2\u0242\u024b\7\62\2\2\u0243"+
"\u0247\t\b\2\2\u0244\u0246\t\t\2\2\u0245\u0244\3\2\2\2\u0246\u0249\3\2"+
"\2\2\u0247\u0245\3\2\2\2\u0247\u0248\3\2\2\2\u0248\u024b\3\2\2\2\u0249"+
"\u0247\3\2\2\2\u024a\u0242\3\2\2\2\u024a\u0243\3\2\2\2\u024b\u024c\3\2"+
"\2\2\u024c\u024d\bT\4\2\u024d\u00a9\3\2\2\2\u024e\u0252\t\22\2\2\u024f"+
"\u0251\t\23\2\2\u0250\u024f\3\2\2\2\u0251\u0254\3\2\2\2\u0252\u0250\3"+
"\2\2\2\u0252\u0253\3\2\2\2\u0253\u0255\3\2\2\2\u0254\u0252\3\2\2\2\u0255"+
"\u0256\bU\4\2\u0256\u00ab\3\2\2\2$\2\3\u00af\u00b9\u00c3\u00c8\u01b9\u01bc"+
"\u01c3\u01c6\u01cd\u01d0\u01d3\u01da\u01dd\u01e3\u01e5\u01e9\u01ee\u01f0"+
"\u01f3\u01fb\u01fd\u0207\u0209\u020d\u0213\u0215\u021b\u0236\u023f\u0247"+
"\u024a\u0252\5\b\2\2\4\3\2\4\2\2";
public static final ATN _ATN =
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
Some files were not shown because too many files have changed in this diff.
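A note on the generated boilerplate above: _serializedATN is ANTLR 4's compact string encoding of the lexer's augmented transition network (ATN), and the static initializer that the truncated diff cuts off conventionally allocates one DFA cache per ATN decision for ANTLR's adaptive prediction. The sketch below shows that standard wiring against the stock ANTLR 4 runtime only; it is not the Painless source itself, and the names AtnBootstrapSketch, loadAtn, and makeDecisionCaches are illustrative assumptions, not part of PainlessLexer.

import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;
import org.antlr.v4.runtime.dfa.DFA;

final class AtnBootstrapSketch {
    private AtnBootstrapSketch() {}

    // Decode the compact string emitted by the ANTLR tool into a runtime ATN,
    // mirroring the generated lexer's _ATN field initializer shown above.
    static ATN loadAtn(String serializedAtn) {
        return new ATNDeserializer().deserialize(serializedAtn.toCharArray());
    }

    // Allocate one empty DFA cache per decision point in the ATN; ANTLR's
    // adaptive prediction machinery fills these caches while input is lexed.
    static DFA[] makeDecisionCaches(ATN atn) {
        DFA[] decisionToDFA = new DFA[atn.getNumberOfDecisions()];
        for (int i = 0; i < atn.getNumberOfDecisions(); i++) {
            decisionToDFA[i] = new DFA(atn.getDecisionState(i), i);
        }
        return decisionToDFA;
    }
}

In ANTLR's generated code these caches are static fields, so every instance of the generated lexer shares the DFAs warmed up by earlier inputs, which is why regenerating the ATN (as this merge does) also renumbers the decision indices throughout the serialized string.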