Merge branch 'master' into feature/query-refactoring

Conflicts:
	core/src/main/java/org/elasticsearch/ElasticsearchException.java
javanna 2015-09-25 09:38:24 +02:00 committed by Luca Cavanna
commit 34de79370f
179 changed files with 2794 additions and 1826 deletions


@@ -76,7 +76,7 @@ Contributing to the Elasticsearch codebase
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace`.
Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace`; make sure to select the `Search for nested projects...` option, as Elasticsearch is a multi-module Maven project. Additionally, you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly, to avoid GC overhead errors. Please make sure the [m2e-connector](http://marketplace.eclipse.org/content/m2e-connector-maven-dependency-plugin) is not installed in your Eclipse distribution, as it will interfere with the setup performed by `mvn eclipse:eclipse`.
Elasticsearch also works perfectly with Eclipse's [m2e](http://www.eclipse.org/m2e/). Once you've installed m2e you can import Elasticsearch as an `Existing Maven Project`.


@@ -445,12 +445,12 @@ These are the linux flavors the Vagrantfile currently supports:
* centos-7
* fedora-22
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
We're missing the following from the support matrix because there aren't high-quality
boxes available in Vagrant Atlas:
* sles-11
* sles-12
* opensuse-13
* oel-6

Vagrantfile

@@ -71,13 +71,22 @@ Vagrant.configure(2) do |config|
config.vm.define "opensuse-13" do |config|
config.vm.box = "chef/opensuse-13"
config.vm.box_url = "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_opensuse-13.2-x86_64_chef-provisionerless.box"
suse_common config
opensuse_common config
end
# The SLES boxes are not considered to be of the highest quality, but they seem sufficient for a test run
config.vm.define "sles-12" do |config|
config.vm.box = "idar/sles12"
sles_common config
end
# Switch the default share for the project root from /vagrant to
# /elasticsearch because /vagrant is confusing when there is a project inside
# the elasticsearch project called vagrant....
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder ".", "/elasticsearch"
config.vm.provider "virtualbox" do |v|
# Give the boxes 2GB so they can run our tests if they have to.
v.memory = 2048
end
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.scope = :box
end
@@ -127,9 +136,7 @@ def deb_common(config, add_openjdk_repository_command, openjdk_list)
ls /etc/apt/sources.list.d/#{openjdk_list}.list > /dev/null 2>&1 ||
(echo "Importing java-8 ppa" &&
#{add_openjdk_repository_command} &&
apt-get update -o \
Dir::Etc::sourcelist="$(ls /etc/apt/sources.list.d/#{openjdk_list}.list)" \
-o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0")
apt-get update)
SHELL
)
end
@@ -155,12 +162,29 @@ def dnf_common(config)
end
end
def suse_common(config)
def opensuse_common(config)
suse_common config, ''
end
def suse_common(config, extra)
provision(config,
update_command: "zypper --non-interactive list-updates",
update_tracking_file: "/var/cache/zypp/packages/last_update",
install_command: "zypper --non-interactive --quiet install --no-recommends",
java_package: "java-1_8_0-openjdk-devel")
java_package: "java-1_8_0-openjdk-devel",
extra: extra)
end
def sles_common(config)
extra = <<-SHELL
zypper rr systemsmanagement_puppet
zypper addrepo -t yast2 http://demeter.uni-regensburg.de/SLES12-x64/DVD1/ dvd1 || true
zypper addrepo -t yast2 http://demeter.uni-regensburg.de/SLES12-x64/DVD2/ dvd2 || true
zypper addrepo http://download.opensuse.org/repositories/Java:Factory/SLE_12/Java:Factory.repo || true
zypper --no-gpg-checks --non-interactive refresh
zypper --non-interactive install git-core
SHELL
suse_common config, extra
end
# Register the main box provisioning script.


@@ -86,11 +86,6 @@
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-spatial</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-expressions</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.spatial4j</groupId>
<artifactId>spatial4j</artifactId>


@@ -463,154 +463,151 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
// change due to refactorings etc. like renaming, we have to keep the ordinal <--> class mapping
// to deserialize the exception coming from another node or from a corruption marker on
// a corrupted index.
// NOTE: ONLY APPEND TO THE END and NEVER REMOVE EXCEPTIONS IN MINOR VERSIONS
final Map<Class<? extends ElasticsearchException>, Integer> exceptions = new HashMap<>();
exceptions.put(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, 0);
exceptions.put(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, 1);
exceptions.put(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, 2);
exceptions.put(org.elasticsearch.discovery.MasterNotDiscoveredException.class, 3);
exceptions.put(org.elasticsearch.ElasticsearchSecurityException.class, 4);
exceptions.put(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, 5);
exceptions.put(org.elasticsearch.indices.IndexClosedException.class, 6);
exceptions.put(org.elasticsearch.http.BindHttpException.class, 7);
exceptions.put(org.elasticsearch.action.search.ReduceSearchPhaseException.class, 8);
exceptions.put(org.elasticsearch.node.NodeClosedException.class, 9);
exceptions.put(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, 10);
exceptions.put(org.elasticsearch.index.shard.ShardNotFoundException.class, 11);
exceptions.put(org.elasticsearch.transport.ConnectTransportException.class, 12);
exceptions.put(org.elasticsearch.transport.NotSerializableTransportException.class, 13);
exceptions.put(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, 14);
exceptions.put(org.elasticsearch.indices.IndexCreationException.class, 15);
exceptions.put(org.elasticsearch.index.IndexNotFoundException.class, 16);
exceptions.put(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, 17);
exceptions.put(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, 18);
exceptions.put(org.elasticsearch.ResourceNotFoundException.class, 19);
exceptions.put(org.elasticsearch.transport.ActionTransportException.class, 20);
exceptions.put(org.elasticsearch.ElasticsearchGenerationException.class, 21);
exceptions.put(org.elasticsearch.index.engine.CreateFailedEngineException.class, 22);
exceptions.put(org.elasticsearch.index.shard.IndexShardStartedException.class, 23);
exceptions.put(org.elasticsearch.search.SearchContextMissingException.class, 24);
exceptions.put(org.elasticsearch.script.ScriptException.class, 25);
exceptions.put(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, 26);
exceptions.put(org.elasticsearch.snapshots.SnapshotCreationException.class, 27);
exceptions.put(org.elasticsearch.index.engine.DeleteFailedEngineException.class, 28);
exceptions.put(org.elasticsearch.index.engine.DocumentMissingException.class, 29);
exceptions.put(org.elasticsearch.snapshots.SnapshotException.class, 30);
exceptions.put(org.elasticsearch.indices.InvalidAliasNameException.class, 31);
exceptions.put(org.elasticsearch.indices.InvalidIndexNameException.class, 32);
exceptions.put(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, 33);
exceptions.put(org.elasticsearch.transport.TransportException.class, 34);
exceptions.put(org.elasticsearch.ElasticsearchParseException.class, 35);
exceptions.put(org.elasticsearch.search.SearchException.class, 36);
exceptions.put(org.elasticsearch.index.mapper.MapperException.class, 37);
exceptions.put(org.elasticsearch.indices.InvalidTypeNameException.class, 38);
exceptions.put(org.elasticsearch.snapshots.SnapshotRestoreException.class, 39);
exceptions.put(org.elasticsearch.common.ParsingException.class, 40);
exceptions.put(org.elasticsearch.index.shard.IndexShardClosedException.class, 41);
exceptions.put(org.elasticsearch.script.expression.ExpressionScriptCompilationException.class, 42);
exceptions.put(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, 43);
exceptions.put(org.elasticsearch.index.translog.TruncatedTranslogException.class, 44);
exceptions.put(org.elasticsearch.indices.recovery.RecoveryFailedException.class, 45);
exceptions.put(org.elasticsearch.index.shard.IndexShardRelocatedException.class, 46);
exceptions.put(org.elasticsearch.transport.NodeShouldNotConnectException.class, 47);
exceptions.put(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, 48);
exceptions.put(org.elasticsearch.index.translog.TranslogCorruptedException.class, 49);
exceptions.put(org.elasticsearch.cluster.block.ClusterBlockException.class, 50);
exceptions.put(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, 51);
exceptions.put(org.elasticsearch.index.IndexShardAlreadyExistsException.class, 52);
exceptions.put(org.elasticsearch.index.engine.VersionConflictEngineException.class, 53);
exceptions.put(org.elasticsearch.index.engine.EngineException.class, 54);
exceptions.put(org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, 55);
exceptions.put(org.elasticsearch.action.NoSuchNodeException.class, 56);
exceptions.put(org.elasticsearch.common.settings.SettingsException.class, 57);
exceptions.put(org.elasticsearch.indices.IndexTemplateMissingException.class, 58);
exceptions.put(org.elasticsearch.transport.SendRequestTransportException.class, 59);
exceptions.put(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, 60);
exceptions.put(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, 61);
exceptions.put(org.elasticsearch.cluster.routing.RoutingValidationException.class, 62);
exceptions.put(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, 63);
exceptions.put(org.elasticsearch.indices.AliasFilterParsingException.class, 64);
exceptions.put(org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, 65);
exceptions.put(org.elasticsearch.gateway.GatewayException.class, 66);
exceptions.put(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, 67);
exceptions.put(org.elasticsearch.http.HttpException.class, 68);
exceptions.put(org.elasticsearch.ElasticsearchException.class, 69);
exceptions.put(org.elasticsearch.snapshots.SnapshotMissingException.class, 70);
exceptions.put(org.elasticsearch.action.PrimaryMissingActionException.class, 71);
exceptions.put(org.elasticsearch.action.FailedNodeException.class, 72);
exceptions.put(org.elasticsearch.search.SearchParseException.class, 73);
exceptions.put(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, 74);
exceptions.put(org.elasticsearch.common.blobstore.BlobStoreException.class, 75);
exceptions.put(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, 76);
exceptions.put(org.elasticsearch.index.engine.RecoveryEngineException.class, 77);
exceptions.put(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, 78);
exceptions.put(org.elasticsearch.action.TimestampParsingException.class, 79);
exceptions.put(org.elasticsearch.action.RoutingMissingException.class, 80);
exceptions.put(org.elasticsearch.index.engine.IndexFailedEngineException.class, 81);
exceptions.put(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, 82);
exceptions.put(org.elasticsearch.repositories.RepositoryException.class, 83);
exceptions.put(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, 84);
exceptions.put(org.elasticsearch.transport.NodeDisconnectedException.class, 85);
exceptions.put(org.elasticsearch.index.AlreadyExpiredException.class, 86);
exceptions.put(org.elasticsearch.search.aggregations.AggregationExecutionException.class, 87);
exceptions.put(org.elasticsearch.index.mapper.MergeMappingException.class, 88);
exceptions.put(org.elasticsearch.indices.InvalidIndexTemplateException.class, 89);
exceptions.put(org.elasticsearch.percolator.PercolateException.class, 90);
exceptions.put(org.elasticsearch.index.engine.RefreshFailedEngineException.class, 91);
exceptions.put(org.elasticsearch.search.aggregations.AggregationInitializationException.class, 92);
exceptions.put(org.elasticsearch.indices.recovery.DelayRecoveryException.class, 93);
exceptions.put(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, 94);
exceptions.put(org.elasticsearch.client.transport.NoNodeAvailableException.class, 95);
exceptions.put(org.elasticsearch.script.groovy.GroovyScriptCompilationException.class, 96);
exceptions.put(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, 97);
exceptions.put(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, 98);
exceptions.put(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, 99);
exceptions.put(org.elasticsearch.index.shard.IndexShardNotStartedException.class, 100);
exceptions.put(org.elasticsearch.action.search.SearchPhaseExecutionException.class, 101);
exceptions.put(org.elasticsearch.transport.ActionNotFoundTransportException.class, 102);
exceptions.put(org.elasticsearch.transport.TransportSerializationException.class, 103);
exceptions.put(org.elasticsearch.transport.RemoteTransportException.class, 104);
exceptions.put(org.elasticsearch.index.engine.EngineCreationFailureException.class, 105);
exceptions.put(org.elasticsearch.cluster.routing.RoutingException.class, 106);
exceptions.put(org.elasticsearch.index.shard.IndexShardRecoveryException.class, 107);
exceptions.put(org.elasticsearch.repositories.RepositoryMissingException.class, 108);
exceptions.put(org.elasticsearch.script.expression.ExpressionScriptExecutionException.class, 109);
exceptions.put(org.elasticsearch.index.percolator.PercolatorException.class, 110);
exceptions.put(org.elasticsearch.index.engine.DocumentSourceMissingException.class, 111);
exceptions.put(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, 112);
exceptions.put(org.elasticsearch.common.settings.NoClassSettingsException.class, 113);
exceptions.put(org.elasticsearch.transport.BindTransportException.class, 114);
exceptions.put(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, 115);
exceptions.put(org.elasticsearch.index.shard.IndexShardRecoveringException.class, 116);
exceptions.put(org.elasticsearch.index.translog.TranslogException.class, 117);
exceptions.put(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, 118);
exceptions.put(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, 119);
exceptions.put(org.elasticsearch.ElasticsearchTimeoutException.class, 120);
exceptions.put(org.elasticsearch.search.query.QueryPhaseExecutionException.class, 121);
exceptions.put(org.elasticsearch.repositories.RepositoryVerificationException.class, 122);
exceptions.put(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, 123);
exceptions.put(org.elasticsearch.script.groovy.GroovyScriptExecutionException.class, 124);
exceptions.put(org.elasticsearch.indices.IndexAlreadyExistsException.class, 125);
exceptions.put(org.elasticsearch.script.Script.ScriptParseException.class, 126);
exceptions.put(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, 127);
exceptions.put(org.elasticsearch.index.mapper.MapperParsingException.class, 128);
exceptions.put(org.elasticsearch.search.SearchContextException.class, 129);
exceptions.put(org.elasticsearch.search.builder.SearchSourceBuilderException.class, 130);
exceptions.put(org.elasticsearch.index.engine.EngineClosedException.class, 131);
exceptions.put(org.elasticsearch.action.NoShardAvailableActionException.class, 132);
exceptions.put(org.elasticsearch.action.UnavailableShardsException.class, 133);
exceptions.put(org.elasticsearch.index.engine.FlushFailedEngineException.class, 134);
exceptions.put(org.elasticsearch.common.breaker.CircuitBreakingException.class, 135);
exceptions.put(org.elasticsearch.transport.NodeNotConnectedException.class, 136);
exceptions.put(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, 137);
exceptions.put(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, 138);
exceptions.put(org.elasticsearch.indices.TypeMissingException.class, 139);
exceptions.put(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.dfs.DfsPhaseExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.util.CancellableThreads.ExecutionCancelledException.class, exceptions.size());
exceptions.put(org.elasticsearch.discovery.MasterNotDiscoveredException.class, exceptions.size());
exceptions.put(org.elasticsearch.ElasticsearchSecurityException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.snapshots.IndexShardRestoreException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexClosedException.class, exceptions.size());
exceptions.put(org.elasticsearch.http.BindHttpException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.search.ReduceSearchPhaseException.class, exceptions.size());
exceptions.put(org.elasticsearch.node.NodeClosedException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.SnapshotFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.ShardNotFoundException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.ConnectTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.NotSerializableTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.ResponseHandlerFailureTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexCreationException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.IndexNotFoundException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.routing.IllegalShardRoutingStateException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException.class, exceptions.size());
exceptions.put(org.elasticsearch.ResourceNotFoundException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.ActionTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.ElasticsearchGenerationException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.CreateFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardStartedException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.SearchContextMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.script.ScriptException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.SnapshotCreationException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.DeleteFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.DocumentMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.SnapshotException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.InvalidAliasNameException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.InvalidIndexNameException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.TransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.ElasticsearchParseException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.SearchException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.mapper.MapperException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.InvalidTypeNameException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.SnapshotRestoreException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.ParsingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardClosedException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.recovery.RecoverFilesRecoveryException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.translog.TruncatedTranslogException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.recovery.RecoveryFailedException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardRelocatedException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.NodeShouldNotConnectException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.translog.TranslogCorruptedException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.block.ClusterBlockException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.IndexShardAlreadyExistsException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.VersionConflictEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.EngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.NoSuchNodeException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.settings.SettingsException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexTemplateMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.SendRequestTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.lucene.Lucene.EarlyTerminationException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.routing.RoutingValidationException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.AliasFilterParsingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.DeleteByQueryFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.gateway.GatewayException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class, exceptions.size());
exceptions.put(org.elasticsearch.http.HttpException.class, exceptions.size());
exceptions.put(org.elasticsearch.ElasticsearchException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.SnapshotMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.PrimaryMissingActionException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.FailedNodeException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.SearchParseException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.blobstore.BlobStoreException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.IncompatibleClusterStateVersionException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.RecoveryEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.TimestampParsingException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.RoutingMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.IndexFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, exceptions.size());
exceptions.put(org.elasticsearch.repositories.RepositoryException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.ReceiveTimeoutTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.NodeDisconnectedException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.AlreadyExpiredException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.aggregations.AggregationExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.mapper.MergeMappingException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.InvalidIndexTemplateException.class, exceptions.size());
exceptions.put(org.elasticsearch.percolator.PercolateException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.RefreshFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.aggregations.AggregationInitializationException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.recovery.DelayRecoveryException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.warmer.IndexWarmerMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.client.transport.NoNodeAvailableException.class, exceptions.size());
exceptions.put(org.elasticsearch.script.groovy.GroovyScriptCompilationException.class, exceptions.size());
exceptions.put(org.elasticsearch.snapshots.InvalidSnapshotNameException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IllegalIndexShardStateException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.snapshots.IndexShardSnapshotException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardNotStartedException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.search.SearchPhaseExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.ActionNotFoundTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.TransportSerializationException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.RemoteTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.EngineCreationFailureException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.routing.RoutingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardRecoveryException.class, exceptions.size());
exceptions.put(org.elasticsearch.repositories.RepositoryMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.percolator.PercolatorException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.DocumentSourceMissingException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.settings.NoClassSettingsException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.BindTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.shard.IndexShardRecoveringException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.translog.TranslogException.class, exceptions.size());
exceptions.put(org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnPrimaryException.class, exceptions.size());
exceptions.put(org.elasticsearch.ElasticsearchTimeoutException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.query.QueryPhaseExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.repositories.RepositoryVerificationException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.aggregations.InvalidAggregationPathException.class, exceptions.size());
exceptions.put(org.elasticsearch.script.groovy.GroovyScriptExecutionException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.IndexAlreadyExistsException.class, exceptions.size());
exceptions.put(org.elasticsearch.script.Script.ScriptParseException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.mapper.MapperParsingException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.SearchContextException.class, exceptions.size());
exceptions.put(org.elasticsearch.search.builder.SearchSourceBuilderException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.EngineClosedException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.NoShardAvailableActionException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.UnavailableShardsException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.engine.FlushFailedEngineException.class, exceptions.size());
exceptions.put(org.elasticsearch.common.breaker.CircuitBreakingException.class, exceptions.size());
exceptions.put(org.elasticsearch.transport.NodeNotConnectedException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.mapper.StrictDynamicMappingException.class, exceptions.size());
exceptions.put(org.elasticsearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class, exceptions.size());
exceptions.put(org.elasticsearch.indices.TypeMissingException.class, exceptions.size());
// added in 3.x
exceptions.put(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, 140);
exceptions.put(org.elasticsearch.index.query.QueryShardException.class, 141);
final int maxOrd = 141;
assert exceptions.size() == maxOrd + 1;
Constructor<? extends ElasticsearchException>[] idToSupplier = new Constructor[maxOrd + 1];
exceptions.put(org.elasticsearch.discovery.Discovery.FailedToCommitClusterStateException.class, exceptions.size());
exceptions.put(org.elasticsearch.index.query.QueryShardException.class, exceptions.size());
// NOTE: ONLY APPEND TO THE END and NEVER REMOVE EXCEPTIONS IN MINOR VERSIONS
Constructor<? extends ElasticsearchException>[] idToSupplier = new Constructor[exceptions.size()];
for (Map.Entry<Class<? extends ElasticsearchException>, Integer> e : exceptions.entrySet()) {
try {
Constructor<? extends ElasticsearchException> constructor = e.getKey().getDeclaredConstructor(StreamInput.class);
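With the refactored registry, an exception's wire id is simply `exceptions.size()` at insertion time, so ids stay stable as long as entries are only appended. Below is a minimal sketch of how such an id-to-constructor table supports deserialization from a stream; the `(StreamInput)` constructor convention and the `idToSupplier` array come from the diff, while the holder class and method name here are hypothetical:

import java.io.IOException;
import java.lang.reflect.Constructor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;

final class ExceptionWireFormat { // hypothetical helper, not part of the commit
    static ElasticsearchException readException(StreamInput in,
            Constructor<? extends ElasticsearchException>[] idToSupplier) throws IOException {
        int id = in.readVInt(); // stable ordinal written by the sending node
        if (id < 0 || id >= idToSupplier.length || idToSupplier[id] == null) {
            throw new IOException("no exception registered for id [" + id + "]");
        }
        try {
            // every registered exception class declares a constructor taking a StreamInput
            return idToSupplier[id].newInstance(in);
        } catch (ReflectiveOperationException e) {
            throw new IOException("failed to deserialize exception with id [" + id + "]", e);
        }
    }
}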


@@ -73,7 +73,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
if (request.waitForEvents() != null) {
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
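The switch from `ProcessedClusterStateUpdateTask` to the unified `ClusterStateUpdateTask` keeps the underlying idiom intact: submitting a task that returns `currentState` unchanged is a way to wait until everything queued ahead of it at the request's priority has been processed. A condensed sketch of that idiom, assuming `clusterService` and a failure `listener` are in scope as in the surrounding method, and using `org.elasticsearch.common.Priority.LANGUID` as a stand-in for the diff's `request.waitForEvents()` priority:

// No-op task: its callbacks fire only after all tasks ahead of it in the
// cluster-state queue have run, which is exactly the "wait for events" semantics.
clusterService.submitStateUpdateTask("wait_for_events", Priority.LANGUID, new ClusterStateUpdateTask() {
    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // no state change; only the queue ordering matters
    }

    @Override
    public void onFailure(String source, Throwable t) {
        listener.onFailure(t); // surface queue failures to the caller
    }
});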


@@ -42,9 +42,8 @@ import java.util.Map;
* Node information (static, does not change over time).
*/
public class NodeInfo extends BaseNodeResponse {
@Nullable
private ImmutableMap<String, String> serviceAttributes;
private Map<String, String> serviceAttributes;
private Version version;
private Build build;
@@ -119,7 +118,7 @@ public class NodeInfo extends BaseNodeResponse {
* The service attributes of the node.
*/
@Nullable
public ImmutableMap<String, String> getServiceAttributes() {
public Map<String, String> getServiceAttributes() {
return this.serviceAttributes;
}


@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.FailedNodeException;
@@ -46,10 +47,13 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReferenceArray;
import static java.util.Collections.unmodifiableMap;
/**
* Transport client that collects snapshot shard statuses from data nodes
*/
@@ -104,7 +108,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
@Override
protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
ImmutableMap.Builder<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = ImmutableMap.builder();
Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
try {
String nodeId = clusterService.localNode().id();
for (SnapshotId snapshotId : request.snapshotIds) {
@@ -112,7 +116,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
if (shardsStatus == null) {
continue;
}
ImmutableMap.Builder<ShardId, SnapshotIndexShardStatus> shardMapBuilder = ImmutableMap.builder();
Map<ShardId, SnapshotIndexShardStatus> shardMapBuilder = new HashMap<>();
for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : shardsStatus.entrySet()) {
SnapshotIndexShardStatus shardStatus;
IndexShardSnapshotStatus.Stage stage = shardEntry.getValue().stage();
@@ -124,9 +128,9 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
}
shardMapBuilder.put(shardEntry.getKey(), shardStatus);
}
snapshotMapBuilder.put(snapshotId, shardMapBuilder.build());
snapshotMapBuilder.put(snapshotId, unmodifiableMap(shardMapBuilder));
}
return new NodeSnapshotStatus(clusterService.localNode(), snapshotMapBuilder.build());
return new NodeSnapshotStatus(clusterService.localNode(), unmodifiableMap(snapshotMapBuilder));
} catch (Exception e) {
throw new ElasticsearchException("failed to load metadata", e);
}
@@ -241,17 +245,17 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
public static class NodeSnapshotStatus extends BaseNodeResponse {
private ImmutableMap<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> status;
private Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status;
NodeSnapshotStatus() {
}
public NodeSnapshotStatus(DiscoveryNode node, ImmutableMap<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> status) {
public NodeSnapshotStatus(DiscoveryNode node, Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status) {
super(node);
this.status = status;
}
public ImmutableMap<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> status() {
public Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status() {
return status;
}
@@ -259,19 +263,19 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int numberOfSnapshots = in.readVInt();
ImmutableMap.Builder<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = ImmutableMap.builder();
Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>(numberOfSnapshots);
for (int i = 0; i < numberOfSnapshots; i++) {
SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
ImmutableMap.Builder<ShardId, SnapshotIndexShardStatus> shardMapBuilder = ImmutableMap.builder();
int numberOfShards = in.readVInt();
Map<ShardId, SnapshotIndexShardStatus> shardMapBuilder = new HashMap<>(numberOfShards);
for (int j = 0; j < numberOfShards; j++) {
ShardId shardId = ShardId.readShardId(in);
SnapshotIndexShardStatus status = SnapshotIndexShardStatus.readShardSnapshotStatus(in);
shardMapBuilder.put(shardId, status);
}
snapshotMapBuilder.put(snapshotId, shardMapBuilder.build());
snapshotMapBuilder.put(snapshotId, unmodifiableMap(shardMapBuilder));
}
status = snapshotMapBuilder.build();
status = unmodifiableMap(snapshotMapBuilder);
}
@Override
@@ -279,10 +283,10 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
super.writeTo(out);
if (status != null) {
out.writeVInt(status.size());
for (ImmutableMap.Entry<SnapshotId, ImmutableMap<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
for (Map.Entry<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
entry.getKey().writeTo(out);
out.writeVInt(entry.getValue().size());
for (ImmutableMap.Entry<ShardId, SnapshotIndexShardStatus> shardEntry : entry.getValue().entrySet()) {
for (Map.Entry<ShardId, SnapshotIndexShardStatus> shardEntry : entry.getValue().entrySet()) {
shardEntry.getKey().writeTo(out);
shardEntry.getValue().writeTo(out);
}
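A recurring pattern in this commit is replacing Guava's `ImmutableMap.Builder` with a plain `HashMap` that is wrapped via `java.util.Collections.unmodifiableMap` once building is done. A minimal sketch of the pattern with simplified types (illustrative, not the production code):

import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;

class UnmodifiableMapPattern { // hypothetical demo class
    static Map<String, Integer> buildCounts() {
        Map<String, Integer> counts = new HashMap<>(); // mutable only while building
        counts.put("shards", 5);
        counts.put("replicas", 1);
        return unmodifiableMap(counts); // callers receive a read-only view
    }
}

Unlike Guava's `ImmutableMap`, `unmodifiableMap` returns a view over the underlying map rather than a copy, so the mutable reference must not escape after wrapping.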


@@ -157,7 +157,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
// We should have information about this shard from the shard:
TransportNodesSnapshotsStatus.NodeSnapshotStatus nodeStatus = nodeSnapshotStatusMap.get(status.nodeId());
if (nodeStatus != null) {
ImmutableMap<ShardId, SnapshotIndexShardStatus> shardStatues = nodeStatus.status().get(entry.snapshotId());
Map<ShardId, SnapshotIndexShardStatus> shardStatues = nodeStatus.status().get(entry.snapshotId());
if (shardStatues != null) {
SnapshotIndexShardStatus shardStatus = shardStatues.get(shardEntry.getKey());
if (shardStatus != null) {
@@ -204,7 +204,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
Snapshot snapshot = snapshotsService.snapshot(snapshotId);
List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
if (snapshot.state().completed()) {
ImmutableMap<ShardId, IndexShardSnapshotStatus> shardStatues = snapshotsService.snapshotShards(snapshotId);
Map<ShardId, IndexShardSnapshotStatus> shardStatues = snapshotsService.snapshotShards(snapshotId);
for (ImmutableMap.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue()));
}


@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.mapping.get;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -31,14 +32,17 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.mapper.Mapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/** Response object for {@link GetFieldMappingsRequest} API */
public class GetFieldMappingsResponse extends ActionResponse implements ToXContent {
private ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings = ImmutableMap.of();
private Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings = ImmutableMap.of();
GetFieldMappingsResponse(ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings) {
GetFieldMappingsResponse(Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings) {
this.mappings = mappings;
}
@@ -46,7 +50,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
}
/** returns the retrieved field mapping. The return map keys are index, type, field (as specified in the request). */
public ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings() {
public Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings() {
return mappings;
}
@@ -57,11 +61,11 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
* @return FieldMappingMetaData for the requested field or null if not found.
*/
public FieldMappingMetaData fieldMappings(String index, String type, String field) {
ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>> indexMapping = mappings.get(index);
Map<String, Map<String, FieldMappingMetaData>> indexMapping = mappings.get(index);
if (indexMapping == null) {
return null;
}
ImmutableMap<String, FieldMappingMetaData> typeMapping = indexMapping.get(type);
Map<String, FieldMappingMetaData> typeMapping = indexMapping.get(type);
if (typeMapping == null) {
return null;
}
@@ -70,10 +74,10 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
for (Map.Entry<String, Map<String, Map<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
builder.startObject(indexEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("mappings");
for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
for (Map.Entry<String, Map<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
builder.startObject(typeEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
builder.startObject(fieldEntry.getKey());
@@ -128,33 +132,33 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableMap.Builder<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexMapBuilder = ImmutableMap.builder();
Map<String, Map<String, Map<String, FieldMappingMetaData>>> indexMapBuilder = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String index = in.readString();
int typesSize = in.readVInt();
ImmutableMap.Builder<String, ImmutableMap<String, FieldMappingMetaData>> typeMapBuilder = ImmutableMap.builder();
Map<String, Map<String, FieldMappingMetaData>> typeMapBuilder = new HashMap<>(typesSize);
for (int j = 0; j < typesSize; j++) {
String type = in.readString();
ImmutableMap.Builder<String, FieldMappingMetaData> fieldMapBuilder = ImmutableMap.builder();
int fieldSize = in.readVInt();
Map<String, FieldMappingMetaData> fieldMapBuilder = new HashMap<>(fieldSize);
for (int k = 0; k < fieldSize; k++) {
fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference()));
}
typeMapBuilder.put(type, fieldMapBuilder.build());
typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder));
}
indexMapBuilder.put(index, typeMapBuilder.build());
indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder));
}
mappings = indexMapBuilder.build();
mappings = unmodifiableMap(indexMapBuilder);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(mappings.size());
for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
for (Map.Entry<String, Map<String, Map<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
out.writeString(indexEntry.getKey());
out.writeVInt(indexEntry.getValue().size());
for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
for (Map.Entry<String, Map<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
out.writeString(typeEntry.getKey());
out.writeVInt(typeEntry.getValue().size());
for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {


@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.indices.mapping.get;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@@ -32,6 +31,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
@@ -88,7 +88,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
}
private GetFieldMappingsResponse merge(AtomicReferenceArray<Object> indexResponses) {
MapBuilder<String, ImmutableMap<String, ImmutableMap<String, GetFieldMappingsResponse.FieldMappingMetaData>>> mergedResponses = MapBuilder.newMapBuilder();
MapBuilder<String, Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetaData>>> mergedResponses = MapBuilder.newMapBuilder();
for (int i = 0; i < indexResponses.length(); i++) {
Object element = indexResponses.get(i);
if (element instanceof GetFieldMappingsResponse) {


@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.mapping.get;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
import org.elasticsearch.action.support.ActionFilters;
@@ -52,6 +53,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.stream.Collectors;
import static org.elasticsearch.common.util.CollectionUtils.newLinkedList;
@@ -105,7 +107,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
}
}
MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
for (String type : typeIntersection) {
DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
ImmutableMap<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, request);


@@ -304,7 +304,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
assert preVersionTypes[requestIndex] != null;
}
processAfter(request, indexShard, location);
processAfter(request.refresh(), indexShard, location);
BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
BulkItemRequest[] items = request.items();
for (int i = 0; i < items.length; i++) {
@@ -500,21 +500,7 @@
}
}
processAfter(request, indexShard, location);
}
private void processAfter(BulkShardRequest request, IndexShard indexShard, Translog.Location location) {
if (request.refresh()) {
try {
indexShard.refresh("refresh_flag_bulk");
} catch (Throwable e) {
// ignore
}
}
if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
indexShard.sync(location);
}
processAfter(request.refresh(), indexShard, location);
}
private void applyVersion(BulkItemRequest item, long version, VersionType versionType) {


@@ -138,8 +138,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
request.version(delete.version());
assert request.versionType().validateVersionForWrites(request.version());
processAfter(request, indexShard, delete.getTranslogLocation());
processAfter(request.refresh(), indexShard, delete.getTranslogLocation());
DeleteResponse response = new DeleteResponse(shardRequest.shardId.getIndex(), request.type(), request.id(), delete.version(), delete.found());
return new Tuple<>(response, shardRequest.request);
@@ -151,7 +150,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
indexShard.delete(delete);
processAfter(request, indexShard, delete.getTranslogLocation());
processAfter(request.refresh(), indexShard, delete.getTranslogLocation());
}
@Override
@@ -159,18 +158,4 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
return clusterService.operationRouting()
.deleteShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing());
}
private void processAfter(DeleteRequest request, IndexShard indexShard, Translog.Location location) {
if (request.refresh()) {
try {
indexShard.refresh("refresh_flag_delete");
} catch (Throwable e) {
// ignore
}
}
if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
indexShard.sync(location);
}
}
}


@@ -170,7 +170,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
final IndexResponse response = result.response;
final Translog.Location location = result.location;
processAfter(request, indexShard, location);
processAfter(request.refresh(), indexShard, location);
return new Tuple<>(response, shardRequest.request);
}
@@ -193,20 +193,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
}
operation.execute(indexShard);
processAfter(request, indexShard, operation.getTranslogLocation());
processAfter(request.refresh(), indexShard, operation.getTranslogLocation());
}
private void processAfter(IndexRequest request, IndexShard indexShard, Translog.Location location) {
if (request.refresh()) {
try {
indexShard.refresh("refresh_flag_index");
} catch (Throwable e) {
// ignore
}
}
if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
indexShard.sync(location);
}
}
}


@@ -1082,4 +1082,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return new WriteResult(new IndexResponse(shardId.getIndex(), request.type(), request.id(), request.version(), created), operation.getTranslogLocation());
}
protected final void processAfter(boolean refresh, IndexShard indexShard, Translog.Location location) {
if (refresh) {
try {
indexShard.refresh("refresh_flag_index");
} catch (Throwable e) {
// ignore
}
}
if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
indexShard.sync(location);
}
indexShard.maybeFlush();
}
}


@@ -79,7 +79,7 @@ final class Bootstrap {
}
/** initialize native resources */
public static void initializeNatives(boolean mlockAll, boolean ctrlHandler) {
public static void initializeNatives(boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
// check if the user is running as root, and bail
@@ -91,6 +91,11 @@
}
}
// enable secure computing mode
if (seccomp) {
Natives.trySeccomp();
}
// mlockall if requested
if (mlockAll) {
if (Constants.WINDOWS) {
@@ -134,7 +139,8 @@
private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
initializeNatives(settings.getAsBoolean("bootstrap.mlockall", false),
settings.getAsBoolean("bootstrap.ctrlhandler", true));
settings.getAsBoolean("bootstrap.seccomp", true),
settings.getAsBoolean("bootstrap.ctrlhandler", true));
// initialize probes before the security manager is installed
initializeProbes();
@@ -218,6 +224,9 @@
* to startup elasticsearch.
*/
static void init(String[] args) throws Throwable {
// Set the system property before anything has a chance to trigger its use
System.setProperty("es.logger.prefix", "");
BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
@@ -225,7 +234,6 @@
System.exit(status.status());
}
System.setProperty("es.logger.prefix", "");
INSTANCE = new Bootstrap();
boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground")));


@@ -128,7 +128,13 @@ final class BootstrapCLIParser extends CliTool {
while (iterator.hasNext()) {
String arg = iterator.next();
if (!arg.startsWith("--")) {
throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
throw new IllegalArgumentException(
"Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
}
}
// if there is no = sign, we have to get the next argument
arg = arg.replace("--", "");


@@ -43,4 +43,11 @@ public final class BootstrapInfo {
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
/**
* Returns true if secure computing mode is enabled (linux/amd64 only)
*/
public static boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
}
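`BootstrapInfo` exposes the outcome of the best-effort filter install so other components can report on it. A hypothetical caller (the class name and messages below are illustrative, not from the commit):

import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

class SeccompStatusReporter { // hypothetical, for illustration only
    private static final ESLogger logger = Loggers.getLogger(SeccompStatusReporter.class);

    static void report() {
        if (BootstrapInfo.isSeccompInstalled()) {
            logger.info("seccomp filter installed; process execution syscalls are blocked");
        } else {
            logger.warn("seccomp filter not installed; syscall filtering is unavailable on this platform");
        }
    }
}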


@@ -41,6 +41,8 @@ class JNANatives {
// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;
// Set to true, in case native seccomp call was successful
static boolean LOCAL_SECCOMP = false;
static void tryMlockall() {
int errno = Integer.MIN_VALUE;
@@ -170,4 +172,19 @@
}
}
static void trySeccomp() {
if (Constants.LINUX && "amd64".equals(Constants.OS_ARCH)) {
try {
Seccomp.installFilter();
LOCAL_SECCOMP = true;
} catch (Exception e) {
// this is likely to happen unless the kernel is newish, it's a best effort at the moment
// so we log stacktrace at debug for now...
if (logger.isDebugEnabled()) {
logger.debug("unable to install seccomp filter", e);
}
logger.warn("unable to install seccomp filter: " + e.getMessage());
}
}
}
}


@@ -88,4 +88,19 @@ final class Natives {
}
return JNANatives.LOCAL_MLOCKALL;
}
static void trySeccomp() {
if (!JNA_AVAILABLE) {
logger.warn("cannot install seccomp filters because JNA is not available");
return;
}
JNANatives.trySeccomp();
}
static boolean isSeccompInstalled() {
if (!JNA_AVAILABLE) {
return false;
}
return JNANatives.LOCAL_SECCOMP;
}
}


@@ -0,0 +1,271 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import com.sun.jna.Library;
import com.sun.jna.Memory;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.List;
/**
* Installs a limited form of Linux secure computing mode (filter mode).
* This filters system calls to block process execution.
* <p>
* This is only supported on the amd64 architecture, on Linux kernels 3.5 or above, and requires
* {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel.
* <p>
* Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
* is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
* here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
* which will at least protect elasticsearch application threads.
* <p>
* The filters will return {@code EACCES} (Access Denied) for the following system calls:
* <ul>
* <li>{@code execve}</li>
* <li>{@code fork}</li>
* <li>{@code vfork}</li>
* </ul>
* <p>
* This is not intended as a sandbox. It is another level of security, mostly intended to annoy
* security researchers and make their lives more difficult in achieving "remote execution" exploits.
* @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt">
* http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt</a>
*/
// only supported on linux/amd64
// not an example of how to write code!!!
final class Seccomp {
private static final ESLogger logger = Loggers.getLogger(Seccomp.class);
/** we use an explicit interface for native methods, for varargs support */
static interface LinuxLibrary extends Library {
/**
* maps to prctl(2)
*/
int prctl(int option, long arg2, long arg3, long arg4, long arg5);
/**
* used to call seccomp(2), it's too new...
* this is the only way, DON'T use it on some other architecture unless you know wtf you are doing
*/
long syscall(long number, Object... args);
};
// null if something goes wrong.
static final LinuxLibrary libc;
static {
LinuxLibrary lib = null;
try {
lib = (LinuxLibrary) Native.loadLibrary("c", LinuxLibrary.class);
} catch (UnsatisfiedLinkError e) {
logger.warn("unable to link C library. native methods (seccomp) will be disabled.", e);
}
libc = lib;
}
/** the preferred method is seccomp(2), since we can apply to all threads of the process */
static final int SECCOMP_SYSCALL_NR = 317; // since Linux 3.17
static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17
static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17
/** otherwise, we can use prctl(2), which will at least protect ES application threads */
static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5
static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5
static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23
static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23
static final int SECCOMP_MODE_FILTER = 2; // since Linux 3.5
/** corresponds to struct sock_filter */
static final class SockFilter {
short code; // insn
byte jt; // number of insn to jump (skip) if true
byte jf; // number of insn to jump (skip) if false
int k; // additional data
SockFilter(short code, byte jt, byte jf, int k) {
this.code = code;
this.jt = jt;
this.jf = jf;
this.k = k;
}
}
/** corresponds to struct sock_fprog */
public static final class SockFProg extends Structure implements Structure.ByReference {
public short len; // number of filters
public Pointer filter; // filters
public SockFProg(SockFilter filters[]) {
len = (short) filters.length;
// serialize struct sock_filter * explicitly, it's less confusing than the JNA magic we would need
Memory filter = new Memory(len * 8);
ByteBuffer bbuf = filter.getByteBuffer(0, len * 8);
bbuf.order(ByteOrder.nativeOrder()); // little endian
for (SockFilter f : filters) {
bbuf.putShort(f.code);
bbuf.put(f.jt);
bbuf.put(f.jf);
bbuf.putInt(f.k);
}
this.filter = filter;
}
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] { "len", "filter" });
}
}
// BPF "macros" and constants
static final int BPF_LD = 0x00;
static final int BPF_W = 0x00;
static final int BPF_ABS = 0x20;
static final int BPF_JMP = 0x05;
static final int BPF_JEQ = 0x10;
static final int BPF_JGE = 0x30;
static final int BPF_JGT = 0x20;
static final int BPF_RET = 0x06;
static final int BPF_K = 0x00;
static SockFilter BPF_STMT(int code, int k) {
return new SockFilter((short) code, (byte) 0, (byte) 0, k);
}
static SockFilter BPF_JUMP(int code, int k, int jt, int jf) {
return new SockFilter((short) code, (byte) jt, (byte) jf, k);
}
static final int AUDIT_ARCH_X86_64 = 0xC000003E;
static final int SECCOMP_RET_ERRNO = 0x00050000;
static final int SECCOMP_RET_DATA = 0x0000FFFF;
static final int SECCOMP_RET_ALLOW = 0x7FFF0000;
// some errno constants for error checking/handling
static final int EACCES = 0x0D;
static final int EFAULT = 0x0E;
static final int EINVAL = 0x16;
static final int ENOSYS = 0x26;
// offsets (arch dependent) that our BPF checks
static final int SECCOMP_DATA_NR_OFFSET = 0x00;
static final int SECCOMP_DATA_ARCH_OFFSET = 0x04;
// currently this range is blocked (inclusive):
// execve is really the only one needed but why let someone fork a 30G heap? (not really what happens)
// ...
// 57: fork
// 58: vfork
// 59: execve
// ...
static final int BLACKLIST_START = 57;
static final int BLACKLIST_END = 59;
// TODO: execveat()? it's less of a risk since the jvm does not use it...
/** try to install our filters */
static void installFilter() {
// first be defensive: we can give nice errors this way, at the very least.
// also, some of these security features get backported to old versions, checking kernel version here is a big no-no!
boolean supported = Constants.LINUX && "amd64".equals(Constants.OS_ARCH);
if (supported == false) {
throw new IllegalStateException("bug: should not be trying to initialize seccomp for an unsupported architecture");
}
// we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug
if (libc == null) {
throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
}
// check for kernel version
if (libc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) < 0) {
int errno = Native.getLastError();
switch (errno) {
case ENOSYS: throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
default: throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
}
}
// check for SECCOMP
if (libc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0) < 0) {
int errno = Native.getLastError();
switch (errno) {
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
default: throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno));
}
}
// check for SECCOMP_MODE_FILTER
if (libc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) < 0) {
int errno = Native.getLastError();
switch (errno) {
case EFAULT: break; // available
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
default: throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno));
}
}
// ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user
if (libc.prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0) {
throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()));
}
// BPF installed to check arch, then syscall range. See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
SockFilter insns[] = {
/* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // if (arch != amd64) goto fail;
/* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, AUDIT_ARCH_X86_64, 0, 3), //
/* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), // if (syscall < BLACKLIST_START) goto pass;
/* 4 */ BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, BLACKLIST_START, 0, 2), //
/* 5 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, BLACKLIST_END, 1, 0), // if (syscall > BLACKLIST_END) goto pass;
/* 6 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES;
/* 7 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW) // pass: return OK;
};
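// Walkthrough of the jump offsets above (editor's reading; jt/jf count
// instructions to skip): insn 2 falls through to insn 3 when the arch word
// equals AUDIT_ARCH_X86_64 and skips 3 to insn 6 (fail) otherwise; insn 4
// skips 2 to insn 7 (pass) when syscall < 57; insn 5 skips 1 to insn 7 (pass)
// when syscall > 59; only syscalls 57..59 fall through to insn 6 and get EACCES.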
// seccomp takes a long, so we pass it one explicitly to keep the JNA simple
SockFProg prog = new SockFProg(insns);
prog.write();
long pointer = Pointer.nativeValue(prog.getPointer());
// install filter, if this works, after this there is no going back!
// first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl()
if (libc.syscall(SECCOMP_SYSCALL_NR, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, pointer) != 0) {
int errno1 = Native.getLastError();
if (logger.isDebugEnabled()) {
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
}
if (libc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) < 0) {
int errno2 = Native.getLastError();
throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) +
", prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno2));
}
}
// now check that the filter was really installed, we should be in filter mode.
if (libc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) {
throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + JNACLibrary.strerror(Native.getLastError()));
}
}
}
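A quick way to observe the filter from Java (hypothetical smoke test, not part of the patch; assumes installFilter() succeeded on a linux/amd64 JVM) is to try to spawn a process, since the blocked fork/execve surfaces as an IOException:

import java.io.IOException;

// Hypothetical smoke test.
public class ExecBlockedCheck {
    public static void main(String[] args) {
        try {
            new ProcessBuilder("/bin/true").start();
            System.out.println("process started: filter is NOT active");
        } catch (IOException e) {
            // expected once the filter is installed: the kernel returns EACCES
            System.out.println("process creation blocked: " + e.getMessage());
        }
    }
}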

View File

@ -28,7 +28,7 @@ import org.elasticsearch.common.unit.TimeValue;
* An extension interface to {@link ClusterStateUpdateTask} that allows the task to be notified when
* all the nodes have acknowledged a cluster state update request
*/
public abstract class AckedClusterStateUpdateTask<Response> extends TimeoutClusterStateUpdateTask {
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask {
private final ActionListener<Response> listener;
private final AckedRequest request;
@ -73,11 +73,6 @@ public abstract class AckedClusterStateUpdateTask<Response> extends TimeoutClust
listener.onFailure(t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
/**
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
*/

View File

@ -81,7 +81,6 @@ import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogService;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
@ -259,11 +258,9 @@ public class ClusterModule extends AbstractModule {
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
registerIndexDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_INTERVAL, Validator.TIME);
registerIndexDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
registerIndexDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
registerIndexDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, Validator.TIME);
registerIndexDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, Validator.EMPTY);
registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, Validator.EMPTY);
registerIndexDynamicSetting(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);

View File

@ -19,6 +19,8 @@
package org.elasticsearch.cluster;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
/**
@ -51,4 +53,22 @@ abstract public class ClusterStateUpdateTask {
public void onNoLongerMaster(String source) {
onFailure(source, new EsRejectedExecutionException("no longer master. source: [" + source + "]"));
}
/**
* Called when the result of the {@link #execute(ClusterState)} has been processed
* properly by all listeners.
*/
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
/**
* If the cluster state update task wasn't processed within the provided timeout, call
* {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default).
*/
@Nullable
public TimeValue timeout() {
return null;
}
}
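With both hooks folded into the base class, a task that needs a timeout and a completion callback no longer picks a specialized subclass; a minimal sketch ("demo-task" and the surrounding class are hypothetical, assuming a ClusterService in scope):

import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.unit.TimeValue;

// Hypothetical caller-side sketch.
class DemoTaskSubmitter {
    void submit(ClusterService clusterService) {
        clusterService.submitStateUpdateTask("demo-task", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState; // no-op change
            }

            @Override
            public TimeValue timeout() {
                return TimeValue.timeValueSeconds(30); // fail the task if not processed in time
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                // runs once all listeners have applied the new state
            }

            @Override
            public void onFailure(String source, Throwable t) {
                // also invoked with ProcessClusterEventTimeoutException on timeout
            }
        });
    }
}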

View File

@ -21,7 +21,7 @@ package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -45,11 +45,11 @@ public final class DiffableUtils {
}
/**
* Calculates diff between two ImmutableMaps of Diffable objects
* Calculates diff between two Maps of Diffable objects.
*/
public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> diff(ImmutableMap<String, T> before, ImmutableMap<String, T> after) {
public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) {
assert after != null && before != null;
return new ImmutableMapDiff<>(before, after);
return new JdkMapDiff<>(before, after);
}
/**
@ -60,10 +60,10 @@ public final class DiffableUtils {
}
/**
* Loads an object that represents difference between two ImmutableMaps
* Loads an object that represents difference between two Maps.
*/
public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> readImmutableMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
return new ImmutableMapDiff<>(in, keyedReader);
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
return new JdkMapDiff<>(in, keyedReader);
}
/**
@ -74,10 +74,10 @@ public final class DiffableUtils {
}
/**
* Loads an object that represents difference between two ImmutableMaps
* Loads an object that represents difference between two Maps.
*/
public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> readImmutableMapDiff(StreamInput in, T proto) throws IOException {
return new ImmutableMapDiff<>(in, new PrototypeReader<>(proto));
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException {
return new JdkMapDiff<>(in, new PrototypeReader<>(proto));
}
/**
@ -121,24 +121,24 @@ public final class DiffableUtils {
}
/**
* Represents differences between two ImmutableMaps of diffable objects
* Represents differences between two Maps of Diffable objects.
*
* @param <T> the diffable object
*/
private static class ImmutableMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableMap<String, T>> {
private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> {
protected ImmutableMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
super(in, reader);
}
public ImmutableMapDiff(ImmutableMap<String, T> before, ImmutableMap<String, T> after) {
public JdkMapDiff(Map<String, T> before, Map<String, T> after) {
assert after != null && before != null;
for (String key : before.keySet()) {
if (!after.containsKey(key)) {
deletes.add(key);
}
}
for (ImmutableMap.Entry<String, T> partIter : after.entrySet()) {
for (Map.Entry<String, T> partIter : after.entrySet()) {
T beforePart = before.get(partIter.getKey());
if (beforePart == null) {
adds.put(partIter.getKey(), partIter.getValue());
@ -149,8 +149,8 @@ public final class DiffableUtils {
}
@Override
public ImmutableMap<String, T> apply(ImmutableMap<String, T> map) {
HashMap<String, T> builder = new HashMap<>();
public Map<String, T> apply(Map<String, T> map) {
Map<String, T> builder = new HashMap<>();
builder.putAll(map);
for (String part : deletes) {
@ -164,7 +164,7 @@ public final class DiffableUtils {
for (Map.Entry<String, T> additon : adds.entrySet()) {
builder.put(additon.getKey(), additon.getValue());
}
return ImmutableMap.copyOf(builder);
return builder;
}
}
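A hedged round-trip sketch of the new Map-based API (hypothetical generic helper; works for any Diffable implementation):

import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;

import java.util.Map;

// Hypothetical helper: applying the diff to 'before' should reconstruct 'after'.
class MapDiffRoundTrip {
    static <T extends Diffable<T>> Map<String, T> roundTrip(Map<String, T> before, Map<String, T> after) {
        Diff<Map<String, T>> diff = DiffableUtils.diff(before, after); // plain-Map signature
        return diff.apply(before);                                     // equals 'after'
    }
}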

View File

@ -1,31 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
/**
* A combination of {@link org.elasticsearch.cluster.ProcessedClusterStateUpdateTask} and
* {@link org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask} that allows easy creation of anonymous classes
*/
abstract public class ProcessedClusterStateNonMasterUpdateTask extends ProcessedClusterStateUpdateTask {
@Override
public boolean runOnlyOnMaster() {
return false;
}
}

View File

@ -1,33 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
/**
* An extension interface to {@link ClusterStateUpdateTask} that allows the task to be notified when
* the cluster state update has been processed.
*/
public abstract class ProcessedClusterStateUpdateTask extends ClusterStateUpdateTask {
/**
* Called when the result of the {@link #execute(ClusterState)} has been processed
* properly by all listeners.
*/
public abstract void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState);
}

View File

@ -112,7 +112,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
public static class Entry {
private final State state;
private final SnapshotId snapshotId;
private final ImmutableMap<ShardId, ShardRestoreStatus> shards;
private final Map<ShardId, ShardRestoreStatus> shards;
private final List<String> indices;
/**
@ -148,7 +148,7 @@ public class RestoreInProgress extends AbstractDiffable<Custom> implements Custo
*
* @return list of shards
*/
public ImmutableMap<ShardId, ShardRestoreStatus> shards() {
public Map<ShardId, ShardRestoreStatus> shards() {
return this.shards;
}

View File

@ -38,6 +38,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* Meta data about snapshots that are currently executing
*/
@ -67,12 +69,12 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
private final State state;
private final SnapshotId snapshotId;
private final boolean includeGlobalState;
private final ImmutableMap<ShardId, ShardSnapshotStatus> shards;
private final Map<ShardId, ShardSnapshotStatus> shards;
private final List<String> indices;
private final ImmutableMap<String, List<ShardId>> waitingIndices;
private final Map<String, List<ShardId>> waitingIndices;
private final long startTime;
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, Map<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
this.snapshotId = snapshotId;
this.includeGlobalState = includeGlobalState;
@ -82,16 +84,16 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
this.shards = ImmutableMap.of();
this.waitingIndices = ImmutableMap.of();
} else {
this.shards = shards;
this.shards = unmodifiableMap(shards);
this.waitingIndices = findWaitingIndices(shards);
}
}
public Entry(Entry entry, State state, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
public Entry(Entry entry, State state, Map<ShardId, ShardSnapshotStatus> shards) {
this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards);
}
public Entry(Entry entry, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
public Entry(Entry entry, Map<ShardId, ShardSnapshotStatus> shards) {
this(entry, entry.state, shards);
}
@ -99,7 +101,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
return this.snapshotId;
}
public ImmutableMap<ShardId, ShardSnapshotStatus> shards() {
public Map<ShardId, ShardSnapshotStatus> shards() {
return this.shards;
}
@ -111,7 +113,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
return indices;
}
public ImmutableMap<String, List<ShardId>> waitingIndices() {
public Map<String, List<ShardId>> waitingIndices() {
return waitingIndices;
}
@ -153,7 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
return result;
}
private ImmutableMap<String, List<ShardId>> findWaitingIndices(ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
private ImmutableMap<String, List<ShardId>> findWaitingIndices(Map<ShardId, ShardSnapshotStatus> shards) {
Map<String, List<ShardId>> waitingIndicesMap = new HashMap<>();
for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> entry : shards.entrySet()) {
if (entry.getValue().state() == State.WAITING) {

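Worth noting about the switch from ImmutableMap.copyOf to unmodifiableMap: the latter is a read-only view, not a copy. A small JDK-only demonstration (hypothetical demo class):

import static java.util.Collections.unmodifiableMap;

import java.util.HashMap;
import java.util.Map;

// General JDK behavior: writes through the view throw, while writes to the
// backing map remain visible through it.
public class UnmodifiableViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> backing = new HashMap<>();
        backing.put("shard-0", 1);
        Map<String, Integer> view = unmodifiableMap(backing);
        backing.put("shard-1", 2);        // visible through 'view'
        System.out.println(view.size());  // prints 2
        view.put("shard-2", 3);           // throws UnsupportedOperationException
    }
}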
View File

@ -1,35 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.common.unit.TimeValue;
/**
* An extension interface to {@link org.elasticsearch.cluster.ClusterStateUpdateTask} that allows a timeout
* to be associated with the task.
*/
abstract public class TimeoutClusterStateUpdateTask extends ProcessedClusterStateUpdateTask {
/**
* If the cluster state update task wasn't processed within the provided timeout, call
* {@link #onFailure(String, Throwable)}
*/
abstract public TimeValue timeout();
}

View File

@ -23,11 +23,10 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -37,7 +36,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.threadpool.ThreadPool;
@ -129,7 +127,7 @@ public class ShardStateAction extends AbstractComponent {
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
failedShardQueue.add(shardRoutingEntry);
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {

View File

@ -45,11 +45,11 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
private final ImmutableSet<ClusterBlock> global;
private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks;
private final Map<String, ImmutableSet<ClusterBlock>> indicesBlocks;
private final ImmutableLevelHolder[] levelHolders;
ClusterBlocks(ImmutableSet<ClusterBlock> global, ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks) {
ClusterBlocks(ImmutableSet<ClusterBlock> global, Map<String, ImmutableSet<ClusterBlock>> indicesBlocks) {
this.global = global;
this.indicesBlocks = indicesBlocks;
@ -83,7 +83,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
return global;
}
public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices() {
public Map<String, ImmutableSet<ClusterBlock>> indices() {
return indicesBlocks;
}
@ -91,7 +91,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
return levelHolders[level.id()].global();
}
public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices(ClusterBlockLevel level) {
public Map<String, ImmutableSet<ClusterBlock>> indices(ClusterBlockLevel level) {
return levelHolders[level.id()].indices();
}

View File

@ -22,7 +22,7 @@ package org.elasticsearch.cluster.metadata;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable;
@ -100,7 +100,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
private void deleteIndex(final Request request, final Listener userListener, Semaphore mdLock) {
final DeleteIndexListener listener = new DeleteIndexListener(mdLock, userListener);
clusterService.submitStateUpdateTask("delete-index [" + request.index + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("delete-index [" + request.index + "]", Priority.URGENT, new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {

View File

@ -23,7 +23,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
@ -36,13 +36,7 @@ import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.InvalidIndexTemplateException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.*;
/**
* Service responsible for submitting index templates updates
@ -62,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {
@ -149,7 +143,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}
final IndexTemplateMetaData template = templateBuilder.build();
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {

View File

@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterState
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
@ -43,14 +43,7 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.percolator.PercolatorService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.*;
/**
* Service responsible for submitting mapping changes
*/
@ -301,7 +294,7 @@ public class MetaDataMappingService extends AbstractComponent {
insertOrder = ++refreshOrUpdateInsertOrder;
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
}
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
private volatile List<MappingTask> allTasks;
@Override

View File

@ -100,7 +100,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
private String hostName;
private String hostAddress;
private TransportAddress address;
private ImmutableMap<String, String> attributes;
private Map<String, String> attributes;
private Version version = Version.CURRENT;
DiscoveryNode() {
@ -120,7 +120,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
* @param version the version of the node.
*/
public DiscoveryNode(String nodeId, TransportAddress address, Version version) {
this("", nodeId, address, ImmutableMap.<String, String>of(), version);
this("", nodeId, address, Collections.emptyMap(), version);
}
/**
@ -230,14 +230,14 @@ public class DiscoveryNode implements Streamable, ToXContent {
/**
* The node attributes.
*/
public ImmutableMap<String, String> attributes() {
public Map<String, String> attributes() {
return this.attributes;
}
/**
* The node attributes.
*/
public ImmutableMap<String, String> getAttributes() {
public Map<String, String> getAttributes() {
return attributes();
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.IntSet;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;
@ -32,9 +33,16 @@ import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.IndexNotFoundException;
import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import static java.util.Collections.unmodifiableMap;
/**
* Represents a global cluster-wide routing table for all indices including the
* version of the current routing state.
@ -50,11 +58,11 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
private final long version;
// index to IndexRoutingTable map
private final ImmutableMap<String, IndexRoutingTable> indicesRouting;
private final Map<String, IndexRoutingTable> indicesRouting;
RoutingTable(long version, Map<String, IndexRoutingTable> indicesRouting) {
this.version = version;
this.indicesRouting = ImmutableMap.copyOf(indicesRouting);
this.indicesRouting = unmodifiableMap(indicesRouting);
}
/**
@ -304,7 +312,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
private final long version;
private final Diff<ImmutableMap<String, IndexRoutingTable>> indicesRouting;
private final Diff<Map<String, IndexRoutingTable>> indicesRouting;
public RoutingTableDiff(RoutingTable before, RoutingTable after) {
version = after.version;
@ -313,7 +321,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
public RoutingTableDiff(StreamInput in) throws IOException {
version = in.readLong();
indicesRouting = DiffableUtils.readImmutableMapDiff(in, IndexRoutingTable.PROTO);
indicesRouting = DiffableUtils.readJdkMapDiff(in, IndexRoutingTable.PROTO);
}
@Override

View File

@ -41,8 +41,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.util.concurrent.*;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.node.settings.NodeSettingsService;
@ -275,15 +275,14 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
try {
final UpdateTask task = new UpdateTask(source, priority, updateTask);
if (updateTask instanceof TimeoutClusterStateUpdateTask) {
final TimeoutClusterStateUpdateTask timeoutUpdateTask = (TimeoutClusterStateUpdateTask) updateTask;
updateTasksExecutor.execute(task, threadPool.scheduler(), timeoutUpdateTask.timeout(), new Runnable() {
if (updateTask.timeout() != null) {
updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() {
@Override
public void run() {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
timeoutUpdateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source()));
updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source()));
}
});
}
@ -401,9 +400,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source);
@ -526,9 +523,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());

View File

@ -1,109 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.collect;
import com.google.common.collect.ForwardingSet;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.Set;
/**
* {@link Set} implementation based on {@link CopyOnWriteHashMap}.
* Null values are not supported.
*/
public class CopyOnWriteHashSet<T> extends ForwardingSet<T> {
/**
* Return a copy of the provided set.
*/
public static <T> CopyOnWriteHashSet<T> copyOf(Collection<? extends T> set) {
if (set instanceof CopyOnWriteHashSet) {
// no need to copy in that case
@SuppressWarnings("unchecked")
final CopyOnWriteHashSet<T> cowSet = (CopyOnWriteHashSet<T>) set;
return cowSet;
} else {
return new CopyOnWriteHashSet<T>().copyAndAddAll(set);
}
}
private final CopyOnWriteHashMap<T, Boolean> map;
/** Create a new empty set. */
public CopyOnWriteHashSet() {
this(new CopyOnWriteHashMap<T, Boolean>());
}
private CopyOnWriteHashSet(CopyOnWriteHashMap<T, Boolean> map) {
this.map = map;
}
@Override
protected Set<T> delegate() {
return map.keySet();
}
/**
* Copy the current set and return a copy that contains or replaces <code>entry</code>.
*/
public CopyOnWriteHashSet<T> copyAndAdd(T entry) {
return new CopyOnWriteHashSet<>(map.copyAndPut(entry, true));
}
/**
* Copy the current set and return a copy that is the union of the current
* set and <code>entries</code>, potentially replacing existing entries in
* case of equality.
*/
public CopyOnWriteHashSet<T> copyAndAddAll(Collection<? extends T> entries) {
CopyOnWriteHashMap<T, Boolean> updated = this.map.copyAndPutAll(entries.stream().map(
p -> new AbstractMap.SimpleImmutableEntry<T, Boolean>(p, true)
));
return new CopyOnWriteHashSet<>(updated);
}
/**
* Copy the current set and return a copy that removes <code>entry</code>
* if it exists.
*/
public CopyOnWriteHashSet<T> copyAndRemove(Object entry) {
final CopyOnWriteHashMap<T, Boolean> updated = map.copyAndRemove(entry);
if (updated == map) {
return this;
} else {
return new CopyOnWriteHashSet<>(updated);
}
}
/**
* Copy the current set and return a copy that is the difference of the current
* set and <code>entries</code>.
*/
public CopyOnWriteHashSet<T> copyAndRemoveAll(Collection<?> entries) {
CopyOnWriteHashMap<T, Boolean> updated = this.map.copyAndRemoveAll(entries);
if (updated == map) {
return this;
} else {
return new CopyOnWriteHashSet<>(updated);
}
}
}

View File

@ -51,7 +51,7 @@ public final class PrivateElementsImpl implements PrivateElements {
/**
* lazily instantiated
*/
private ImmutableMap<Key<?>, Object> exposedKeysToSources;
private Map<Key<?>, Object> exposedKeysToSources;
private Injector injector;
public PrivateElementsImpl(Object source) {

View File

@ -26,28 +26,31 @@ import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;
/**
* A bounded transport address is a tuple of two {@link TransportAddress}, one that represents
* the address the transport is bounded on, the the published one represents the one clients should
* communicate on.
* A bounded transport address is a tuple of {@link TransportAddress}, one array that represents
* the addresses the transport is bound to, and the other is the published one that represents the address clients
* should communicate on.
*
*
*/
public class BoundTransportAddress implements Streamable {
private TransportAddress boundAddress;
private TransportAddress[] boundAddresses;
private TransportAddress publishAddress;
BoundTransportAddress() {
}
public BoundTransportAddress(TransportAddress boundAddress, TransportAddress publishAddress) {
this.boundAddress = boundAddress;
public BoundTransportAddress(TransportAddress[] boundAddresses, TransportAddress publishAddress) {
if (boundAddresses == null || boundAddresses.length < 1) {
throw new IllegalArgumentException("at least one bound address must be provided");
}
this.boundAddresses = boundAddresses;
this.publishAddress = publishAddress;
}
public TransportAddress boundAddress() {
return boundAddress;
public TransportAddress[] boundAddresses() {
return boundAddresses;
}
public TransportAddress publishAddress() {
@ -62,18 +65,38 @@ public class BoundTransportAddress implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
boundAddress = TransportAddressSerializers.addressFromStream(in);
int boundAddressLength = in.readInt();
boundAddresses = new TransportAddress[boundAddressLength];
for (int i = 0; i < boundAddressLength; i++) {
boundAddresses[i] = TransportAddressSerializers.addressFromStream(in);
}
publishAddress = TransportAddressSerializers.addressFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
TransportAddressSerializers.addressToStream(out, boundAddress);
out.writeInt(boundAddresses.length);
for (TransportAddress address : boundAddresses) {
TransportAddressSerializers.addressToStream(out, address);
}
TransportAddressSerializers.addressToStream(out, publishAddress);
}
@Override
public String toString() {
return "bound_address {" + boundAddress + "}, publish_address {" + publishAddress + "}";
StringBuilder builder = new StringBuilder("publish_address {");
builder.append(publishAddress);
builder.append("}, bound_addresses ");
boolean firstAdded = false;
for (TransportAddress address : boundAddresses) {
if (firstAdded) {
builder.append(", ");
} else {
firstAdded = true;
}
builder.append("{").append(address).append("}");
}
return builder.toString();
}
}
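A construction sketch for the new multi-bind shape (hypothetical addresses and demo class; assumes the InetSocketTransportAddress constructor taking an InetSocketAddress):

import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;

import java.net.InetSocketAddress;

// Hypothetical demo, not part of the patch.
public class BoundAddressDemo {
    public static void main(String[] args) {
        TransportAddress[] bound = new TransportAddress[] {
            new InetSocketTransportAddress(new InetSocketAddress("127.0.0.1", 9300)),
            new InetSocketTransportAddress(new InetSocketAddress("::1", 9300))
        };
        BoundTransportAddress address = new BoundTransportAddress(bound, bound[0]);
        // toString() now leads with the publish address, then each bound address,
        // approximately: publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}, {...}
        System.out.println(address);
    }
}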

View File

@ -124,7 +124,13 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
// we are the first master (and the master)
master = true;
final LocalDiscovery master = firstMaster;
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
@ -150,7 +156,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
} else if (firstMaster != null) {
// update as fast as we can the local node state with the new metadata (so we create indices for example)
final ClusterState masterState = firstMaster.clusterService.state();
clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
// make sure we have the local node id set, we might need it as a result of the new metadata
@ -166,7 +177,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
// tell the master to send the fact that we are here
final LocalDiscovery master = firstMaster;
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ProcessedClusterStateNonMasterUpdateTask() {
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
@ -230,7 +246,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
}
final LocalDiscovery master = firstMaster;
master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateNonMasterUpdateTask() {
master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id());
@ -351,7 +372,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
assert nodeSpecificClusterState.nodes().masterNode() != null : "received a cluster state without a master";
assert !nodeSpecificClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block";
discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ProcessedClusterStateNonMasterUpdateTask() {
discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.supersedes(nodeSpecificClusterState)) {

View File

@ -21,8 +21,7 @@ package org.elasticsearch.discovery.zen;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@ -133,7 +132,13 @@ public class NodeJoinController extends AbstractComponent {
/** utility method to fail the given election context under the cluster state thread */
private void failContext(final ElectionContext context, final String reason, final Throwable throwable) {
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
context.onFailure(throwable);
@ -343,7 +348,7 @@ public class NodeJoinController extends AbstractComponent {
* Processes any pending joins via a ClusterState update task.
* Note: this task automatically fails (and fails all pending joins) if the current node is not marked as master
*/
class ProcessJoinsTask extends ProcessedClusterStateUpdateTask {
class ProcessJoinsTask extends ClusterStateUpdateTask {
private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>();
private boolean nodeAdded = false;

View File

@ -76,7 +76,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
public final static String SETTING_REJOIN_ON_MASTER_GONE = "discovery.zen.rejoin_on_master_gone";
public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping.timeout";
public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout";
public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout";
public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts";
public final static String SETTING_JOIN_RETRY_DELAY = "discovery.zen.join_retry_delay";
@ -150,10 +150,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.discoverySettings = discoverySettings;
this.pingService = pingService;
this.electMaster = electMasterService;
TimeValue pingTimeout = this.settings.getAsTime("discovery.zen.initial_ping_timeout", timeValueSeconds(3));
pingTimeout = this.settings.getAsTime("discovery.zen.ping_timeout", pingTimeout);
pingTimeout = settings.getAsTime("discovery.zen.ping_timeout", pingTimeout);
this.pingTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, pingTimeout);
this.pingTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(3));
this.joinTimeout = settings.getAsTime(SETTING_JOIN_TIMEOUT, TimeValue.timeValueMillis(this.pingTimeout.millis() * 20));
this.joinRetryAttempts = settings.getAsInt(SETTING_JOIN_RETRY_ATTEMPTS, 3);
@ -173,7 +170,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
throw new IllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]");
}
logger.debug("using ping.timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
nodeSettingsService.addListener(new ApplySettings());
@ -210,7 +207,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.nodeJoinController = new NodeJoinController(clusterService, routingService, discoverySettings, settings);
// start the join thread from a cluster state update. See {@link JoinThreadControl} for details.
clusterService.submitStateUpdateTask("initial_join", new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("initial_join", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
// do the join on a different thread, the DiscoveryService waits for 30s anyhow till it is discovered
@ -397,7 +400,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// finalize join through the cluster state update thread
final DiscoveryNode finalMasterNode = masterNode;
clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("finalize_join (" + masterNode + ")", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
if (!success) {
@ -523,7 +531,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// nothing to do here...
return;
}
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.nodes().get(node.id()) == null) {
@ -570,7 +578,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership.
return;
}
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
// check if we have enough master nodes, if not, we need to move into joining the cluster again
@ -610,7 +618,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
logger.info("master_left [{}], reason [{}]", masterNode, reason);
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ProcessedClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
if (!masterNode.id().equals(currentState.nodes().masterNodeId())) {
@ -671,7 +685,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
void processNextPendingClusterState(String reason) {
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ProcessedClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
ClusterState newClusterState = null;
@Override
@ -1086,7 +1105,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
class RejoinClusterRequestHandler implements TransportRequestHandler<RejoinClusterRequest> {
@Override
public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception {
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
try {
@ -1097,11 +1122,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return rejoin(currentState, "received a request to rejoin the cluster from [" + request.fromNodeId + "]");
}
@Override
public void onNoLongerMaster(String source) {
// already logged
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);

View File

@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ProcessedClusterStateNonMasterUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
@ -342,21 +342,23 @@ public class MasterFaultDetection extends FaultDetection {
if (!nodes.localNodeMaster() || !nodes.nodeExists(request.nodeId)) {
logger.trace("checking ping from [{}] under a cluster state thread", request.nodeId);
clusterService.submitStateUpdateTask("master ping (from: [" + request.nodeId + "])", new ProcessedClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("master ping (from: [" + request.nodeId + "])", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
// if we are no longer master, fail...
DiscoveryNodes nodes = currentState.nodes();
if (!nodes.localNodeMaster()) {
throw new NotMasterException("local node is not master");
}
if (!nodes.nodeExists(request.nodeId)) {
throw new NodeDoesNotExistOnMasterException();
}
return currentState;
}
@Override
public void onNoLongerMaster(String source) {
onFailure(source, new NotMasterException("local node is not master"));
}
@Override
public void onFailure(String source, @Nullable Throwable t) {
if (t == null) {
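With onNoLongerMaster removed from the task interface, loss of mastership is now signalled by throwing NotMasterException from execute(), as in the hunk above, and observed in onFailure(). A sketch of how a caller could preserve the old callback behaviour (the instanceof check is an assumption, not shown in this diff):

@Override
public void onFailure(String source, @Nullable Throwable t) {
    if (t instanceof NotMasterException) {
        // equivalent of the old onNoLongerMaster(source) callback
        return;
    }
    // handle all other failures
}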

View File

@ -214,7 +214,7 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
@Override
public void onSuccess(final ClusterState recoveredState) {
logger.trace("successful state recovery, importing cluster state...");
clusterService.submitStateUpdateTask("local-gateway-elected-state", new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("local-gateway-elected-state", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
assert currentState.metaData().indices().isEmpty();

View File

@ -21,7 +21,7 @@ package org.elasticsearch.gateway;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@ -112,7 +112,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
for (int i = 0; i < request.indices.length; i++) {
indexNames[i] = request.indices[i].index();
}
clusterService.submitStateUpdateTask("allocation dangled indices " + Arrays.toString(indexNames), new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("allocation dangled indices " + Arrays.toString(indexNames), new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.blocks().disableStatePersistence()) {

View File

@ -57,7 +57,7 @@ public class HttpInfo implements Streamable, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.HTTP);
builder.field(Fields.BOUND_ADDRESS, address.boundAddress().toString());
builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
builder.byteSizeField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength);
builder.endObject();
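With builder.array(...), bound_address is now rendered as a JSON array of all bound sockets instead of a single string. A hypothetical rendering of the node-info fragment this method produces (field values invented for illustration):

"http" : {
  "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ],
  "publish_address" : "127.0.0.1:9200",
  "max_content_length_in_bytes" : 104857600
}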

View File

@ -28,10 +28,7 @@ import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.NetworkExceptionHelper;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.transport.*;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
@ -52,10 +49,8 @@ import org.jboss.netty.handler.timeout.ReadTimeoutException;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
@ -255,12 +250,13 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
} catch (IOException e) {
throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e);
}
List<InetSocketTransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
for (InetAddress address : hostAddresses) {
bindAddress(address);
boundAddresses.add(bindAddress(address));
}
InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(0).getLocalAddress();
InetSocketTransportAddress boundAddress = boundAddresses.get(0);
InetSocketAddress publishAddress;
if (0 == publishPort) {
publishPort = boundAddress.getPort();
@ -270,10 +266,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
} catch (Exception e) {
throw new BindTransportException("Failed to resolve publish address", e);
}
this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
}
private void bindAddress(final InetAddress hostAddress) {
private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
@ -296,7 +292,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
if (!success) {
throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
}
logger.info("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
if (logger.isDebugEnabled()) {
logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
}
return new InetSocketTransportAddress(boundSocket.get());
}
@Override
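The binding loop now collects one InetSocketTransportAddress per resolved host address, because bindAddress(...) returns the socket it actually bound instead of void. Condensed from the hunks above, the resulting flow is:

List<InetSocketTransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
for (InetAddress address : hostAddresses) {
    boundAddresses.add(bindAddress(address)); // bindAddress now returns the bound socket
}
this.boundAddress = new BoundTransportAddress(
        boundAddresses.toArray(new TransportAddress[boundAddresses.size()]),
        new InetSocketTransportAddress(publishAddress));

so BoundTransportAddress carries every bound socket (an array) plus the single publish address — the same constructor shape the LocalTransport change later in this commit adopts.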

View File

@ -59,7 +59,6 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreModule;
import org.elasticsearch.index.translog.TranslogService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InternalIndicesLifecycle;
@ -435,9 +434,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
logger.debug("[{}] failed to clean plugin shard service [{}]", e, shardId, closeable);
}
}
// now we can close the translog service, we need to close it before the we close the shard
// note the that the translog service is not there for shadow replicas
closeInjectorOptionalResource(sId, shardInjector, TranslogService.class);
// this logic is tricky, we want to close the engine so we rollback the changes done to it
// and close the shard so no operations are allowed to it
if (indexShard != null) {

View File

@ -231,7 +231,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
StringFieldMapper geohashMapper = null;
if (enableGeoHash || enableGeohashPrefix) {
// TODO: possible also implicitly enable geohash if geohash precision is set
geohashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
geohashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
.omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
geoPointFieldType.setGeohashEnabled(geohashMapper.fieldType(), geoHashPrecision, enableGeohashPrefix);
}
context.path().remove();
@ -678,7 +679,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
private void addGeohashField(ParseContext context, String geohash) throws IOException {
int len = Math.min(fieldType().geohashPrecision(), geohash.length());
int min = fieldType().isGeohashPrefixEnabled() ? 1 : geohash.length();
int min = fieldType().isGeohashPrefixEnabled() ? 1 : len;
for (int i = len; i >= min; i--) {
// side effect of this call is adding the field
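The one-character fix above matters when geohash prefixes are disabled and the configured precision is shorter than the geohash. For example (values invented): with geohashPrecision() == 5 and a 12-character geohash, len is 5, but the old min was geohash.length() == 12, so the loop for (int i = len; i >= min; i--) never ran and no geohash term was indexed at all; with min = len it runs exactly once and indexes the 5-character prefix.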

View File

@ -55,4 +55,6 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent
public String nodeName() {
return indexSettings.get("name", "");
}
}

View File

@ -46,10 +46,12 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexService;
@ -114,7 +116,10 @@ import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
public class IndexShard extends AbstractIndexShardComponent {
@ -176,12 +181,20 @@ public class IndexShard extends AbstractIndexShardComponent {
private final ShardEngineFailListener failedEngineListener = new ShardEngineFailListener();
private volatile boolean flushOnClose = true;
private volatile int flushThresholdOperations;
private volatile ByteSizeValue flushThresholdSize;
private volatile boolean disableFlush;
/**
* Index setting to control if a flush is executed before engine is closed
* This setting is realtime updateable.
*/
public static final String INDEX_FLUSH_ON_CLOSE = "index.flush_on_close";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS = "index.translog.flush_threshold_ops";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";
public static final String INDEX_TRANSLOG_DISABLE_FLUSH = "index.translog.disable_flush";
private final ShardPath path;
private final IndexShardOperationCounter indexShardOperationCounter;
@ -251,8 +264,10 @@ public class IndexShard extends AbstractIndexShardComponent {
cachingPolicy = new UsageTrackingQueryCachingPolicy();
}
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.flushThresholdOperations = indexSettings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, indexSettings.getAsInt("index.translog.flush_threshold", Integer.MAX_VALUE));
this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
}
public Store store() {
@ -1051,6 +1066,25 @@ public class IndexShard extends AbstractIndexShardComponent {
storeRecoveryService.recover(this, shouldExist, recoveryListener);
}
/**
* Returns <code>true</code> iff this shard needs to be flushed due to too many translog operations or a too-large translog.
* Otherwise <code>false</code>.
*/
boolean shouldFlush() {
if (disableFlush == false) {
Engine engine = engineUnsafe();
if (engine != null) {
try {
Translog translog = engine.getTranslog();
return translog.totalOperations() > flushThresholdOperations || translog.sizeInBytes() > flushThresholdSize.bytes();
} catch (AlreadyClosedException ex) {
// that's fine, we are already closed - no need to flush
}
}
}
return false;
}
private class ApplyRefreshSettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
@ -1059,6 +1093,22 @@ public class IndexShard extends AbstractIndexShardComponent {
if (state() == IndexShardState.CLOSED) { // no need to update anything if we are closed
return;
}
int flushThresholdOperations = settings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, IndexShard.this.flushThresholdOperations);
if (flushThresholdOperations != IndexShard.this.flushThresholdOperations) {
logger.info("updating flush_threshold_ops from [{}] to [{}]", IndexShard.this.flushThresholdOperations, flushThresholdOperations);
IndexShard.this.flushThresholdOperations = flushThresholdOperations;
}
ByteSizeValue flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, IndexShard.this.flushThresholdSize);
if (!flushThresholdSize.equals(IndexShard.this.flushThresholdSize)) {
logger.info("updating flush_threshold_size from [{}] to [{}]", IndexShard.this.flushThresholdSize, flushThresholdSize);
IndexShard.this.flushThresholdSize = flushThresholdSize;
}
boolean disableFlush = settings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, IndexShard.this.disableFlush);
if (disableFlush != IndexShard.this.disableFlush) {
logger.info("updating disable_flush from [{}] to [{}]", IndexShard.this.disableFlush, disableFlush);
IndexShard.this.disableFlush = disableFlush;
}
final EngineConfig config = engineConfig;
final boolean flushOnClose = settings.getAsBoolean(INDEX_FLUSH_ON_CLOSE, IndexShard.this.flushOnClose);
if (flushOnClose != IndexShard.this.flushOnClose) {
@ -1438,4 +1488,48 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
private final AtomicBoolean asyncFlushRunning = new AtomicBoolean();
/**
* Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on
* the flush thread-pool asynchronously.
* @return <code>true</code> if a new flush is scheduled, otherwise <code>false</code>.
*/
public boolean maybeFlush() {
if (shouldFlush()) {
if (asyncFlushRunning.compareAndSet(false, true)) { // we can't use a lock here since we "release" in a different thread
if (shouldFlush() == false) {
// we have to check again: otherwise there is a race in which one thread passes
// the first shouldFlush() check while another thread flushes fast enough
// to finish before the current thread can flip the asyncFlushRunning flag.
// in that situation we would run an extra, unneeded flush.
asyncFlushRunning.compareAndSet(true, false);
} else {
logger.debug("submitting async flush request");
final AbstractRunnable abstractRunnable = new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
if (state != IndexShardState.CLOSED) {
logger.warn("failed to flush index", t);
}
}
@Override
protected void doRun() throws Exception {
flush(new FlushRequest());
}
@Override
public void onAfter() {
asyncFlushRunning.compareAndSet(true, false);
maybeFlush(); // schedule another flush if the thresholds have been crossed again such that shouldFlush() returns true
}
};
threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable);
return true;
}
}
}
return false;
}
}
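These additions fold the translog-based flush trigger of the removed TranslogService (deleted later in this commit) into IndexShard itself, keyed off the same index settings. A minimal sketch of configuring the thresholds at index-creation time, assuming the Settings builder API of this codebase:

Settings indexSettings = Settings.settingsBuilder()
        .put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 10000)    // flush after this many translog operations
        .put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, "256mb") // ...or once the translog grows past this size
        .put(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, false)          // set to true to disable translog-based flushing
        .build();

All three remain dynamically updatable through the ApplyRefreshSettings listener above.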

View File

@ -20,18 +20,15 @@
package org.elasticsearch.index.shard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Classes;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.index.engine.IndexSearcherWrapper;
import org.elasticsearch.index.engine.IndexSearcherWrappingService;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.termvectors.ShardTermVectorsService;
import org.elasticsearch.index.translog.TranslogService;
/**
* The {@code IndexShardModule} module is responsible for binding the correct
@ -68,7 +65,6 @@ public class IndexShardModule extends AbstractModule {
bind(IndexShard.class).to(ShadowIndexShard.class).asEagerSingleton();
} else {
bind(IndexShard.class).asEagerSingleton();
bind(TranslogService.class).asEagerSingleton();
}
bind(EngineFactory.class).to(engineFactoryImpl);

View File

@ -108,6 +108,12 @@ public final class ShadowIndexShard extends IndexShard {
return engineFactory.newReadOnlyEngine(config);
}
@Override
public boolean shouldFlush() {
// we don't need to flush since we don't write - everything is controlled by the primary
return false;
}
public boolean allowsPrimaryPromotion() {
return false;
}

View File

@ -49,8 +49,8 @@ public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, To
public static final BlobStoreIndexShardSnapshots PROTO = new BlobStoreIndexShardSnapshots();
private final List<SnapshotFiles> shardSnapshots;
private final ImmutableMap<String, FileInfo> files;
private final ImmutableMap<String, List<FileInfo>> physicalFiles;
private final Map<String, FileInfo> files;
private final Map<String, List<FileInfo>> physicalFiles;
public BlobStoreIndexShardSnapshots(List<SnapshotFiles> shardSnapshots) {
this.shardSnapshots = Collections.unmodifiableList(new ArrayList<>(shardSnapshots));
@ -108,8 +108,8 @@ public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, To
private BlobStoreIndexShardSnapshots() {
shardSnapshots = Collections.emptyList();
files = ImmutableMap.of();
physicalFiles = ImmutableMap.of();
files = Collections.emptyMap();
physicalFiles = Collections.emptyMap();
}

View File

@ -737,7 +737,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
public static final MetadataSnapshot EMPTY = new MetadataSnapshot();
private final ImmutableMap<String, String> commitUserData;
private final Map<String, String> commitUserData;
private final long numDocs;

View File

@ -1,207 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
/**
*
*/
public class TranslogService extends AbstractIndexShardComponent implements Closeable {
public static final String INDEX_TRANSLOG_FLUSH_INTERVAL = "index.translog.interval";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS = "index.translog.flush_threshold_ops";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD = "index.translog.flush_threshold_period";
public static final String INDEX_TRANSLOG_DISABLE_FLUSH = "index.translog.disable_flush";
private final ThreadPool threadPool;
private final IndexSettingsService indexSettingsService;
private final IndexShard indexShard;
private volatile TimeValue interval;
private volatile int flushThresholdOperations;
private volatile ByteSizeValue flushThresholdSize;
private volatile TimeValue flushThresholdPeriod;
private volatile boolean disableFlush;
private volatile ScheduledFuture future;
private final ApplySettings applySettings = new ApplySettings();
@Inject
public TranslogService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard) {
super(shardId, indexSettings);
this.threadPool = threadPool;
this.indexSettingsService = indexSettingsService;
this.indexShard = indexShard;
this.flushThresholdOperations = indexSettings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, indexSettings.getAsInt("index.translog.flush_threshold", Integer.MAX_VALUE));
this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
this.flushThresholdPeriod = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(30));
this.interval = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, timeValueMillis(5000));
this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
logger.debug("interval [{}], flush_threshold_ops [{}], flush_threshold_size [{}], flush_threshold_period [{}]", interval, flushThresholdOperations, flushThresholdSize, flushThresholdPeriod);
this.future = threadPool.schedule(interval, ThreadPool.Names.SAME, new TranslogBasedFlush());
indexSettingsService.addListener(applySettings);
}
@Override
public void close() {
indexSettingsService.removeListener(applySettings);
FutureUtils.cancel(this.future);
}
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int flushThresholdOperations = settings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, TranslogService.this.flushThresholdOperations);
if (flushThresholdOperations != TranslogService.this.flushThresholdOperations) {
logger.info("updating flush_threshold_ops from [{}] to [{}]", TranslogService.this.flushThresholdOperations, flushThresholdOperations);
TranslogService.this.flushThresholdOperations = flushThresholdOperations;
}
ByteSizeValue flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, TranslogService.this.flushThresholdSize);
if (!flushThresholdSize.equals(TranslogService.this.flushThresholdSize)) {
logger.info("updating flush_threshold_size from [{}] to [{}]", TranslogService.this.flushThresholdSize, flushThresholdSize);
TranslogService.this.flushThresholdSize = flushThresholdSize;
}
TimeValue flushThresholdPeriod = settings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TranslogService.this.flushThresholdPeriod);
if (!flushThresholdPeriod.equals(TranslogService.this.flushThresholdPeriod)) {
logger.info("updating flush_threshold_period from [{}] to [{}]", TranslogService.this.flushThresholdPeriod, flushThresholdPeriod);
TranslogService.this.flushThresholdPeriod = flushThresholdPeriod;
}
TimeValue interval = settings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, TranslogService.this.interval);
if (!interval.equals(TranslogService.this.interval)) {
logger.info("updating interval from [{}] to [{}]", TranslogService.this.interval, interval);
TranslogService.this.interval = interval;
}
boolean disableFlush = settings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, TranslogService.this.disableFlush);
if (disableFlush != TranslogService.this.disableFlush) {
logger.info("updating disable_flush from [{}] to [{}]", TranslogService.this.disableFlush, disableFlush);
TranslogService.this.disableFlush = disableFlush;
}
}
}
private TimeValue computeNextInterval() {
return new TimeValue(interval.millis() + (ThreadLocalRandom.current().nextLong(interval.millis())));
}
private class TranslogBasedFlush implements Runnable {
private volatile long lastFlushTime = System.currentTimeMillis();
@Override
public void run() {
if (indexShard.state() == IndexShardState.CLOSED) {
return;
}
// flush is disabled, but still reschedule
if (disableFlush) {
reschedule();
return;
}
Translog translog = indexShard.engine().getTranslog();
if (translog == null) {
reschedule();
return;
}
int currentNumberOfOperations = translog.totalOperations();
if (currentNumberOfOperations == 0) {
reschedule();
return;
}
if (flushThresholdOperations > 0) {
if (currentNumberOfOperations > flushThresholdOperations) {
logger.trace("flushing translog, operations [{}], breached [{}]", currentNumberOfOperations, flushThresholdOperations);
asyncFlushAndReschedule();
return;
}
}
if (flushThresholdSize.bytes() > 0) {
long sizeInBytes = translog.sizeInBytes();
if (sizeInBytes > flushThresholdSize.bytes()) {
logger.trace("flushing translog, size [{}], breached [{}]", new ByteSizeValue(sizeInBytes), flushThresholdSize);
asyncFlushAndReschedule();
return;
}
}
if (flushThresholdPeriod.millis() > 0) {
if ((threadPool.estimatedTimeInMillis() - lastFlushTime) > flushThresholdPeriod.millis()) {
logger.trace("flushing translog, last_flush_time [{}], breached [{}]", lastFlushTime, flushThresholdPeriod);
asyncFlushAndReschedule();
return;
}
}
reschedule();
}
private void reschedule() {
future = threadPool.schedule(computeNextInterval(), ThreadPool.Names.SAME, this);
}
private void asyncFlushAndReschedule() {
threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() {
@Override
public void run() {
try {
indexShard.flush(new FlushRequest());
} catch (IllegalIndexShardStateException e) {
// we are being closed, or in created state, ignore
} catch (FlushNotAllowedEngineException e) {
// ignore this exception, we are not allowed to perform flush
} catch (Throwable e) {
logger.warn("failed to flush shard on translog threshold", e);
}
lastFlushTime = threadPool.estimatedTimeInMillis();
if (indexShard.state() != IndexShardState.CLOSED) {
future = threadPool.schedule(computeNextInterval(), ThreadPool.Names.SAME, TranslogBasedFlush.this);
}
}
});
}
}
}

View File

@ -38,12 +38,7 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.concurrent.ScheduledFuture;
/**
@ -258,7 +253,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
}
// consider the shard inactive if its translog generation and operation count have not changed for a long time
if (status.translogId == translog.currentFileGeneration() && translog.totalOperations() == 0) {
if (status.translogId == translog.currentFileGeneration() && translog.totalOperations() == status.translogNumberOfOperations) {
if (status.timeMS == -1) {
// first time we noticed the shard become idle
status.timeMS = timeMS;
@ -282,6 +277,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
status.timeMS = -1;
}
status.translogId = translog.currentFileGeneration();
status.translogNumberOfOperations = translog.totalOperations();
if (status.activeIndexing) {
activeShards++;
@ -376,6 +372,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
private static class ShardIndexingStatus {
long translogId = -1;
long translogNumberOfOperations = -1;
boolean activeIndexing = true;
long timeMS = -1; // contains the first time we saw this shard with no operations done on it
}
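The controller previously marked a shard idle only when its translog held zero operations; with the new translogNumberOfOperations bookkeeping it is idle whenever the operation count has not moved between checks, which also catches shards whose translog still retains old operations. The bookkeeping, condensed from the hunks above:

// same generation and an unchanged operation count since the last check => candidate for inactive
boolean idle = status.translogId == translog.currentFileGeneration()
        && translog.totalOperations() == status.translogNumberOfOperations;
// remember the current values for the next check
status.translogId = translog.currentFileGeneration();
status.translogNumberOfOperations = translog.totalOperations();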

View File

@ -287,7 +287,12 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
return;
}
clusterService.submitStateUpdateTask("indices_store ([" + shardId + "] active fully on other nodes)", new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("indices_store ([" + shardId + "] active fully on other nodes)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
if (clusterState.getVersion() != currentState.getVersion()) {

View File

@ -70,7 +70,7 @@ public class DeadlockAnalyzer {
}
private Set<LinkedHashSet<ThreadInfo>> calculateCycles(ImmutableMap<Long, ThreadInfo> threadInfoMap) {
private Set<LinkedHashSet<ThreadInfo>> calculateCycles(Map<Long, ThreadInfo> threadInfoMap) {
Set<LinkedHashSet<ThreadInfo>> cycles = new HashSet<>();
for (Map.Entry<Long, ThreadInfo> entry : threadInfoMap.entrySet()) {
LinkedHashSet<ThreadInfo> cycle = new LinkedHashSet<>();

View File

@ -20,6 +20,7 @@
package org.elasticsearch.node.service;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
@ -41,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Map;
/**
*/
@ -101,7 +103,7 @@ public class NodeService extends AbstractComponent {
/**
* Attributes that different services in the node can add, to be reported as part of the node info (for example).
*/
public ImmutableMap<String, String> attributes() {
public Map<String, String> attributes() {
return this.serviceAttributes;
}

View File

@ -84,6 +84,7 @@ public class PluginManager {
"discovery-azure",
"discovery-ec2",
"discovery-multicast",
"lang-expression",
"lang-javascript",
"lang-python",
"mapper-murmur3",

View File

@ -58,7 +58,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
private final VerifyNodeRepositoryAction verifyAction;
private volatile ImmutableMap<String, RepositoryHolder> repositories = ImmutableMap.of();
private volatile Map<String, RepositoryHolder> repositories = ImmutableMap.of();
@Inject
public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService, RepositoryTypesRegistry typesRegistry, Injector injector) {

View File

@ -19,7 +19,6 @@
package org.elasticsearch.rest.action.admin.indices.mapping.get;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
@ -29,7 +28,13 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import java.io.IOException;
@ -64,11 +69,9 @@ public class RestGetFieldMappingAction extends BaseRestHandler {
getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));
getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local()));
client.admin().indices().getFieldMappings(getMappingsRequest, new RestBuilderListener<GetFieldMappingsResponse>(channel) {
@SuppressWarnings("unchecked")
@Override
public RestResponse buildResponse(GetFieldMappingsResponse response, XContentBuilder builder) throws Exception {
ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappingsByIndex = response.mappings();
Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappingsByIndex = response.mappings();
boolean isPossibleSingleFieldRequest = indices.length == 1 && types.length == 1 && fields.length == 1;
if (isPossibleSingleFieldRequest && isFieldMappingMissingField(mappingsByIndex)) {
@ -91,13 +94,13 @@ public class RestGetFieldMappingAction extends BaseRestHandler {
* Helper method to find out whether the only included field mapping metadata is of type NULL, which means
* that the type and index exist, but the field does not
*/
private boolean isFieldMappingMissingField(ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappingsByIndex) throws IOException {
private boolean isFieldMappingMissingField(Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappingsByIndex) throws IOException {
if (mappingsByIndex.size() != 1) {
return false;
}
for (ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>> value : mappingsByIndex.values()) {
for (ImmutableMap<String, FieldMappingMetaData> fieldValue : value.values()) {
for (Map<String, Map<String, FieldMappingMetaData>> value : mappingsByIndex.values()) {
for (Map<String, FieldMappingMetaData> fieldValue : value.values()) {
for (Map.Entry<String, FieldMappingMetaData> fieldMappingMetaDataEntry : fieldValue.entrySet()) {
if (fieldMappingMetaDataEntry.getValue().isNull()) {
return true;

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.rest.action.cat;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
@ -34,11 +33,16 @@ import org.elasticsearch.common.Table;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestActionListener;
import org.elasticsearch.rest.action.support.RestResponseListener;
import org.elasticsearch.rest.action.support.RestTable;
import java.util.Map;
import static org.elasticsearch.rest.RestRequest.Method.GET;
public class RestNodeAttrsAction extends AbstractCatAction {
@ -107,7 +111,7 @@ public class RestNodeAttrsAction extends AbstractCatAction {
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
ImmutableMap<String, String> attrs = node.getAttributes();
Map<String, String> attrs = node.getAttributes();
for(String att : attrs.keySet()) {
table.startRow();
table.addCell(node.name());

View File

@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@ -36,7 +37,7 @@ public final class ScriptContextRegistry {
private final ImmutableMap<String, ScriptContext> scriptContexts;
public ScriptContextRegistry(Iterable<ScriptContext.Plugin> customScriptContexts) {
public ScriptContextRegistry(Collection<ScriptContext.Plugin> customScriptContexts) {
Map<String, ScriptContext> scriptContexts = new HashMap<>();
for (ScriptContext.Standard scriptContext : ScriptContext.Standard.values()) {
scriptContexts.put(scriptContext.getKey(), scriptContext);

View File

@ -24,7 +24,6 @@ import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.expression.ExpressionScriptEngineService;
import org.elasticsearch.script.groovy.GroovyScriptEngineService;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
@ -92,13 +91,6 @@ public class ScriptModule extends AbstractModule {
Loggers.getLogger(ScriptService.class, settings).debug("failed to load mustache", t);
}
try {
Class.forName("org.apache.lucene.expressions.Expression");
multibinder.addBinding().to(ExpressionScriptEngineService.class).asEagerSingleton();
} catch (Throwable t) {
Loggers.getLogger(ScriptService.class, settings).debug("failed to load lucene expressions", t);
}
for (Class<? extends ScriptEngineService> scriptEngine : scriptEngines) {
multibinder.addBinding().to(scriptEngine).asEagerSingleton();
}

View File

@ -58,7 +58,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.script.expression.ExpressionScriptEngineService;
import org.elasticsearch.script.groovy.GroovyScriptEngineService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SearchLookup;
@ -245,8 +244,9 @@ public class ScriptService extends AbstractComponent implements Closeable {
throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled");
}
// TODO: fix this through some API or something, that's wrong
// special exception to prevent expressions from compiling as update or mapping scripts
boolean expression = scriptEngineService instanceof ExpressionScriptEngineService;
boolean expression = "expression".equals(script.getLang());
boolean notSupported = scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey()) ||
scriptContext.getKey().equals(ScriptContext.Standard.MAPPING.getKey());
if (expression && notSupported) {

View File

@ -1,42 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.script.expression;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.text.ParseException;
/**
* Exception representing a compilation error in an expression.
*/
public class ExpressionScriptCompilationException extends ElasticsearchException {
public ExpressionScriptCompilationException(String msg, ParseException e) {
super(msg, e);
}
public ExpressionScriptCompilationException(String msg) {
super(msg);
}
public ExpressionScriptCompilationException(StreamInput in) throws IOException {
super(in);
}
}

View File

@ -26,7 +26,6 @@ import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.expression.ExpressionScriptEngineService;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregation.Type;
@ -105,7 +104,8 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator {
ExecutableScript executableScript = reduceContext.scriptService().executable(compiledScript, vars);
Object scriptReturnValue = executableScript.run();
final boolean keepBucket;
if (ExpressionScriptEngineService.NAME.equals(script.getLang())) {
// TODO: detect expression scripts through a proper API rather than by language name
if ("expression".equals(script.getLang())) {
double scriptDoubleValue = (double) scriptReturnValue;
keepBucket = scriptDoubleValue == 1.0;
} else {

View File

@ -79,7 +79,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase {
hitContext.hit().matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
}
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
private void addMatchedQueries(HitContext hitContext, Map<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
String name = entry.getKey();
Query filter = entry.getValue();

View File

@ -19,12 +19,13 @@
package org.elasticsearch.search.lookup;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.index.LeafReaderContext;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* Per-segment version of {@link SearchLookup}.
*/
@ -35,7 +36,7 @@ public class LeafSearchLookup {
final SourceLookup sourceLookup;
final LeafFieldsLookup fieldsLookup;
final LeafIndexLookup indexLookup;
final ImmutableMap<String, Object> asMap;
final Map<String, Object> asMap;
public LeafSearchLookup(LeafReaderContext ctx, LeafDocLookup docMap, SourceLookup sourceLookup,
LeafFieldsLookup fieldsLookup, LeafIndexLookup indexLookup, Map<String, Object> topLevelMap) {
@ -45,17 +46,17 @@ public class LeafSearchLookup {
this.fieldsLookup = fieldsLookup;
this.indexLookup = indexLookup;
ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
builder.putAll(topLevelMap);
builder.put("doc", docMap);
builder.put("_doc", docMap);
builder.put("_source", sourceLookup);
builder.put("_fields", fieldsLookup);
builder.put("_index", indexLookup);
asMap = builder.build();
Map<String, Object> asMap = new HashMap<>(topLevelMap.size() + 5);
asMap.putAll(topLevelMap);
asMap.put("doc", docMap);
asMap.put("_doc", docMap);
asMap.put("_source", sourceLookup);
asMap.put("_fields", fieldsLookup);
asMap.put("_index", indexLookup);
this.asMap = unmodifiableMap(asMap);
}
public ImmutableMap<String, Object> asMap() {
public Map<String, Object> asMap() {
return this.asMap;
}
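This is the pattern the commit uses throughout to drop Guava's ImmutableMap from public signatures: build a plain HashMap, then expose it through Collections.unmodifiableMap. On its own, as a sketch:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

Map<String, Object> map = new HashMap<>();
map.put("_source", sourceLookup);                                 // populate as before
Map<String, Object> readOnly = Collections.unmodifiableMap(map); // unmodifiable view, JDK-only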

View File

@ -27,28 +27,11 @@ import com.google.common.collect.ImmutableSet;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.RestoreInProgress;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.RestoreInProgress.ShardRestoreStatus;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.metadata.*;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
@ -70,36 +53,15 @@ import org.elasticsearch.index.shard.StoreRecoveryService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArrayList;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_UPGRADED;
import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
import static org.elasticsearch.cluster.metadata.MetaDataIndexStateService.INDEX_CLOSED_BLOCK;
/**
@ -211,7 +173,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
// Now we can start the actual restore process by adding shards to be recovered in the cluster state
// and updating cluster metadata (global and index) as needed
clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
RestoreInfo restoreInfo = null;
@Override
@ -525,7 +487,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
logger.trace("received updated snapshot restore state [{}]", request);
updatedSnapshotStateQueue.add(request);
clusterService.submitStateUpdateTask("update snapshot state", new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() {
private final List<UpdateIndexShardRestoreStatusRequest> drainedRequests = new ArrayList<>();
private Map<SnapshotId, Tuple<RestoreInfo, Map<ShardId, ShardRestoreStatus>>> batchedRestoreInfo = null;

View File

@ -91,7 +91,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
private final Condition shutdownCondition = shutdownLock.newCondition();
private volatile ImmutableMap<SnapshotId, SnapshotShards> shardSnapshots = ImmutableMap.of();
private volatile Map<SnapshotId, SnapshotShards> shardSnapshots = ImmutableMap.of();
private final BlockingQueue<UpdateIndexShardSnapshotStatusRequest> updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue();
@ -368,7 +368,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
if (snapshot.state() == SnapshotsInProgress.State.STARTED || snapshot.state() == SnapshotsInProgress.State.ABORTED) {
Map<ShardId, IndexShardSnapshotStatus> localShards = currentSnapshotShards(snapshot.snapshotId());
if (localShards != null) {
ImmutableMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> masterShards = snapshot.shards();
Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> masterShards = snapshot.shards();
for(Map.Entry<ShardId, IndexShardSnapshotStatus> localShard : localShards.entrySet()) {
ShardId shardId = localShard.getKey();
IndexShardSnapshotStatus localShardStatus = localShard.getValue();

View File

@ -24,21 +24,10 @@ import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus;
import org.elasticsearch.cluster.SnapshotsInProgress.State;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.metadata.*;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@ -61,14 +50,7 @@ import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
@ -182,7 +164,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
public void createSnapshot(final SnapshotRequest request, final CreateSnapshotListener listener) {
final SnapshotId snapshotId = new SnapshotId(request.repository(), request.name());
validate(snapshotId);
clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
private SnapshotsInProgress.Entry newSnapshot = null;
@ -303,7 +285,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
endSnapshot(snapshot);
return;
}
clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshotId().getSnapshot() + "]", new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshotId().getSnapshot() + "]", new ClusterStateUpdateTask() {
boolean accepted = false;
SnapshotsInProgress.Entry updatedSnapshot;
String failure = null;
@ -614,7 +596,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) {
SnapshotsInProgress.Entry updatedSnapshot = snapshot;
if (snapshot.state() == State.STARTED) {
ImmutableMap<ShardId, ShardSnapshotStatus> shards = processWaitingShards(snapshot.shards(), routingTable);
Map<ShardId, ShardSnapshotStatus> shards = processWaitingShards(snapshot.shards(), routingTable);
if (shards != null) {
changed = true;
if (!snapshot.state().completed() && completed(shards.values())) {
@ -643,7 +625,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
}
}
private ImmutableMap<ShardId, ShardSnapshotStatus> processWaitingShards(ImmutableMap<ShardId, ShardSnapshotStatus> snapshotShards, RoutingTable routingTable) {
private Map<ShardId, ShardSnapshotStatus> processWaitingShards(Map<ShardId, ShardSnapshotStatus> snapshotShards, RoutingTable routingTable) {
boolean snapshotChanged = false;
ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder();
for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshotShards.entrySet()) {
@ -734,10 +716,10 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
* @param shards list of shard statuses
* @return list of failed and closed indices
*/
private Tuple<Set<String>, Set<String>> indicesWithMissingShards(ImmutableMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards, MetaData metaData) {
private Tuple<Set<String>, Set<String>> indicesWithMissingShards(Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards, MetaData metaData) {
Set<String> missing = new HashSet<>();
Set<String> closed = new HashSet<>();
for (ImmutableMap.Entry<ShardId, SnapshotsInProgress.ShardSnapshotStatus> entry : shards.entrySet()) {
for (Map.Entry<ShardId, SnapshotsInProgress.ShardSnapshotStatus> entry : shards.entrySet()) {
if (entry.getValue().state() == State.MISSING) {
if (metaData.hasIndex(entry.getKey().getIndex()) && metaData.index(entry.getKey().getIndex()).getState() == IndexMetaData.State.CLOSE) {
closed.add(entry.getKey().getIndex());
@ -805,7 +787,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
* @param t exception if snapshot failed
*/
private void removeSnapshotFromClusterState(final SnapshotId snapshotId, final SnapshotInfo snapshot, final Throwable t) {
clusterService.submitStateUpdateTask("remove snapshot metadata", new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
@ -860,7 +842,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
*/
public void deleteSnapshot(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
validate(snapshotId);
clusterService.submitStateUpdateTask("delete snapshot", new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask() {
boolean waitForSnapshot = false;
@ -882,7 +864,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
} else {
// This snapshot is currently running - stopping shards first
waitForSnapshot = true;
ImmutableMap<ShardId, ShardSnapshotStatus> shards;
Map<ShardId, ShardSnapshotStatus> shards;
if (snapshot.state() == State.STARTED && snapshot.shards() != null) {
// snapshot is currently running - stop started shards
ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shardsBuilder = ImmutableMap.builder();

View File

@ -91,7 +91,7 @@ public class ThreadPool extends AbstractComponent {
private volatile ImmutableMap<String, ExecutorHolder> executors;
private final ImmutableMap<String, Settings> defaultExecutorTypeSettings;
private final Map<String, Settings> defaultExecutorTypeSettings;
private final Queue<ExecutorHolder> retiredExecutors = new ConcurrentLinkedQueue<>();
@ -511,8 +511,8 @@ public class ThreadPool extends AbstractComponent {
public void run() {
try {
runnable.run();
} catch (Exception e) {
logger.warn("failed to run {}", e, runnable.toString());
} catch (Throwable t) {
logger.warn("failed to run {}", t, runnable.toString());
}
}
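Editor's note: the catch clause widens from Exception to Throwable so that Errors thrown by a scheduled task are logged rather than silently unwinding the pool thread. A self-contained sketch of the wrapper pattern; logging goes through System.err here since the surrounding logger field is outside the hunk:

```java
public final class LoggingRunnable implements Runnable {
    private final Runnable runnable;

    public LoggingRunnable(Runnable runnable) {
        this.runnable = runnable;
    }

    @Override
    public void run() {
        try {
            runnable.run();
        } catch (Throwable t) {
            // Throwable, not Exception: also traps AssertionError and friends
            // instead of letting them kill the executor thread unnoticed.
            System.err.println("failed to run " + runnable + ": " + t);
        }
    }

    public static void main(String[] args) {
        new LoggingRunnable(() -> { throw new AssertionError("boom"); }).run();
    }
}
```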

View File

@ -58,13 +58,13 @@ public class TransportInfo implements Streamable, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.TRANSPORT);
builder.field(Fields.BOUND_ADDRESS, address.boundAddress().toString());
builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
builder.startObject(Fields.PROFILES);
if (profileAddresses != null && profileAddresses.size() > 0) {
for (Map.Entry<String, BoundTransportAddress> entry : profileAddresses.entrySet()) {
builder.startObject(entry.getKey());
builder.field(Fields.BOUND_ADDRESS, entry.getValue().boundAddress().toString());
builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses());
builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString());
builder.endObject();
}
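Editor's note: the practical effect of this hunk is that bound_address in the node info output changes from a single string to an array with one entry per bound address. A hedged sketch of producing the new shape with XContentBuilder; the field values are illustrative, and the real code feeds TransportAddress objects through the array(...) convenience shown above:

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class BoundAddressShape {
    public static void main(String[] args) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        builder.startArray("bound_address");   // was: builder.field("bound_address", "...single string...")
        builder.value("192.168.1.1:9300");
        builder.value("[::1]:9300");
        builder.endArray();
        builder.field("publish_address", "192.168.1.1:9300");
        builder.endObject();
        // {"bound_address":["192.168.1.1:9300","[::1]:9300"],"publish_address":"192.168.1.1:9300"}
        System.out.println(builder.string());
    }
}
```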

View File

@ -67,7 +67,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
protected final Transport transport;
protected final ThreadPool threadPool;
volatile ImmutableMap<String, RequestHandlerRegistry> requestHandlers = ImmutableMap.of();
volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
final Object requestHandlerMutex = new Object();
final ConcurrentMapLong<RequestHolder> clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
@ -171,6 +171,9 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
transport.start();
if (transport.boundAddress() != null && logger.isInfoEnabled()) {
logger.info("{}", transport.boundAddress());
for (Map.Entry<String, BoundTransportAddress> entry : transport.profileBoundAddresses().entrySet()) {
logger.info("profile [{}]: {}", entry.getKey(), entry.getValue());
}
}
boolean setStarted = started.compareAndSet(false, true);
assert setStarted : "service was already started";
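Editor's note: requestHandlers drops ImmutableMap.of() for Collections.emptyMap() while keeping the same volatile copy-on-write discipline guarded by requestHandlerMutex. The registration path is outside this hunk; a sketch of the usual idiom it follows, with illustrative names:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Readers see an immutable snapshot; writers copy-mutate-replace under a mutex.
public final class HandlerRegistry<T> {
    private volatile Map<String, T> handlers = Collections.emptyMap();
    private final Object mutex = new Object();

    public void register(String action, T handler) {
        synchronized (mutex) {
            Map<String, T> copy = new HashMap<>(handlers);
            copy.put(action, handler);
            handlers = Collections.unmodifiableMap(copy); // publish atomically via the volatile write
        }
    }

    public T get(String action) {
        return handlers.get(action); // lock-free read
    }
}
```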

View File

@ -105,7 +105,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
if (previous != null) {
throw new ElasticsearchException("local address [" + address + "] is already bound");
}
boundAddress = new BoundTransportAddress(localAddress, localAddress);
boundAddress = new BoundTransportAddress(new TransportAddress[] { localAddress }, localAddress);
}
@Override

View File

@ -89,7 +89,7 @@ public class LocalTransportChannel implements TransportChannel {
public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput();
writeResponseExceptionHeader(stream);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddress(), action, error);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddresses()[0], action, error);
stream.writeThrowable(tx);
final byte[] data = stream.bytes().toBytes();
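Editor's note: both local-transport call sites adapt to the new BoundTransportAddress contract, an ordered array of bound addresses plus one publish address, with index 0 the highest-priority entry. A small sketch of the construction and the replacement for the old boundAddress() accessor; the port number is illustrative:

```java
import java.net.InetAddress;

import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;

public class BoundAddressExample {
    public static void main(String[] args) {
        TransportAddress local = new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), 9300);
        BoundTransportAddress bound = new BoundTransportAddress(new TransportAddress[] { local }, local);
        // callers that previously used boundAddress() now select an element explicitly
        TransportAddress first = bound.boundAddresses()[0];
        System.out.println(first);
    }
}
```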

View File

@ -329,12 +329,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
createServerBootstrap(name, mergedSettings);
bindServerBootstrap(name, mergedSettings);
}
InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(DEFAULT_PROFILE).get(0).getLocalAddress();
int publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", boundAddress.getPort()));
String publishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
}
success = true;
} finally {
@ -460,9 +454,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
}
private void bindServerBootstrap(final String name, final InetAddress hostAddress, Settings settings) {
private void bindServerBootstrap(final String name, final InetAddress hostAddress, Settings profileSettings) {
String port = settings.get("port");
String port = profileSettings.get("port");
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
@ -478,7 +472,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
serverChannels.put(name, list);
}
list.add(channel);
boundSocket.set((InetSocketAddress)channel.getLocalAddress());
boundSocket.set((InetSocketAddress) channel.getLocalAddress());
}
} catch (Exception e) {
lastException.set(e);
@ -491,16 +485,48 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get());
}
InetSocketAddress boundAddress = boundSocket.get();
// TODO: We can remove the special casing for the default profile and store it in the profile map to reduce the complexity here
if (!DEFAULT_PROFILE.equals(name)) {
InetSocketAddress boundAddress = boundSocket.get();
int publishPort = settings.getAsInt("publish_port", boundAddress.getPort());
String publishHost = settings.get("publish_host", boundAddress.getHostString());
InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
// TODO: support real multihoming with publishing. Today we use putIfAbsent so only the prioritized address is published
profileBoundAddresses.putIfAbsent(name, new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)));
// check to see if an address is already bound for this profile
BoundTransportAddress boundTransportAddress = profileBoundAddresses().get(name);
if (boundTransportAddress == null) {
// no address is bound, so let's create one with the publish address information from the settings, or the bound address as a fallback
int publishPort = profileSettings.getAsInt("publish_port", boundAddress.getPort());
String publishHost = profileSettings.get("publish_host", boundAddress.getHostString());
InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
profileBoundAddresses.put(name, new BoundTransportAddress(new TransportAddress[]{new InetSocketTransportAddress(boundAddress)}, new InetSocketTransportAddress(publishAddress)));
} else {
// TODO: support real multihoming with publishing. Today we update the bound addresses so only the prioritized address is published
// An address already exists: add the new bound address to the end of a new array and create a new BoundTransportAddress with the array and the existing publish address
// the new bound address is appended in order to preserve the ordering/priority of bound addresses
TransportAddress[] existingBoundAddress = boundTransportAddress.boundAddresses();
TransportAddress[] updatedBoundAddresses = Arrays.copyOf(existingBoundAddress, existingBoundAddress.length + 1);
updatedBoundAddresses[updatedBoundAddresses.length - 1] = new InetSocketTransportAddress(boundAddress);
profileBoundAddresses.put(name, new BoundTransportAddress(updatedBoundAddresses, boundTransportAddress.publishAddress()));
}
} else {
if (this.boundAddress == null) {
// this is the first address that has been bound for the default profile so we get the publish address information and create a new BoundTransportAddress
// these calls are different from the profile ones due to the way the settings for a profile are created. If we want to merge the code for the default profile and
// other profiles together, we need to change how the profileSettings are built for the default profile...
int publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", boundAddress.getPort()));
String publishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
this.boundAddress = new BoundTransportAddress(new TransportAddress[]{new InetSocketTransportAddress(boundAddress)}, new InetSocketTransportAddress(publishAddress));
} else {
// the default profile is already bound to one address and has the publish address; copy the existing bound addresses as-is and append the new address.
// the new bound address is appended in order to preserve the ordering/priority of bound addresses
TransportAddress[] existingBoundAddress = this.boundAddress.boundAddresses();
TransportAddress[] updatedBoundAddresses = Arrays.copyOf(existingBoundAddress, existingBoundAddress.length + 1);
updatedBoundAddresses[updatedBoundAddresses.length - 1] = new InetSocketTransportAddress(boundAddress);
this.boundAddress = new BoundTransportAddress(updatedBoundAddresses, this.boundAddress.publishAddress());
}
}
logger.info("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get()));
if (logger.isDebugEnabled()) {
logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get()));
}
}
private void createServerBootstrap(String name, Settings settings) {
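Editor's note: the appended-array step repeats twice above; isolated, it is just a copy-one-larger-and-set-last operation, which keeps earlier (higher-priority) bound addresses in place. A generic sketch:

```java
import java.util.Arrays;

public class AppendExample {
    // Copy the existing array one slot larger and put the newest element last,
    // preserving the ordering/priority of everything already bound.
    static <T> T[] append(T[] existing, T latest) {
        T[] updated = Arrays.copyOf(existing, existing.length + 1);
        updated[updated.length - 1] = latest;
        return updated;
    }

    public static void main(String[] args) {
        String[] bound = { "10.0.0.1:9300" };
        System.out.println(Arrays.toString(append(bound, "[::1]:9300")));
    }
}
```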

View File

@ -22,11 +22,7 @@ package org.elasticsearch.tribe;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
@ -50,11 +46,7 @@ import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.rest.RestStatus;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
/**
@ -217,7 +209,12 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
@Override
public void clusterChanged(final ClusterChangedEvent event) {
logger.debug("[{}] received cluster event, [{}]", tribeName, event.source());
clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
ClusterState tribeState = event.state();

View File

@ -1,6 +1,6 @@
NAME
start - start Elasticsearcion
start - Start Elasticsearch
SYNOPSIS
@ -24,3 +24,5 @@ OPTIONS
--property=value Configures an Elasticsearch-specific property, like --network.host 127.0.0.1
--property value
NOTE: The -d, -p, and -D arguments must appear before any --property arguments.

View File

@ -43,6 +43,7 @@ OFFICIAL PLUGINS
- discovery-azure
- discovery-ec2
- discovery-multicast
- lang-expression
- lang-javascript
- lang-python
- mapper-murmur3

View File

@ -19,17 +19,22 @@
package org.elasticsearch.benchmark.common.recycler;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.common.recycler.Recyclers.*;
import static org.elasticsearch.common.recycler.Recyclers.concurrent;
import static org.elasticsearch.common.recycler.Recyclers.concurrentDeque;
import static org.elasticsearch.common.recycler.Recyclers.deque;
import static org.elasticsearch.common.recycler.Recyclers.dequeFactory;
import static org.elasticsearch.common.recycler.Recyclers.locked;
import static org.elasticsearch.common.recycler.Recyclers.none;
/** Benchmark that tries to measure the overhead of object recycling depending on concurrent access. */
public class RecyclerBenchmark {
@ -89,11 +94,11 @@ public class RecyclerBenchmark {
}
};
final ImmutableMap<String, Recycler<Object>> recyclers = ImmutableMap.<String, Recycler<Object>>builder()
.put("none", none(c))
.put("concurrent-queue", concurrentDeque(c, limit))
.put("locked", locked(deque(c, limit)))
.put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors())).build();
Map<String, Recycler<Object>> recyclers = new HashMap<>();
recyclers.put("none", none(c));
recyclers.put("concurrent-queue", concurrentDeque(c, limit));
recyclers.put("locked", locked(deque(c, limit)));
recyclers.put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors()));
// warmup
final long start = System.nanoTime();

View File

@ -220,6 +220,16 @@ public class BootstrapCliParserTests extends CliToolTestCase {
}
public void testThatHelpfulErrorMessageIsGivenWhenParametersAreOutOfOrder() throws Exception {
BootstrapCLIParser parser = new BootstrapCLIParser(terminal);
try {
parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"});
fail("expected IllegalArgumentException for out-of-order parameters");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("must be before any parameters starting with --"));
}
}
private void registerProperties(String ... systemProperties) {
propertiesToClear.addAll(Arrays.asList(systemProperties));
}

View File

@ -52,7 +52,7 @@ public class BootstrapForTesting {
static {
// just like bootstrap, initialize natives, then SM
Bootstrap.initializeNatives(true, true);
Bootstrap.initializeNatives(true, true, true);
// initialize probes
Bootstrap.initializeProbes();

View File

@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.test.ESTestCase;
/** Simple tests that the seccomp filter is working. */
public class SeccompTests extends ESTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
assumeTrue("requires seccomp filter installation", Natives.isSeccompInstalled());
// otherwise security manager will block the execution, no fun
assumeTrue("cannot test with security manager enabled", System.getSecurityManager() == null);
}
public void testNoExecution() throws Exception {
try {
Runtime.getRuntime().exec("ls");
fail("should not have been able to execute!");
} catch (Exception expected) {
// we can't guarantee how it's converted; currently it's an IOException, like this:
/*
java.io.IOException: Cannot run program "ls": error=13, Permission denied
at __randomizedtesting.SeedInfo.seed([65E6C4BED11899E:FC6E1CA6AA2DB634]:0)
at java.lang.ProcessBuilder.start(ProcessBuilder.java:1048)
at java.lang.Runtime.exec(Runtime.java:620)
...
Caused by: java.io.IOException: error=13, Permission denied
at java.lang.UNIXProcess.forkAndExec(Native Method)
at java.lang.UNIXProcess.<init>(UNIXProcess.java:248)
at java.lang.ProcessImpl.start(ProcessImpl.java:134)
at java.lang.ProcessBuilder.start(ProcessBuilder.java:1029)
...
*/
}
}
// make sure the thread inherits this too (it's documented that way)
public void testNoExecutionFromThread() throws Exception {
Thread t = new Thread() {
@Override
public void run() {
try {
Runtime.getRuntime().exec("ls");
fail("should not have been able to execute!");
} catch (Exception expected) {
// ok
}
}
};
t.start();
t.join();
}
}

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Singleton;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@ -42,12 +43,7 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@ -55,11 +51,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.ESIntegTestCase.Scope;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.*;
/**
*
@ -100,7 +92,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
final CountDownLatch timedOut = new CountDownLatch(1);
final AtomicBoolean executeCalled = new AtomicBoolean();
clusterService1.submitStateUpdateTask("test2", new TimeoutClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public TimeValue timeout() {
return TimeValue.timeValueMillis(2);
@ -325,7 +317,12 @@ public class ClusterServiceIT extends ESIntegTestCase {
taskFailed[0] = true;
final CountDownLatch latch2 = new CountDownLatch(1);
clusterService.submitStateUpdateTask("test", new ClusterStateNonMasterUpdateTask() {
clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
taskFailed[0] = false;
@ -525,7 +522,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
invoked1.await();
final CountDownLatch invoked2 = new CountDownLatch(9);
for (int i = 2; i <= 10; i++) {
clusterService.submitStateUpdateTask(Integer.toString(i), new ProcessedClusterStateUpdateTask() {
clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
@ -634,7 +631,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 1)
.put("discovery.zen.ping_timeout", "400ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -758,7 +755,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
rootLogger.addAppender(mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return currentState;
@ -774,7 +771,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
fail();
}
});
clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
@ -790,7 +787,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
latch.countDown();
}
});
clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return ClusterState.builder(currentState).incrementVersion().build();
@ -808,7 +805,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
@ -851,7 +848,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
try {
final CountDownLatch latch = new CountDownLatch(5);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
@ -874,7 +871,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
.put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms")));
clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
@ -891,7 +888,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
latch.countDown();
}
});
clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
@ -908,7 +905,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
fail();
}
});
clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Thread.sleep(100);
@ -927,7 +924,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService1.submitStateUpdateTask("test5", new ProcessedClusterStateUpdateTask() {
clusterService1.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
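Editor's note: every replacement in this test follows the same recipe: the retired ProcessedClusterStateUpdateTask, TimeoutClusterStateUpdateTask and ClusterStateNonMasterUpdateTask variants collapse into one ClusterStateUpdateTask where callers override only what they need. A hedged sketch, with import paths assumed from the 2.x package layout:

```java
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.unit.TimeValue;

class UnifiedTaskExample {
    void submit(ClusterService clusterService) {
        clusterService.submitStateUpdateTask("example", new ClusterStateUpdateTask() {
            @Override
            public boolean runOnlyOnMaster() {
                return false; // formerly ClusterStateNonMasterUpdateTask
            }

            @Override
            public TimeValue timeout() {
                return TimeValue.timeValueSeconds(30); // formerly TimeoutClusterStateUpdateTask
            }

            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState; // no-op state change
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                // formerly ProcessedClusterStateUpdateTask's completion callback
            }

            @Override
            public void onFailure(String source, Throwable t) {
                // failure handling
            }
        });
    }
}
```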

View File

@ -69,7 +69,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 2)
.put("discovery.zen.ping_timeout", "200ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -182,7 +182,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 3)
.put("discovery.zen.ping_timeout", "1s")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "1s")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -258,7 +258,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void dynamicUpdateMinimumMasterNodes() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.ping_timeout", "400ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -317,7 +317,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
int nodeCount = scaledRandomIntBetween(1, 5);
Settings.Builder settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.ping_timeout", "200ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put("discovery.initial_state_timeout", "500ms");
// set an initial value which is at least quorum to avoid split brains during initial startup
@ -372,7 +372,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
final AtomicReference<Throwable> failure = new AtomicReference<>();
logger.debug("--> submitting for cluster state to be rejected");
final ClusterService masterClusterService = internalCluster().clusterService(master);
masterClusterService.submitStateUpdateTask("test", new ProcessedClusterStateUpdateTask() {
masterClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
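Editor's note: the only change across these hunks is swapping the raw "discovery.zen.ping_timeout" string for the ZenDiscovery constant, so a renamed setting fails at compile time instead of silently at runtime. A sketch; the value is illustrative:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.ZenDiscovery;

public class PingTimeoutSetting {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
                .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") // was .put("discovery.zen.ping_timeout", "200ms")
                .build();
        System.out.println(settings.get(ZenDiscovery.SETTING_PING_TIMEOUT));
    }
}
```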

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
@ -45,12 +46,8 @@ import java.util.HashMap;
import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThan;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.*;
/**
*/
@ -68,7 +65,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
.put("discovery.type", "zen")
.put("action.auto_create_index", autoCreateIndex)
.put("discovery.zen.minimum_master_nodes", 2)
.put("discovery.zen.ping_timeout", "200ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put("discovery.initial_state_timeout", "500ms")
.put(DiscoverySettings.NO_MASTER_BLOCK, "all")
.build();
@ -221,7 +218,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
.put("discovery.type", "zen")
.put("action.auto_create_index", false)
.put("discovery.zen.minimum_master_nodes", 2)
.put("discovery.zen.ping_timeout", "200ms")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put("discovery.initial_state_timeout", "500ms")
.put(DiscoverySettings.NO_MASTER_BLOCK, "write")
.build();

View File

@ -41,7 +41,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
public class DiffableTests extends ESTestCase {
@Test
public void testImmutableMapDiff() throws IOException {
public void testJdkMapDiff() throws IOException {
ImmutableMap.Builder<String, TestDiffable> builder = ImmutableMap.builder();
builder.put("foo", new TestDiffable("1"));
builder.put("bar", new TestDiffable("2"));
@ -57,7 +57,7 @@ public class DiffableTests extends ESTestCase {
BytesStreamOutput out = new BytesStreamOutput();
diff.writeTo(out);
StreamInput in = StreamInput.wrap(out.bytes());
ImmutableMap<String, TestDiffable> serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before);
Map<String, TestDiffable> serialized = DiffableUtils.readJdkMapDiff(in, TestDiffable.PROTO).apply(before);
assertThat(serialized.size(), equalTo(3));
assertThat(serialized.get("foo").value(), equalTo("1"));
assertThat(serialized.get("baz").value(), equalTo("4"));

View File

@ -1,127 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.collect;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.test.ESTestCase;
import java.util.HashSet;
import java.util.Set;
public class CopyOnWriteHashSetTests extends ESTestCase {
private static class O {
private final int value, hashCode;
O(int value, int hashCode) {
super();
this.value = value;
this.hashCode = hashCode;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof O)) {
return false;
}
return value == ((O) obj).value;
}
}
public void testDuel() {
final int iters = scaledRandomIntBetween(2, 5);
for (int iter = 0; iter < iters; ++iter) {
final int valueBits = randomIntBetween(1, 30);
final int hashBits = randomInt(valueBits);
// we compute the total number of ops based on the bits of the hash
// since the test is much heavier when few bits are used for the hash
final int numOps = randomInt(10 + hashBits * 100);
Set<O> ref = new HashSet<>();
CopyOnWriteHashSet<O> set = new CopyOnWriteHashSet<>();
assertEquals(ref, set);
final int hashBase = randomInt();
for (int i = 0; i < numOps; ++i) {
final int v = randomInt(1 << valueBits);
final int h = (v & ((1 << hashBits) - 1)) ^ hashBase;
O key = new O(v, h);
Set<O> newRef = new HashSet<>(ref);
final CopyOnWriteHashSet<O> newSet;
if (randomBoolean()) {
// ADD
newRef.add(key);
newSet = set.copyAndAdd(key);
} else {
// REMOVE
final boolean modified = newRef.remove(key);
newSet = set.copyAndRemove(key);
if (!modified) {
assertSame(set, newSet);
}
}
assertEquals(ref, set); // make sure that the old copy has not been modified
assertEquals(newRef, newSet);
assertEquals(newSet, newRef);
assertEquals(ref.isEmpty(), set.isEmpty());
assertEquals(newRef.isEmpty(), newSet.isEmpty());
ref = newRef;
set = newSet;
}
assertEquals(ref, CopyOnWriteHashSet.copyOf(ref));
assertEquals(ImmutableSet.of(), CopyOnWriteHashSet.copyOf(ref).copyAndRemoveAll(ref));
}
}
public void testUnsupportedAPIs() {
try {
new CopyOnWriteHashSet<>().add("a");
fail();
} catch (UnsupportedOperationException e) {
// expected
}
try {
new CopyOnWriteHashSet<>().copyAndAdd("a").remove("a");
fail();
} catch (UnsupportedOperationException e) {
// expected
}
}
public void testUnsupportedValues() {
try {
new CopyOnWriteHashSet<>().copyAndAdd(null);
fail();
} catch (IllegalArgumentException e) {
// expected
}
}
}

View File

@ -0,0 +1,81 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.transport;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.Matchers.*;
/**
* Basic tests for the {@link BoundTransportAddress} class. These tests should not bind to any addresses but should
* just test things like serialization and exception handling...
*/
public class BoundTransportAddressTests extends ESTestCase {
public void testSerialization() throws Exception {
InetAddress[] inetAddresses = InetAddress.getAllByName("0.0.0.0");
List<InetSocketTransportAddress> transportAddressList = new ArrayList<>();
for (InetAddress address : inetAddresses) {
transportAddressList.add(new InetSocketTransportAddress(address, randomIntBetween(9200, 9299)));
}
final BoundTransportAddress transportAddress = new BoundTransportAddress(transportAddressList.toArray(new InetSocketTransportAddress[0]), transportAddressList.get(0));
assertThat(transportAddress.boundAddresses().length, equalTo(transportAddressList.size()));
// serialize
BytesStreamOutput streamOutput = new BytesStreamOutput();
transportAddress.writeTo(streamOutput);
StreamInput in = ByteBufferStreamInput.wrap(streamOutput.bytes());
BoundTransportAddress serializedAddress;
if (randomBoolean()) {
serializedAddress = BoundTransportAddress.readBoundTransportAddress(in);
} else {
serializedAddress = new BoundTransportAddress();
serializedAddress.readFrom(in);
}
assertThat(serializedAddress, not(sameInstance(transportAddress)));
assertThat(serializedAddress.boundAddresses().length, equalTo(transportAddress.boundAddresses().length));
assertThat(serializedAddress.publishAddress(), equalTo(transportAddress.publishAddress()));
TransportAddress[] serializedBoundAddresses = serializedAddress.boundAddresses();
TransportAddress[] boundAddresses = transportAddress.boundAddresses();
for (int i = 0; i < serializedBoundAddresses.length; i++) {
assertThat(serializedBoundAddresses[i], equalTo(boundAddresses[i]));
}
}
public void testBadBoundAddressArray() {
try {
TransportAddress[] badArray = randomBoolean() ? null : new TransportAddress[0];
new BoundTransportAddress(badArray, new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), 80));
fail("expected an exception to be thrown due to no bound address");
} catch (IllegalArgumentException e) {
//expected
}
}
}

View File

@ -95,7 +95,7 @@ public class NettyHttpServerPipeliningTests extends ESTestCase {
Settings settings = settingsBuilder().put("http.pipelining", true).build();
httpServerTransport = new CustomNettyHttpServerTransport(settings);
httpServerTransport.start();
InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
List<String> requests = Arrays.asList("/firstfast", "/slow?sleep=500", "/secondfast", "/slow?sleep=1000", "/thirdfast");
try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
@ -110,7 +110,7 @@ public class NettyHttpServerPipeliningTests extends ESTestCase {
Settings settings = settingsBuilder().put("http.pipelining", false).build();
httpServerTransport = new CustomNettyHttpServerTransport(settings);
httpServerTransport.start();
InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
List<String> requests = Arrays.asList("/slow?sleep=1000", "/firstfast", "/secondfast", "/thirdfast", "/slow?sleep=500");
try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {

View File

@ -56,7 +56,7 @@ public class NettyPipeliningDisabledIT extends ESIntegTestCase {
List<String> requests = Arrays.asList("/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/");
HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
Collection<HttpResponse> responses = nettyHttpClient.sendRequests(inetSocketTransportAddress.address(), requests.toArray(new String[]{}));

View File

@ -52,7 +52,7 @@ public class NettyPipeliningEnabledIT extends ESIntegTestCase {
List<String> requests = Arrays.asList("/", "/_nodes/stats", "/", "/_cluster/state", "/");
HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
Collection<HttpResponse> responses = nettyHttpClient.sendRequests(inetSocketTransportAddress.address(), requests.toArray(new String[]{}));

View File

@ -20,7 +20,10 @@ package org.elasticsearch.index.mapper.geo;
import org.apache.lucene.util.XGeoHashUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
@ -28,13 +31,18 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.VersionUtils;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.hamcrest.Matchers.*;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
@ -640,4 +648,52 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
mergeResult = stage1.merge(stage2.mapping(), false, false);
assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
}
public void testGeoHashSearch() throws Exception {
// create a geo_point mapping with geohash enabled and random (between 1 and 12) geohash precision
int precision = randomIntBetween(1, 12);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location")
.field("type", "geo_point").field("geohash", true).field("geohash_precision", precision).field("store", true).endObject()
.endObject().endObject().endObject().string();
// create index and add a test point (dr5regy6rc6y)
CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").addMapping("pin", mapping);
mappingRequest.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
client().prepareIndex("test", "pin", "1").setSource(jsonBuilder().startObject().startObject("location").field("lat", 40.7143528)
.field("lon", -74.0059731).endObject().endObject()).setRefresh(true).execute().actionGet();
// match all search with geohash field
SearchResponse searchResponse = client().prepareSearch().addField("location.geohash").setQuery(matchAllQuery()).execute().actionGet();
Map<String, SearchHitField> m = searchResponse.getHits().getAt(0).getFields();
// ensure single geohash was indexed
assertEquals("dr5regy6rc6y".substring(0, precision), m.get("location.geohash").value());
}
public void testGeoHashSearchWithPrefix() throws Exception {
// create a geo_point mapping with geohash enabled and random (between 1 and 12) geohash precision
int precision = randomIntBetween(1, 12);
String mapping = XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location")
.field("type", "geo_point").field("geohash_prefix", true).field("geohash_precision", precision).field("store", true)
.endObject().endObject().endObject().endObject().string();
// create index and add a test point (dr5regy6rc6y)
CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").addMapping("pin", mapping);
mappingRequest.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
client().prepareIndex("test", "pin", "1").setSource(jsonBuilder().startObject().startObject("location").field("lat", 40.7143528)
.field("lon", -74.0059731).endObject().endObject()).setRefresh(true).execute().actionGet();
// match all search with geohash field (includes prefixes)
SearchResponse searchResponse = client().prepareSearch().addField("location.geohash").setQuery(matchAllQuery()).execute().actionGet();
Map<String, SearchHitField> m = searchResponse.getHits().getAt(0).getFields();
List<Object> hashes = m.get("location.geohash").values();
final int numHashes = hashes.size();
for (int i = 0; i < numHashes; ++i) {
assertEquals("dr5regy6rc6y".substring(0, numHashes-i), hashes.get(i));
}
}
}
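Editor's note: for intuition on the prefix assertions above: each prefix of a geohash names a coarser cell that contains the full-precision point, which is what geohash_prefix indexes. A plain-Java illustration of the truncation the loop checks, longest first as the test asserts:

```java
public class GeohashPrefixes {
    public static void main(String[] args) {
        String full = "dr5regy6rc6y"; // 12-character geohash from the test above
        // Print every prefix from the full hash down to one character,
        // mirroring the order asserted in testGeoHashSearchWithPrefix.
        for (int len = full.length(); len >= 1; len--) {
            System.out.println(full.substring(0, len));
        }
    }
}
```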

Some files were not shown because too many files have changed in this diff