Merge branch 'master' into placeholder

commit 31ca8fa411
@@ -54,7 +54,7 @@ Once your changes and tests are ready to submit for review:
 1. Test your changes

     Run the test suite to make sure that nothing is broken. See the
-    [TESTING](../TESTING.asciidoc) file for help running tests.
+    [TESTING](TESTING.asciidoc) file for help running tests.

 2. Sign the Contributor License Agreement

@@ -102,5 +102,3 @@ Before submitting your changes, run the test suite to make sure that nothing is
 ```sh
 gradle check
 ```
-
-Source: [Contributing to elasticsearch](https://www.elastic.co/contributing-to-elasticsearch/)
@@ -345,20 +345,21 @@ gradle :qa:vagrant:checkVagrantVersion
 -------------------------------------

 . Download and smoke test the VMs with `gradle vagrantSmokeTest` or
-`gradle vagrantSmokeTestAllDistros`. The first time you run this it will
+`gradle -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will
 download the base images and provision the boxes and immediately quit. If you
 run this again it'll skip the download step.

-. Run the tests with `gradle checkPackages`. This will cause gradle to build
+. Run the tests with `gradle packagingTest`. This will cause gradle to build
 the tar, zip, and deb packages and all the plugins. It will then run the tests
 on ubuntu-1404 and centos-7. We chose those two distributions as the default
 because they cover deb and rpm packaging and SysVinit and systemd.

-You can run on all the VMs by running `gradle checkPackagesAllDistros`. You can
-run a particular VM with a command like `gradle checkOel7`. See `gradle tasks`
-for a list. Its important to know that if you ctrl-c any of these `gradle`
-commands then the boxes will remain running and you'll have to terminate them
-with `vagrant halt`.
+You can run on all the VMs by running `gradle -Pvagrant.boxes=all packagingTest`.
+You can run a particular VM with a command like
+`gradle -Pvagrant.boxes=oel-7 packagingTest`. See `gradle tasks` for a complete
+list of available vagrant boxes for testing. It's important to know that if you
+ctrl-c any of these `gradle` commands then the boxes will remain running and
+you'll have to terminate them with `gradle stop`.

 All the regular vagrant commands should just work so you can get a shell in a
 VM running trusty by running
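The `-Pvagrant.boxes` property above replaces the old per-distro task names. As a minimal Groovy sketch of how such a project property can select boxes (a hypothetical helper written for illustration, not the plugin's actual code):

```groovy
// Hypothetical helper, for illustration only: resolve which vagrant boxes a
// task should run against from a -Pvagrant.boxes project property.
List<String> resolveBoxes(Map<String, ?> props, List<String> allBoxes, List<String> defaults) {
    String requested = props['vagrant.boxes']
    if (requested == null) {
        return defaults                       // e.g. ['ubuntu-1404', 'centos-7']
    }
    return requested == 'all' ? allBoxes : (requested.split(',') as List)
}

assert resolveBoxes([:], ['oel-7', 'ubuntu-1404', 'centos-7'], ['ubuntu-1404', 'centos-7']) == ['ubuntu-1404', 'centos-7']
assert resolveBoxes(['vagrant.boxes': 'oel-7'], ['oel-7', 'ubuntu-1404'], ['ubuntu-1404']) == ['oel-7']
assert resolveBoxes(['vagrant.boxes': 'all'], ['oel-7', 'ubuntu-1404'], ['ubuntu-1404']) == ['oel-7', 'ubuntu-1404']
```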
@@ -387,7 +388,7 @@ We're missing the following because our tests are very linux/bash centric:

 * Windows Server 2012

-Its important to think of VMs like cattle. If they become lame you just shoot
+It's important to think of VMs like cattle. If they become lame you just shoot
 them and let vagrant reprovision them. Say you've hosed your precise VM:

 ----------------------------------------------------
@@ -432,7 +433,7 @@ and in another window:

 ----------------------------------------------------
 vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
-cd $RPM
+cd $TESTROOT
 sudo bats $BATS/*rpm*.bats
 ----------------------------------------------------

@@ -440,7 +441,7 @@ If you wanted to retest all the release artifacts on a single VM you could:

 -------------------------------------------------
 gradle prepareTestRoot
-vagrant up trusty --provider virtualbox && vagrant ssh trusty
+vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
 cd $TESTROOT
 sudo bats $BATS/*.bats
 -------------------------------------------------
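To make that by-hand loop repeatable, one could wire it into gradle. A sketch under the assumption that the box is already up and that `$TESTROOT` and `$BATS` are exported by the VM's provisioning, as in the manual steps (a hypothetical convenience task, not part of this commit):

```groovy
// Hypothetical convenience task (not part of this commit): rerun the bats
// suite on one box over vagrant ssh, after prepareTestRoot refreshes artifacts.
task retestUbuntu1404(type: Exec, dependsOn: 'prepareTestRoot') {
    executable 'vagrant'
    // -c runs the command inside the VM; $TESTROOT and $BATS come from provisioning
    args 'ssh', 'ubuntu-1404', '-c', 'cd $TESTROOT && sudo bats $BATS/*.bats'
}
```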
@@ -100,9 +100,12 @@ class BuildPlugin implements Plugin<Project> {
         println "  OS Info               : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})"
         if (gradleJavaVersionDetails != javaVersionDetails) {
             println "  JDK Version (gradle)  : ${gradleJavaVersionDetails}"
+            println "  JAVA_HOME (gradle)    : ${gradleJavaHome}"
             println "  JDK Version (compile) : ${javaVersionDetails}"
+            println "  JAVA_HOME (compile)   : ${javaHome}"
         } else {
             println "  JDK Version           : ${gradleJavaVersionDetails}"
+            println "  JAVA_HOME             : ${gradleJavaHome}"
         }

         // enforce gradle version
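For context, the "(gradle)" values describe the JVM running the build while the "(compile)" values describe the configured JAVA_HOME, so the branch only prints both when they differ. A standalone sketch of producing such a version string (an assumed helper, not BuildPlugin's actual code):

```groovy
// Standalone sketch (assumption, not BuildPlugin's actual helper): derive a
// printable version string for the JDK at a given home directory.
String javaVersionDetails(String javaHome) {
    def proc = [new File(javaHome, 'bin/java').path, '-version'].execute()
    proc.waitFor()
    // `java -version` historically writes to stderr, e.g. java version "1.8.0_92"
    return proc.err.text.readLines().first()
}

println javaVersionDetails(System.getProperty('java.home'))
```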
@@ -68,6 +68,7 @@ public class NamingConventionsTask extends LoggedExec {
      */
     project.afterEvaluate {
         doFirst {
+            args('-Djna.nosys=true')
            args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
            if (skipIntegTestInDisguise) {
                args('--skip-integ-tests-in-disguise')
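`-Djna.nosys=true` tells JNA to load its bundled native library rather than a system-installed one, which keeps the check behaving the same across machines. A JavaExec-flavoured sketch of the same invocation (assumed wiring for illustration; the real task extends LoggedExec):

```groovy
// Assumed wiring for illustration: the same command line, expressed as a
// JavaExec task (the real NamingConventionsTask extends LoggedExec instead).
task namingConventionsSketch(type: JavaExec) {
    classpath = sourceSets.test.runtimeClasspath   // assumption: java plugin applied
    main = 'org.elasticsearch.test.NamingConventionsCheck'
    jvmArgs '-Djna.nosys=true'                     // prefer JNA's bundled natives
    args '--skip-integ-tests-in-disguise'          // optional, as in the diff
}
```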
@@ -258,7 +258,7 @@ class ClusterFormationTasks {
                 'path.repo'                    : "${node.sharedDir}/repo",
                 'path.shared_data'             : "${node.sharedDir}/",
                 // Define a node attribute so we can test that it exists
-                'node.testattr'                : 'test',
+                'node.attr.testattr'           : 'test',
                 'repositories.url.allowed_urls': 'http://snapshot.test*'
         ]
         esConfig['http.port'] = node.config.httpPort
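The map above ends up as the test node's elasticsearch.yml settings. A minimal sketch of that rendering step (assumed; the real cluster setup code handles far more than this):

```groovy
// Minimal sketch of turning a settings map into elasticsearch.yml lines
// (assumption: the real task merges many more settings before writing).
Map<String, Object> esConfig = [
        'node.attr.testattr'           : 'test',
        'repositories.url.allowed_urls': 'http://snapshot.test*',
        'http.port'                    : 9400,
]
File yml = File.createTempFile('elasticsearch', '.yml')
yml.withWriter { w ->
    esConfig.each { k, v -> w << "${k}: ${v}\n" }
}
println yml.text
```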
@@ -391,6 +391,22 @@ class ClusterFormationTasks {
         return configureExecTask(name, project, setup, node, args)
     }

+    /** Wrapper for command line argument: surrounds comma with double quotes **/
+    private static class EscapeCommaWrapper {
+
+        Object arg
+
+        public String toString() {
+            String s = arg.toString()
+
+            // Surround strings that contain a comma with double quotes
+            if (s.indexOf(',') != -1) {
+                return "\"${s}\""
+            }
+            return s
+        }
+    }
+
     /** Adds a task to execute a command to help setup the cluster */
     static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
         return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) {
@@ -398,10 +414,13 @@ class ClusterFormationTasks {
             if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                 executable 'cmd'
                 args '/C', 'call'
+                // On Windows the comma character is considered a parameter separator:
+                // arguments are wrapped in an EscapeCommaWrapper that escapes commas
+                args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
             } else {
                 executable 'sh'
+                args execArgs
             }
-            args execArgs
         }
     }

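Putting the two hunks together: on Windows every argument passes through EscapeCommaWrapper before cmd sees it, and Exec only stringifies arguments at execution time. A self-contained sketch of that behaviour (plain Groovy, mirroring the class from the diff):

```groovy
// Mirrors the diff's EscapeCommaWrapper: values containing a comma get quoted
// so cmd.exe does not treat the comma as a parameter separator.
class EscapeCommaWrapper {
    Object arg
    String toString() {
        String s = arg.toString()
        return s.indexOf(',') != -1 ? "\"${s}\"" : s
    }
}

def execArgs = ['bin/plugin', 'install', 'urls=http://a,http://b']
def wrapped = execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
assert wrapped*.toString() == ['bin/plugin', 'install', '"urls=http://a,http://b"']
```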
@@ -187,7 +187,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]IngestActionFilter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]IngestProxyActionFilter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]PutPipelineTransportAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulateDocumentBaseResult.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulateExecutionService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
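Each suppression's files= value is a regular expression, and the recurring [/\\] character class matches either path separator, so a single entry covers unix and windows checkouts alike. A quick illustrative check:

```groovy
import java.util.regex.Pattern

// The [/\\] class in the suppression patterns matches '/' or '\', so one
// entry works for both unix and windows paths.
def p = Pattern.compile('core[/\\\\]src[/\\\\]main[/\\\\]java[/\\\\].*\\.java')
assert p.matcher('core/src/main/java/org/elasticsearch/Foo.java').matches()
assert p.matcher('core\\src\\main\\java\\org\\elasticsearch\\Foo.java').matches()
```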
@@ -270,7 +269,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]node[/\\]NodeClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientNodesService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterState.java" checks="LineLength" />
@@ -302,7 +300,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataMappingService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataUpdateSettingsService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]RepositoriesMetaData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]node[/\\]DiscoveryNode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]node[/\\]DiscoveryNodes.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexRoutingTable.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexShardRoutingTable.java" checks="LineLength" />
@@ -326,17 +323,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]CancelAllocationCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]MoveAllocationCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AllocationDeciders.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AwarenessAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ClusterRebalanceAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ConcurrentRebalanceAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]DiskThresholdDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]EnableAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]FilterAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]NodeVersionAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]SameShardAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ShardsLimitAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]SnapshotInProgressAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ThrottlingAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]service[/\\]InternalClusterService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Base64.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Booleans.java" checks="LineLength" />
@@ -349,9 +335,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]collect[/\\]ImmutableOpenIntMap.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]GeoDistance.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]LineStringBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]PolygonBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]ShapeBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]DefaultConstructionProxyFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]InjectorImpl.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]internal[/\\]ConstructionContext.java" checks="LineLength" />
@@ -360,7 +343,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]ElasticsearchDirectoryReader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]FilterableTermsEnum.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]FreqTermsEnum.java" checks="LineLength" />
@@ -390,7 +372,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedEsThreadPoolExecutor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadBarrier.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]ObjectParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentHelper.java" checks="LineLength" />
@@ -409,7 +390,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]NodesFaultDetection.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]membership[/\\]MembershipAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]ZenPing.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]unicast[/\\]UnicastZenPing.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PendingClusterStatesQueue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]ESFileStore.java" checks="LineLength" />
@@ -432,7 +412,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]netty[/\\]NettyHttpServerTransport.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]AlreadyExpiredException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]CompositeIndexEventListener.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexSettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLog.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
@@ -505,7 +484,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoPointFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoPointFieldMapperLegacy.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoShapeFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]AllFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]FieldNamesFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]IdFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]IndexFieldMapper.java" checks="LineLength" />
@@ -525,68 +503,12 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorFieldMapper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorQueriesRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]CommonTermsQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]CommonTermsQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]ConstantScoreQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]ExistsQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]ExistsQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]FieldMaskingSpanQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]FieldMaskingSpanQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoBoundingBoxQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceRangeQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoPolygonQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoPolygonQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoShapeQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeohashCellQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasChildQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasChildQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasParentQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasParentQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]IdsQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchAllQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchNoneQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MoreLikeThisQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MultiMatchQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]NestedQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]Operator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]PrefixQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryShardContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryStringQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryStringQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]RangeQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]RangeQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]RegexpQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]ScriptQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SimpleQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SimpleQueryStringParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanContainingQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanContainingQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanMultiTermQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanMultiTermQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanTermQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanWithinQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanWithinQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]TemplateQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]TermQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]TermsQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]TypeQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]WildcardQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]WildcardQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]DecayFunctionBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreQueryBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreQueryParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]ScoreFunctionBuilders.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]fieldvaluefactor[/\\]FieldValueFactorFunctionParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]random[/\\]RandomScoreFunctionParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]script[/\\]ScriptScoreFunctionBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]script[/\\]ScriptScoreFunctionParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]InnerHitsQueryParserHelper.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]QueryParsers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MatchQuery.java" checks="LineLength" />
@@ -727,7 +649,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]Template.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchModule.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactory.java" checks="LineLength" />
@@ -847,9 +768,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]innerhits[/\\]InnerHitsParseElement.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]script[/\\]ScriptFieldsParseElement.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]source[/\\]FetchSourceContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]AbstractHighlighterBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]FastVectorHighlighter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlightBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlightPhase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlightUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlighterParseElement.java" checks="LineLength" />
@@ -877,19 +796,14 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestContextParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]Suggesters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]ContextMapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoContextMapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoQueryContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]CandidateScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]DirectCandidateGenerator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]LaplaceScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]LinearInterpoatingScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellChecker.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]StupidBackoffScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]WordScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
@@ -950,7 +864,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]node[/\\]TransportBroadcastByNodeActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]master[/\\]TransportMasterNodeActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]replication[/\\]BroadcastReplicationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]replication[/\\]ClusterStateCreationUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]replication[/\\]TransportReplicationActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]single[/\\]instance[/\\]TransportInstanceSingleOperationActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]AbstractTermVectorsTestCase.java" checks="LineLength" />
@@ -968,9 +881,7 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]RestoreBackwardsCompatIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]AbstractClientHeadersTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]FailAndRetryMockTransport.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientHeadersTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientNodesServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientRetryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterHealthIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterInfoServiceIT.java" checks="LineLength" />
@@ -1039,7 +950,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]RebalanceAfterActiveTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ReplicaAllocatedAfterPrimaryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]RoutingNodesIntegrityTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]SameShardRoutingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ShardVersioningTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ShardsLimitAllocationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]SingleShardNoReplicasRoutingTests.java" checks="LineLength" />
@@ -1048,12 +958,10 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]TenShardsOneReplicaRoutingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ThrottlingAllocationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]UpdateNumberOfReplicasTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]DiskThresholdDeciderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]DiskThresholdDeciderUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]EnableAllocationDeciderIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]EnableAllocationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]serialization[/\\]ClusterSerializationTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]serialization[/\\]ClusterStateToStringTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]serialization[/\\]DiffableTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
@@ -1064,9 +972,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]breaker[/\\]MemoryCircuitBreakerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]ShapeBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]AbstractShapeBuilderTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]EnvelopeBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]PolygonBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]hash[/\\]MessageDigestsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]ModuleTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]BytesStreamsTests.java" checks="LineLength" />
@@ -1084,7 +989,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]LongObjectHashMapTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutorsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedExecutorsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]ObjectParserTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentFactoryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]builder[/\\]XContentBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]cbor[/\\]JsonVsCborTests.java" checks="LineLength" />
@@ -1102,7 +1006,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]unicast[/\\]UnicastZenPingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PendingClusterStatesQueueTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]document[/\\]DocumentActionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]EnvironmentTests.java" checks="LineLength" />
@@ -1110,8 +1013,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]explain[/\\]ExplainActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]fieldstats[/\\]FieldStatsIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]fieldstats[/\\]FieldStatsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]AsyncShardFetchTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaStateTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayModuleTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayTests.java" checks="LineLength" />
@@ -1165,7 +1066,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DynamicMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldTypeTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]all[/\\]SimpleAllMapperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]binary[/\\]BinaryMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]boost[/\\]CustomBoostMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]boost[/\\]FieldLevelBoostTests.java" checks="LineLength" />
@@ -1218,7 +1118,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]CommonTermsQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]FieldMaskingSpanQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoBoundingBoxQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasChildQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]HasParentQueryBuilderTests.java" checks="LineLength" />
@@ -1230,9 +1129,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]ScoreModeTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanMultiTermQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanNotQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreEquivalenceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreQueryBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]plugin[/\\]CustomQueryParserIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]QueryInnerHitsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MultiMatchQueryTests.java" checks="LineLength" />
@@ -1339,7 +1235,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueModeTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchModuleTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchWithRejectionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]MissingValueIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ChildrenIT.java" checks="LineLength" />
@@ -1376,14 +1271,9 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ChildQuerySearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ParentFieldLoadingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhasePluginIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]functionscore[/\\]DecayFunctionScoreIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]functionscore[/\\]ExplainableScriptIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]functionscore[/\\]FunctionScoreBackwardCompatibilityIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]functionscore[/\\]QueryRescorerIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoBoundingBoxIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoFilterIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoShapeQueryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlightBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]HighlighterSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]innerhits[/\\]InnerHitsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]matchedqueries[/\\]MatchedQueriesIT.java" checks="LineLength" />
@@ -1406,7 +1296,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]CustomSuggester.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CategoryContextMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]GeoContextMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]DirectCandidateGeneratorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellCheckerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]similarity[/\\]SimilarityIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]AbstractSnapshotIntegTestCase.java" checks="LineLength" />
@@ -1433,7 +1322,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]transport[/\\]netty[/\\]NettyTransportIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]transport[/\\]netty[/\\]NettyTransportMultiPortIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]transport[/\\]netty[/\\]NettyTransportMultiPortTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]transport[/\\]netty[/\\]SimpleNettyTransportTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]update[/\\]UpdateIT.java" checks="LineLength" />
@@ -1454,7 +1342,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]GeoDistanceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentileRanksTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentilesTests.java" checks="LineLength" />
@@ -1502,13 +1389,11 @@
<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]TransportDeleteByQueryActionTests.java" checks="LineLength" />
<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]deletebyquery[/\\]DeleteByQueryTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]management[/\\]AzureComputeService.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureUnicastHostsProvider.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureTestCase.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureMinimumMasterNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureSimpleTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureTwoStartedNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]AwsEc2ServiceImpl.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ec2[/\\]AwsEc2UnicastHostsProvider.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]AbstractAwsTestCase.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ec2[/\\]AmazonEC2Mock.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]discovery-gce[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]gce[/\\]GceUnicastHostsProvider.java" checks="LineLength" />
|
||||
|
@ -237,6 +237,10 @@ public abstract class BlendedTermQuery extends Query {
        return newCtx;
    }

    public List<Term> getTerms() {
        return Arrays.asList(terms);
    }

    @Override
    public String toString(String field) {
        StringBuilder builder = new StringBuilder("blended(terms:[");
@ -128,4 +128,33 @@ public class Build {
    public String toString() {
        return "[" + shortHash + "][" + date + "]";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        Build build = (Build) o;

        if (isSnapshot != build.isSnapshot) {
            return false;
        }
        if (!shortHash.equals(build.shortHash)) {
            return false;
        }
        return date.equals(build.date);

    }

    @Override
    public int hashCode() {
        int result = (isSnapshot ? 1 : 0);
        result = 31 * result + shortHash.hashCode();
        result = 31 * result + date.hashCode();
        return result;
    }
}
@ -62,8 +62,12 @@ public class Version {
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_2_1_ID = 2020199;
    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_2_2_ID = 2020299;
    public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_1_ID = 2030199;
    public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_5_0_0_alpha1_ID = 5000001;
    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final Version CURRENT = V_5_0_0_alpha1;
@ -81,8 +85,12 @@ public class Version {
        switch (id) {
            case V_5_0_0_alpha1_ID:
                return V_5_0_0_alpha1;
            case V_2_3_1_ID:
                return V_2_3_1;
            case V_2_3_0_ID:
                return V_2_3_0;
            case V_2_2_2_ID:
                return V_2_2_2;
            case V_2_2_1_ID:
                return V_2_2_1;
            case V_2_2_0_ID:
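
For readers skimming the hunk above: the numeric version ids follow a positional scheme (e.g. `V_2_2_1_ID = 2020199` maps to 2.2.1). A minimal standalone sketch of that decoding, for illustration only; the class name and the reading of the trailing digits as a build/qualifier marker are assumptions, not part of this change:

```java
// Illustrative only: decodes ids like 2020199 (2.2.1) and 5000001 (5.0.0 alpha1).
public class VersionIdDemo {
    public static void main(String[] args) {
        int[] ids = {2020199, 2020299, 2030099, 2030199, 5000001};
        for (int id : ids) {
            int major = id / 1000000;        // leading digit(s): major version
            int minor = (id / 10000) % 100;  // next two digits: minor version
            int revision = (id / 100) % 100; // next two digits: revision
            int build = id % 100;            // trailing digits: build/qualifier marker
            System.out.printf("%d -> %d.%d.%d (build marker %02d)%n", id, major, minor, revision, build);
        }
    }
}
```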
@ -19,6 +19,8 @@

package org.elasticsearch.action;

import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
@ -161,6 +163,8 @@ import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.TransportMainAction;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
@ -257,12 +261,14 @@ public class ActionModule extends AbstractModule {
        bind(ActionFilters.class).asEagerSingleton();
        bind(AutoCreateIndex.class).asEagerSingleton();
        bind(DestructiveOperations.class).asEagerSingleton();
        registerAction(MainAction.INSTANCE, TransportMainAction.class);
        registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
        registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

        registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
        registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Action for explaining shard allocation for a shard in the cluster
 */
public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainRequest,
                                                           ClusterAllocationExplainResponse,
                                                           ClusterAllocationExplainRequestBuilder> {

    public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction();
    public static final String NAME = "cluster:monitor/allocation/explain";

    private ClusterAllocationExplainAction() {
        super(NAME);
    }

    @Override
    public ClusterAllocationExplainResponse newResponse() {
        return new ClusterAllocationExplainResponse();
    }

    @Override
    public ClusterAllocationExplainRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ClusterAllocationExplainRequestBuilder(client, this);
    }
}
@ -0,0 +1,176 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
 * A request to explain the allocation of a shard in the cluster
 */
public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAllocationExplainRequest> {

    private static ObjectParser<ClusterAllocationExplainRequest, Void> PARSER = new ObjectParser("cluster/allocation/explain");
    static {
        PARSER.declareString(ClusterAllocationExplainRequest::setIndex, new ParseField("index"));
        PARSER.declareInt(ClusterAllocationExplainRequest::setShard, new ParseField("shard"));
        PARSER.declareBoolean(ClusterAllocationExplainRequest::setPrimary, new ParseField("primary"));
    }

    private String index;
    private Integer shard;
    private Boolean primary;
    private boolean includeYesDecisions = false;

    /** Explain the first unassigned shard */
    public ClusterAllocationExplainRequest() {
        this.index = null;
        this.shard = null;
        this.primary = null;
    }

    /**
     * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
     * will be picked for explanation. If no replicas are unassigned, the first assigned replica will
     * be explained.
     */
    public ClusterAllocationExplainRequest(String index, int shard, boolean primary) {
        this.index = index;
        this.shard = shard;
        this.primary = primary;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (this.useAnyUnassignedShard() == false) {
            if (this.index == null) {
                validationException = addValidationError("index must be specified", validationException);
            }
            if (this.shard == null) {
                validationException = addValidationError("shard must be specified", validationException);
            }
            if (this.primary == null) {
                validationException = addValidationError("primary must be specified", validationException);
            }
        }
        return validationException;
    }

    /**
     * Returns {@code true} iff the first unassigned shard is to be used
     */
    public boolean useAnyUnassignedShard() {
        return this.index == null && this.shard == null && this.primary == null;
    }

    public ClusterAllocationExplainRequest setIndex(String index) {
        this.index = index;
        return this;
    }

    @Nullable
    public String getIndex() {
        return this.index;
    }

    public ClusterAllocationExplainRequest setShard(Integer shard) {
        this.shard = shard;
        return this;
    }

    @Nullable
    public Integer getShard() {
        return this.shard;
    }

    public ClusterAllocationExplainRequest setPrimary(Boolean primary) {
        this.primary = primary;
        return this;
    }

    @Nullable
    public Boolean isPrimary() {
        return this.primary;
    }

    public void includeYesDecisions(boolean includeYesDecisions) {
        this.includeYesDecisions = includeYesDecisions;
    }

    /** Returns true if all decisions should be included. Otherwise only "NO" and "THROTTLE" decisions are returned */
    public boolean includeYesDecisions() {
        return this.includeYesDecisions;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");
        if (this.useAnyUnassignedShard()) {
            sb.append("useAnyUnassignedShard=true");
        } else {
            sb.append("index=").append(index);
            sb.append(",shard=").append(shard);
            sb.append(",primary?=").append(primary);
        }
        sb.append(",includeYesDecisions?=").append(includeYesDecisions);
        return sb.toString();
    }

    public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException {
        ClusterAllocationExplainRequest req = PARSER.parse(parser, new ClusterAllocationExplainRequest());
        Exception e = req.validate();
        if (e != null) {
            throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request", e);
        }
        return req;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.index = in.readOptionalString();
        this.shard = in.readOptionalVInt();
        this.primary = in.readOptionalBoolean();
        this.includeYesDecisions = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalString(index);
        out.writeOptionalVInt(shard);
        out.writeOptionalBoolean(primary);
        out.writeBoolean(includeYesDecisions);
    }
}
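
A quick usage sketch of the tri-state request above, using only the classes introduced in this change (the index name is hypothetical): a default-constructed request targets the first unassigned shard and requires no fields, while a fully-specified request must carry index, shard, and primary.

```java
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;

public class ExplainRequestDemo {
    public static void main(String[] args) {
        // No index/shard/primary set: explain the first unassigned shard.
        ClusterAllocationExplainRequest any = new ClusterAllocationExplainRequest();
        assert any.useAnyUnassignedShard();
        assert any.validate() == null; // nothing is required in this mode

        // Fully specified: validate() demands all three fields ("my-index" is made up).
        ClusterAllocationExplainRequest specific = new ClusterAllocationExplainRequest("my-index", 0, true);
        assert specific.useAnyUnassignedShard() == false;
        assert specific.validate() == null;
    }
}
```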
@ -0,0 +1,66 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for requests to explain the allocation of a shard in the cluster
 */
public class ClusterAllocationExplainRequestBuilder
        extends MasterNodeOperationRequestBuilder<ClusterAllocationExplainRequest,
                                                  ClusterAllocationExplainResponse,
                                                  ClusterAllocationExplainRequestBuilder> {

    public ClusterAllocationExplainRequestBuilder(ElasticsearchClient client, ClusterAllocationExplainAction action) {
        super(client, action, new ClusterAllocationExplainRequest());
    }

    /** The index name to use when finding the shard to explain */
    public ClusterAllocationExplainRequestBuilder setIndex(String index) {
        request.setIndex(index);
        return this;
    }

    /** The shard number to use when finding the shard to explain */
    public ClusterAllocationExplainRequestBuilder setShard(int shard) {
        request.setShard(shard);
        return this;
    }

    /** Whether the primary or replica should be explained */
    public ClusterAllocationExplainRequestBuilder setPrimary(boolean primary) {
        request.setPrimary(primary);
        return this;
    }

    /**
     * Signal that the first unassigned shard should be used
     */
    public ClusterAllocationExplainRequestBuilder useAnyUnassignedShard() {
        request.setIndex(null);
        request.setShard(null);
        request.setPrimary(null);
        return this;
    }

}
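
A hedged sketch of driving the builder above; `client` is an assumed `ElasticsearchClient` instance obtained elsewhere, and the index name is hypothetical:

```java
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class ExplainBuilderDemo {
    // Obtaining a connected client is out of scope here; it is passed in.
    static ClusterAllocationExplainResponse explain(ElasticsearchClient client) {
        return new ClusterAllocationExplainRequestBuilder(client, ClusterAllocationExplainAction.INSTANCE)
                .setIndex("my-index") // hypothetical index name
                .setShard(0)
                .setPrimary(true)
                .get();               // blocks until the response arrives
    }
}
```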
@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 * Explanation response for a shard in the cluster
 */
public class ClusterAllocationExplainResponse extends ActionResponse {

    private ClusterAllocationExplanation cae;

    public ClusterAllocationExplainResponse() {
    }

    public ClusterAllocationExplainResponse(ClusterAllocationExplanation cae) {
        this.cae = cae;
    }

    /**
     * Return the explanation for shard allocation in the cluster
     */
    public ClusterAllocationExplanation getExplanation() {
        return this.cae;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.cae = new ClusterAllocationExplanation(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        cae.writeTo(out);
    }
}
@ -0,0 +1,213 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * A {@code ClusterAllocationExplanation} is an explanation of why a shard may or may not be allocated to nodes. It also includes weights
 * for where the shard is likely to be assigned. It is an immutable class.
 */
public final class ClusterAllocationExplanation implements ToXContent, Writeable<ClusterAllocationExplanation> {

    private final ShardId shard;
    private final boolean primary;
    private final String assignedNodeId;
    private final Map<DiscoveryNode, Decision> nodeToDecision;
    private final Map<DiscoveryNode, Float> nodeWeights;
    private final UnassignedInfo unassignedInfo;
    private final long remainingDelayNanos;

    public ClusterAllocationExplanation(StreamInput in) throws IOException {
        this.shard = ShardId.readShardId(in);
        this.primary = in.readBoolean();
        this.assignedNodeId = in.readOptionalString();
        this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);

        Map<DiscoveryNode, Decision> ntd = null;
        int size = in.readVInt();
        ntd = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            DiscoveryNode dn = new DiscoveryNode(in);
            Decision decision = Decision.readFrom(in);
            ntd.put(dn, decision);
        }
        this.nodeToDecision = ntd;

        Map<DiscoveryNode, Float> ntw = null;
        size = in.readVInt();
        ntw = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            DiscoveryNode dn = new DiscoveryNode(in);
            float weight = in.readFloat();
            ntw.put(dn, weight);
        }
        this.nodeWeights = ntw;
        remainingDelayNanos = in.readVLong();
    }

    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId,
                                        UnassignedInfo unassignedInfo, Map<DiscoveryNode, Decision> nodeToDecision,
                                        Map<DiscoveryNode, Float> nodeWeights, long remainingDelayNanos) {
        this.shard = shard;
        this.primary = primary;
        this.assignedNodeId = assignedNodeId;
        this.unassignedInfo = unassignedInfo;
        this.nodeToDecision = nodeToDecision == null ? Collections.emptyMap() : nodeToDecision;
        this.nodeWeights = nodeWeights == null ? Collections.emptyMap() : nodeWeights;
        this.remainingDelayNanos = remainingDelayNanos;
    }

    public ShardId getShard() {
        return this.shard;
    }

    public boolean isPrimary() {
        return this.primary;
    }

    /** Return true if the shard is assigned to a node */
    public boolean isAssigned() {
        return this.assignedNodeId != null;
    }

    /** Return the assigned node id or null if not assigned */
    @Nullable
    public String getAssignedNodeId() {
        return this.assignedNodeId;
    }

    /** Return the unassigned info for the shard or null if the shard is assigned */
    @Nullable
    public UnassignedInfo getUnassignedInfo() {
        return this.unassignedInfo;
    }

    /** Return a map of node to decision for shard allocation */
    public Map<DiscoveryNode, Decision> getNodeDecisions() {
        return this.nodeToDecision;
    }

    /**
     * Return a map of node to balancer "weight" for allocation. Higher weights mean the balancer wants to allocate the shard to that node
     * more
     */
    public Map<DiscoveryNode, Float> getNodeWeights() {
        return this.nodeWeights;
    }

    /** Return the remaining allocation delay for this shard in nanoseconds */
    public long getRemainingDelayNanos() {
        return this.remainingDelayNanos;
    }

    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(); {
            builder.startObject("shard"); {
                builder.field("index", shard.getIndexName());
                builder.field("index_uuid", shard.getIndex().getUUID());
                builder.field("id", shard.getId());
                builder.field("primary", primary);
            }
            builder.endObject(); // end shard
            builder.field("assigned", this.assignedNodeId != null);
            // If assigned, show the node id of the node it's assigned to
            if (assignedNodeId != null) {
                builder.field("assigned_node_id", this.assignedNodeId);
            }
            // If we have unassigned info, show that
            if (unassignedInfo != null) {
                unassignedInfo.toXContent(builder, params);
                long delay = unassignedInfo.getLastComputedLeftDelayNanos();
                builder.field("allocation_delay", TimeValue.timeValueNanos(delay));
                builder.field("allocation_delay_ms", TimeValue.timeValueNanos(delay).millis());
                builder.field("remaining_delay", TimeValue.timeValueNanos(remainingDelayNanos));
                builder.field("remaining_delay_ms", TimeValue.timeValueNanos(remainingDelayNanos).millis());
            }
            builder.startObject("nodes");
            for (Map.Entry<DiscoveryNode, Float> entry : nodeWeights.entrySet()) {
                DiscoveryNode node = entry.getKey();
                builder.startObject(node.getId()); {
                    builder.field("node_name", node.getName());
                    builder.startObject("node_attributes"); {
                        for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
                            builder.field(attrEntry.getKey(), attrEntry.getValue());
                        }
                    }
                    builder.endObject(); // end attributes
                    Decision d = nodeToDecision.get(node);
                    if (node.getId().equals(assignedNodeId)) {
                        builder.field("final_decision", "CURRENTLY_ASSIGNED");
                    } else {
                        builder.field("final_decision", d.type().toString());
                    }
                    builder.field("weight", entry.getValue());
                    d.toXContent(builder, params);
                }
                builder.endObject(); // end node <uuid>
            }
            builder.endObject(); // end nodes
        }
        builder.endObject(); // end wrapping object
        return builder;
    }

    @Override
    public ClusterAllocationExplanation readFrom(StreamInput in) throws IOException {
        return new ClusterAllocationExplanation(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        this.getShard().writeTo(out);
        out.writeBoolean(this.isPrimary());
        out.writeOptionalString(this.getAssignedNodeId());
        out.writeOptionalWriteable(this.getUnassignedInfo());

        Map<DiscoveryNode, Decision> ntd = this.getNodeDecisions();
        out.writeVInt(ntd.size());
        for (Map.Entry<DiscoveryNode, Decision> entry : ntd.entrySet()) {
            entry.getKey().writeTo(out);
            Decision.writeTo(entry.getValue(), out);
        }
        Map<DiscoveryNode, Float> ntw = this.getNodeWeights();
        out.writeVInt(ntw.size());
        for (Map.Entry<DiscoveryNode, Float> entry : ntw.entrySet()) {
            entry.getKey().writeTo(out);
            out.writeFloat(entry.getValue());
        }
        out.writeVLong(remainingDelayNanos);
    }
}
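
Since higher balancer weights indicate preferred nodes, a consumer might pick the most-favored node like this; a sketch only, where `explanation` is an assumed `ClusterAllocationExplanation` taken from a response:

```java
import java.util.Map;

import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
import org.elasticsearch.cluster.node.DiscoveryNode;

public class FavoredNodeDemo {
    static DiscoveryNode mostFavoredNode(ClusterAllocationExplanation explanation) {
        DiscoveryNode favored = null;
        float best = Float.NEGATIVE_INFINITY;
        // Higher weight == the balancer would rather place the shard there.
        for (Map.Entry<DiscoveryNode, Float> e : explanation.getNodeWeights().entrySet()) {
            if (e.getValue() > best) {
                best = e.getValue();
                favored = e.getKey();
            }
        }
        return favored; // null when no weights were reported
    }
}
```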
@ -0,0 +1,200 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaData.Custom;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
 * master node in the cluster.
 */
public class TransportClusterAllocationExplainAction
        extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {

    private final AllocationService allocationService;
    private final ClusterInfoService clusterInfoService;
    private final AllocationDeciders allocationDeciders;
    private final ShardsAllocator shardAllocator;

    @Inject
    public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                   ThreadPool threadPool, ActionFilters actionFilters,
                                                   IndexNameExpressionResolver indexNameExpressionResolver,
                                                   AllocationService allocationService, ClusterInfoService clusterInfoService,
                                                   AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator) {
        super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
                indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
        this.allocationService = allocationService;
        this.clusterInfoService = clusterInfoService;
        this.allocationDeciders = allocationDeciders;
        this.shardAllocator = shardAllocator;
    }

    @Override
    protected String executor() {
        return ThreadPool.Names.MANAGEMENT;
    }

    @Override
    protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
    }

    @Override
    protected ClusterAllocationExplainResponse newResponse() {
        return new ClusterAllocationExplainResponse();
    }

    /**
     * Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is not true,
     * only non-YES (NO and THROTTLE) decisions are returned.
     */
    public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) {
        Decision d = allocation.deciders().canAllocate(shard, node, allocation);
        if (includeYesDecisions) {
            return d;
        } else {
            Decision.Multi nonYesDecisions = new Decision.Multi();
            List<Decision> decisions = d.getDecisions();
            for (Decision decision : decisions) {
                if (decision.type() != Decision.Type.YES) {
                    nonYesDecisions.add(decision);
                }
            }
            return nonYesDecisions;
        }
    }

    /**
     * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
     * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
     */
    public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
                                                            boolean includeYesDecisions, ShardsAllocator shardAllocator) {
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // get the existing unassigned info if available
        UnassignedInfo ui = shard.unassignedInfo();

        RoutingNodesIterator iter = routingNodes.nodes();
        Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
        while (iter.hasNext()) {
            RoutingNode node = iter.next();
            DiscoveryNode discoNode = node.node();
            if (discoNode.isDataNode()) {
                Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
                nodeToDecision.put(discoNode, d);
            }
        }
        long remainingDelayNanos = 0;
        if (ui != null) {
            final MetaData metadata = allocation.metaData();
            final Settings indexSettings = metadata.index(shard.index()).getSettings();
            remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
        }
        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), shard.currentNodeId(), ui, nodeToDecision,
                shardAllocator.weighShard(allocation, shard), remainingDelayNanos);
    }

    @Override
    protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                   final ActionListener<ClusterAllocationExplainResponse> listener) {
        final RoutingNodes routingNodes = state.getRoutingNodes();
        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
                clusterInfoService.getClusterInfo(), System.nanoTime());

        ShardRouting shardRouting = null;
        if (request.useAnyUnassignedShard()) {
            // If we can use any shard, just pick the first unassigned one (if there are any)
            RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
            if (ui.hasNext()) {
                shardRouting = ui.next();
            }
        } else {
            String index = request.getIndex();
            int shard = request.getShard();
            if (request.isPrimary()) {
                // If we're looking for the primary shard, there's only one copy, so pick it directly
                shardRouting = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
            } else {
                // If looking for a replica, go through all the replica shards
                List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
                if (replicaShardRoutings.size() > 0) {
                    // Pick the first replica at the very least
                    shardRouting = replicaShardRoutings.get(0);
                    // In case there are multiple replicas where some are assigned and some aren't,
                    // try to find one that is unassigned at least
                    for (ShardRouting replica : replicaShardRoutings) {
                        if (replica.unassigned()) {
                            shardRouting = replica;
                            break;
                        }
                    }
                }
            }
        }

        if (shardRouting == null) {
            listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
            return;
        }
        logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);

        ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
                request.includeYesDecisions(), shardAllocator);
        listener.onResponse(new ClusterAllocationExplainResponse(cae));
    }
}
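
Tying the pieces together, the action can be invoked like any other transport action. A minimal sketch, assuming a connected `ElasticsearchClient` (error handling elided; not part of this change):

```java
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class ExplainActionDemo {
    static ClusterAllocationExplainResponse explainFirstUnassigned(ElasticsearchClient client) {
        // An empty request makes the master pick the first unassigned shard,
        // exactly as masterOperation above does.
        ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest();
        return client.execute(ClusterAllocationExplainAction.INSTANCE, request).actionGet();
    }
}
```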
@ -143,7 +143,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
        assert waitFor >= 0;
        final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
        final ClusterState state = observer.observedState();
        if (waitFor == 0 || request.timeout().millis() == 0) {
        if (request.timeout().millis() == 0) {
            listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
            return;
        }
@ -19,9 +19,9 @@

package org.elasticsearch.action.admin.cluster.node.info;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@ -68,10 +68,10 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To

        builder.startObject("nodes");
        for (NodeInfo nodeInfo : this) {
            builder.startObject(nodeInfo.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
            builder.startObject(nodeInfo.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);

            builder.field("name", nodeInfo.getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("transport_address", nodeInfo.getNode().address().toString());
            builder.field("name", nodeInfo.getNode().getName(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("transport_address", nodeInfo.getNode().getAddress().toString());
            builder.field("host", nodeInfo.getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("ip", nodeInfo.getNode().getHostAddress(), XContentBuilder.FieldCaseConversion.NONE);

@ -84,15 +84,20 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
                }
            }

            if (!nodeInfo.getNode().attributes().isEmpty()) {
            builder.startArray("roles");
            for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
                builder.value(role.getRoleName());
            }
            builder.endArray();

            if (!nodeInfo.getNode().getAttributes().isEmpty()) {
                builder.startObject("attributes");
                for (ObjectObjectCursor<String, String> attr : nodeInfo.getNode().attributes()) {
                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
                for (Map.Entry<String, String> entry : nodeInfo.getNode().getAttributes().entrySet()) {
                    builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
                }
                builder.endObject();
            }

            if (nodeInfo.getSettings() != null) {
                builder.startObject("settings");
                Settings settings = nodeInfo.getSettings();
@ -48,18 +48,14 @@ public final class LivenessResponse extends ActionResponse {
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        clusterName = ClusterName.readClusterName(in);
        if (in.readBoolean()) {
            node = DiscoveryNode.readNode(in);
        } else {
            node = null;
        }
        node = in.readOptionalWriteable(DiscoveryNode::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        clusterName.writeTo(out);
        out.writeOptionalStreamable(node);
        out.writeOptionalWriteable(node);
    }

    public ClusterName getClusterName() {
@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.node.stats;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
@ -41,6 +40,7 @@ import org.elasticsearch.threadpool.ThreadPoolStats;
import org.elasticsearch.transport.TransportStats;

import java.io.IOException;
import java.util.Map;

/**
 * Node statistics (dynamic, changes depending on when created).
@ -299,15 +299,21 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (!params.param("node_info_format", "default").equals("none")) {
            builder.field("name", getNode().name(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("transport_address", getNode().address().toString(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("name", getNode().getName(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("transport_address", getNode().getAddress().toString(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("host", getNode().getHostName(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("ip", getNode().getAddress(), XContentBuilder.FieldCaseConversion.NONE);

            if (!getNode().attributes().isEmpty()) {
            builder.startArray("roles");
            for (DiscoveryNode.Role role : getNode().getRoles()) {
                builder.value(role.getRoleName());
            }
            builder.endArray();

            if (!getNode().getAttributes().isEmpty()) {
                builder.startObject("attributes");
                for (ObjectObjectCursor<String, String> attr : getNode().attributes()) {
                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
                for (Map.Entry<String, String> attrEntry : getNode().getAttributes().entrySet()) {
                    builder.field(attrEntry.getKey(), attrEntry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
                }
                builder.endObject();
            }
@ -65,7 +65,7 @@ public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements

        builder.startObject("nodes");
        for (NodeStats nodeStats : this) {
            builder.startObject(nodeStats.getNode().id(), XContentBuilder.FieldCaseConversion.NONE);
            builder.startObject(nodeStats.getNode().getId(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("timestamp", nodeStats.getTimestamp());
            nodeStats.toXContent(builder, params);

@ -19,8 +19,6 @@

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
@ -30,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;
import java.util.ArrayList;
@ -39,6 +38,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Returns the list of tasks currently running on the nodes
@ -49,6 +49,8 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {

    private Map<DiscoveryNode, List<TaskInfo>> nodes;

    private List<TaskGroup> groups;

    public ListTasksResponse() {
    }

@ -96,6 +98,41 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
        return nodeTasks;
    }

    public List<TaskGroup> getTaskGroups() {
        if (groups == null) {
            buildTaskGroups();
        }
        return groups;
    }

    private void buildTaskGroups() {
        Map<TaskId, TaskGroup.Builder> taskGroups = new HashMap<>();
        List<TaskGroup.Builder> topLevelTasks = new ArrayList<>();
        // First populate all tasks
        for (TaskInfo taskInfo : this.tasks) {
            taskGroups.put(taskInfo.getTaskId(), TaskGroup.builder(taskInfo));
        }

        // Now go through all task group builders and add children to their parents
        for (TaskGroup.Builder taskGroup : taskGroups.values()) {
            TaskId parentTaskId = taskGroup.getTaskInfo().getParentTaskId();
            if (parentTaskId.isSet()) {
                TaskGroup.Builder parentTask = taskGroups.get(parentTaskId);
                if (parentTask != null) {
                    // we found the parent in the list of tasks - add the child to its parent
                    parentTask.addGroup(taskGroup);
                } else {
                    // we got a zombie or the parent was filtered out - add it to the top task list
                    topLevelTasks.add(taskGroup);
                }
            } else {
                // top level task - add it to the top task list
                topLevelTasks.add(taskGroup);
            }
        }
        this.groups = Collections.unmodifiableList(topLevelTasks.stream().map(TaskGroup.Builder::build).collect(Collectors.toList()));
    }

    public List<TaskInfo> getTasks() {
        return tasks;
    }
@ -121,33 +158,48 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
            }
            builder.endArray();
        }
        String groupBy = params.param("group_by", "nodes");
        if ("nodes".equals(groupBy)) {
            builder.startObject("nodes");
            for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
                DiscoveryNode node = entry.getKey();
                builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
                builder.field("name", node.getName());
                builder.field("transport_address", node.getAddress().toString());
                builder.field("host", node.getHostName());
                builder.field("ip", node.getAddress());

        builder.startObject("nodes");
        for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
            DiscoveryNode node = entry.getKey();
            builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("name", node.name());
            builder.field("transport_address", node.address().toString());
            builder.field("host", node.getHostName());
            builder.field("ip", node.getAddress());
                builder.startArray("roles");
                for (DiscoveryNode.Role role : node.getRoles()) {
                    builder.value(role.getRoleName());
                }
                builder.endArray();

            if (!node.attributes().isEmpty()) {
                builder.startObject("attributes");
                for (ObjectObjectCursor<String, String> attr : node.attributes()) {
                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
                if (!node.getAttributes().isEmpty()) {
                    builder.startObject("attributes");
                    for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
                        builder.field(attrEntry.getKey(), attrEntry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
                    }
                    builder.endObject();
                }
                builder.startObject("tasks");
                for(TaskInfo task : entry.getValue()) {
                    builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
                    task.toXContent(builder, params);
                    builder.endObject();
                }
                builder.endObject();
                builder.endObject();
            }
        } else if ("parents".equals(groupBy)) {
            builder.startObject("tasks");
            for(TaskInfo task : entry.getValue()) {
                builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
                task.toXContent(builder, params);
            for (TaskGroup group : getTaskGroups()) {
                builder.startObject(group.getTaskInfo().getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
                group.toXContent(builder, params);
                builder.endObject();
            }
            builder.endObject();
            builder.endObject();
        }
        builder.endObject();
        return builder;
    }
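
The `group_by` parameter consumed in `toXContent` above selects between the per-node and per-parent layouts. A hedged rendering sketch, where `response` is an assumed `ListTasksResponse` obtained from a tasks call:

```java
import java.util.Collections;

import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class TaskGroupingDemo {
    static String renderGroupedByParents(ListTasksResponse response) throws Exception {
        // "group_by" defaults to "nodes"; "parents" switches to the task-tree layout.
        ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("group_by", "parents"));
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        response.toXContent(builder, params);
        builder.endObject();
        return builder.string(); // the rendered JSON
    }
}
```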
@ -0,0 +1,94 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Information about a currently running task and all its subtasks.
 */
public class TaskGroup implements ToXContent {

    private final TaskInfo task;

    private final List<TaskGroup> childTasks;

    public TaskGroup(TaskInfo task, List<TaskGroup> childTasks) {
        this.task = task;
        this.childTasks = Collections.unmodifiableList(new ArrayList<>(childTasks));
    }

    public static Builder builder(TaskInfo taskInfo) {
        return new Builder(taskInfo);
    }

    public static class Builder {
        private TaskInfo taskInfo;
        private List<Builder> childTasks;

        private Builder(TaskInfo taskInfo) {
            this.taskInfo = taskInfo;
            childTasks = new ArrayList<>();
        }

        public void addGroup(Builder builder) {
            childTasks.add(builder);
        }

        public TaskInfo getTaskInfo() {
            return taskInfo;
        }

        public TaskGroup build() {
            return new TaskGroup(taskInfo, childTasks.stream().map(Builder::build).collect(Collectors.toList()));
        }
    }

    public TaskInfo getTaskInfo() {
        return task;
    }

    public List<TaskGroup> getChildTasks() {
        return childTasks;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        task.toXContent(builder, params);
        if (childTasks.isEmpty() == false) {
            builder.startArray("children");
            for (TaskGroup taskGroup : childTasks) {
                builder.startObject();
                taskGroup.toXContent(builder, params);
                builder.endObject();
            }
            builder.endArray();
        }
        return builder;
    }
}
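
The new `TaskGroup` pairs an immutable node with an immutable child list, assembled through a mutable `Builder`. A minimal usage sketch (the `parentInfo` and `childInfo` values are placeholders for real `TaskInfo` instances the caller already holds; this snippet is not part of the commit):

```java
// Illustrative only: assemble a two-level task tree with the Builder above.
TaskGroup.Builder parent = TaskGroup.builder(parentInfo);
TaskGroup.Builder child = TaskGroup.builder(childInfo);
parent.addGroup(child);

TaskGroup tree = parent.build();           // Builder::build runs bottom-up
assert tree.getChildTasks().size() == 1;   // child list is unmodifiable
```

Because `build()` maps every child builder through `Builder::build`, the resulting tree is immutable from the root down.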
@ -57,10 +57,12 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

    private final Task.Status status;

    private final boolean cancellable;

    private final TaskId parentTaskId;

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, long startTime,
                    long runningTimeNanos, TaskId parentTaskId) {
                    long runningTimeNanos, boolean cancellable, TaskId parentTaskId) {
        this.node = node;
        this.taskId = new TaskId(node.getId(), id);
        this.type = type;
@ -69,11 +71,12 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        this.status = status;
        this.startTime = startTime;
        this.runningTimeNanos = runningTimeNanos;
        this.cancellable = cancellable;
        this.parentTaskId = parentTaskId;
    }

    public TaskInfo(StreamInput in) throws IOException {
        node = DiscoveryNode.readNode(in);
        node = new DiscoveryNode(in);
        taskId = new TaskId(node.getId(), in.readLong());
        type = in.readString();
        action = in.readString();
@ -85,6 +88,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        }
        startTime = in.readLong();
        runningTimeNanos = in.readLong();
        cancellable = in.readBoolean();
        parentTaskId = new TaskId(in);
    }

@ -134,6 +138,13 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        return runningTimeNanos;
    }

    /**
     * Returns true if the task supports cancellation
     */
    public boolean isCancellable() {
        return cancellable;
    }

    /**
     * Returns the parent task id
     */
@ -161,6 +172,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        }
        out.writeLong(startTime);
        out.writeLong(runningTimeNanos);
        out.writeBoolean(cancellable);
        parentTaskId.writeTo(out);
    }

@ -178,6 +190,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
        }
        builder.dateValueField("start_time_in_millis", "start_time", startTime);
        builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS);
        builder.field("cancellable", cancellable);
        if (parentTaskId.isSet()) {
            builder.field("parent_task_id", parentTaskId.toString());
        }
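
A note on the `cancellable` addition above: `StreamInput`/`StreamOutput` encode fields positionally, so the new boolean must occupy the same slot in the stream constructor and in `writeTo`. A self-contained sketch of that invariant using plain JDK streams (field names mirror the diff; the demo class is hypothetical):

```java
import java.io.*;

// Positional encoding demo: the reader must consume fields in exactly the
// order the writer produced them, which is why `cancellable` appears at the
// same point in writeTo() and in the TaskInfo(StreamInput) constructor above.
public class WireOrderDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            out.writeLong(42L);       // startTime
            out.writeLong(7L);        // runningTimeNanos
            out.writeBoolean(true);   // cancellable (the newly added field)
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            long startTime = in.readLong();
            long runningTimeNanos = in.readLong();
            boolean cancellable = in.readBoolean(); // same slot as on the write side
            System.out.println(startTime + " " + runningTimeNanos + " " + cancellable);
        }
    }
}
```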
@ -148,7 +148,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
     * @return this request
     */
    public PutRepositoryRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }
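
The `Settings.settingsBuilder()` to `Settings.builder()` rename above recurs throughout the rest of this commit; the fluent chain is otherwise unchanged. A hedged before/after sketch (the `source` payload is an arbitrary example, not from this commit):

```java
// Before (removed factory method):
// Settings settings = Settings.settingsBuilder().loadFromSource(source).build();

// After: the same chain under the shorter factory-method name.
String source = "{\"index.number_of_replicas\": 1}";   // example settings JSON
Settings settings = Settings.builder().loadFromSource(source).build();
```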
@ -55,7 +55,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
        clusterName = ClusterName.readClusterName(in);
        nodes = new DiscoveryNode[in.readVInt()];
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = DiscoveryNode.readNode(in);
            nodes[i] = new DiscoveryNode(in);
        }
    }

@ -86,8 +86,8 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.NODES);
        for (DiscoveryNode node : nodes) {
            builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field(Fields.NAME, node.name(), XContentBuilder.FieldCaseConversion.NONE);
            builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field(Fields.NAME, node.getName(), XContentBuilder.FieldCaseConversion.NONE);
            builder.endObject();
        }
        builder.endObject();
@ -85,7 +85,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
     * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
     */
    public ClusterUpdateSettingsRequest transientSettings(String source) {
        this.transientSettings = Settings.settingsBuilder().loadFromSource(source).build();
        this.transientSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -124,7 +124,7 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
     * Sets the source containing the persistent settings to be updated. They will get applied across restarts
     */
    public ClusterUpdateSettingsRequest persistentSettings(String source) {
        this.persistentSettings = Settings.settingsBuilder().loadFromSource(source).build();
        this.persistentSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -32,8 +32,8 @@ import static org.elasticsearch.cluster.ClusterState.builder;
 * due to the update.
 */
final class SettingsUpdater {
    final Settings.Builder transientUpdates = Settings.settingsBuilder();
    final Settings.Builder persistentUpdates = Settings.settingsBuilder();
    final Settings.Builder transientUpdates = Settings.builder();
    final Settings.Builder persistentUpdates = Settings.builder();
    private final ClusterSettings clusterSettings;

    SettingsUpdater(ClusterSettings clusterSettings) {
@ -50,11 +50,11 @@ final class SettingsUpdater {

    synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) {
        boolean changed = false;
        Settings.Builder transientSettings = Settings.settingsBuilder();
        Settings.Builder transientSettings = Settings.builder();
        transientSettings.put(currentState.metaData().transientSettings());
        changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");

        Settings.Builder persistentSettings = Settings.settingsBuilder();
        Settings.Builder persistentSettings = Settings.builder();
        persistentSettings.put(currentState.metaData().persistentSettings());
        changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");

@ -114,7 +114,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
        // We're about to send a second update task, so we need to check if we're still the elected master
        // For example the minimum_master_node could have been breached and we're no longer elected master,
        // so we should *not* execute the reroute.
        if (!clusterService.state().nodes().localNodeMaster()) {
        if (!clusterService.state().nodes().isLocalNodeElectedMaster()) {
            logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
            listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
            return;

@ -61,7 +61,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
        }
        nodes = new DiscoveryNode[in.readVInt()];
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = DiscoveryNode.readNode(in);
            nodes[i] = new DiscoveryNode(in);
        }

    }

@ -299,7 +299,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
     * @return this request
     */
    public CreateSnapshotRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -324,7 +324,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
     * @return this request
     */
    public RestoreSnapshotRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -441,7 +441,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
     * Sets settings that should be added/changed in all restored indices
     */
    public RestoreSnapshotRequest indexSettings(String source) {
        this.indexSettings = Settings.settingsBuilder().loadFromSource(source).build();
        this.indexSettings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -107,7 +107,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
    protected NodeSnapshotStatus nodeOperation(NodeRequest request) {
        Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
        try {
            String nodeId = clusterService.localNode().id();
            String nodeId = clusterService.localNode().getId();
            for (SnapshotId snapshotId : request.snapshotIds) {
                Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId);
                if (shardsStatus == null) {
@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -41,40 +42,57 @@ import org.elasticsearch.plugins.PluginInfo;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ClusterStatsNodes implements ToXContent, Streamable {
public class ClusterStatsNodes implements ToXContent, Writeable<ClusterStatsNodes> {

    private Counts counts;
    private Set<Version> versions;
    private OsStats os;
    private ProcessStats process;
    private JvmStats jvm;
    private FsInfo.Path fs;
    private Set<PluginInfo> plugins;
    private final Counts counts;
    private final Set<Version> versions;
    private final OsStats os;
    private final ProcessStats process;
    private final JvmStats jvm;
    private final FsInfo.Path fs;
    private final Set<PluginInfo> plugins;

    private ClusterStatsNodes() {
    ClusterStatsNodes(StreamInput in) throws IOException {
        this.counts = new Counts(in);

        int size = in.readVInt();
        this.versions = new HashSet<>(size);
        for (int i = 0; i < size; i++) {
            this.versions.add(Version.readVersion(in));
        }

        this.os = new OsStats(in);
        this.process = new ProcessStats(in);
        this.jvm = new JvmStats(in);
        this.fs = FsInfo.Path.readInfoFrom(in);

        size = in.readVInt();
        this.plugins = new HashSet<>(size);
        for (int i = 0; i < size; i++) {
            this.plugins.add(PluginInfo.readFromStream(in));
        }
    }

    public ClusterStatsNodes(ClusterStatsNodeResponse[] nodeResponses) {
        this.counts = new Counts();
    ClusterStatsNodes(ClusterStatsNodeResponse[] nodeResponses) {
        this.versions = new HashSet<>();
        this.os = new OsStats();
        this.jvm = new JvmStats();
        this.fs = new FsInfo.Path();
        this.plugins = new HashSet<>();
        this.process = new ProcessStats();

        Set<InetAddress> seenAddresses = new HashSet<>(nodeResponses.length);

        List<NodeInfo> nodeInfos = new ArrayList<>();
        List<NodeStats> nodeStats = new ArrayList<>();
        for (ClusterStatsNodeResponse nodeResponse : nodeResponses) {

            counts.addNodeInfo(nodeResponse.nodeInfo());
            versions.add(nodeResponse.nodeInfo().getVersion());
            process.addNodeStats(nodeResponse.nodeStats());
            jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
            plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());
            nodeInfos.add(nodeResponse.nodeInfo());
            nodeStats.add(nodeResponse.nodeStats());
            this.versions.add(nodeResponse.nodeInfo().getVersion());
            this.plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());

            // now do the stats that should be deduped by hardware (implemented by ip deduping)
            TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
@ -82,19 +100,19 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
            if (publishAddress.uniqueAddressTypeId() == 1) {
                inetAddress = ((InetSocketTransportAddress) publishAddress).address().getAddress();
            }

            if (!seenAddresses.add(inetAddress)) {
                continue;
            }

            os.addNodeInfo(nodeResponse.nodeInfo());
            if (nodeResponse.nodeStats().getFs() != null) {
                fs.add(nodeResponse.nodeStats().getFs().total());
                this.fs.add(nodeResponse.nodeStats().getFs().total());
            }
        }
        this.counts = new Counts(nodeInfos);
        this.os = new OsStats(nodeInfos);
        this.process = new ProcessStats(nodeStats);
        this.jvm = new JvmStats(nodeInfos, nodeStats);
    }

    public Counts getCounts() {
        return this.counts;
    }
@ -125,25 +143,8 @@ public class ClusterStatsNodes implements ToXContent, Streamable {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        counts = Counts.readCounts(in);

        int size = in.readVInt();
        versions = new HashSet<>(size);
        for (; size > 0; size--) {
            versions.add(Version.readVersion(in));
        }

        os = OsStats.readOsStats(in);
        process = ProcessStats.readStats(in);
        jvm = JvmStats.readJvmStats(in);
        fs = FsInfo.Path.readInfoFrom(in);

        size = in.readVInt();
        plugins = new HashSet<>(size);
        for (; size > 0; size--) {
            plugins.add(PluginInfo.readFromStream(in));
        }
    public ClusterStatsNodes readFrom(StreamInput in) throws IOException {
        return new ClusterStatsNodes(in);
    }

    @Override
@ -161,12 +162,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
        }
    }

    public static ClusterStatsNodes readNodeStats(StreamInput in) throws IOException {
        ClusterStatsNodes nodeStats = new ClusterStatsNodes();
        nodeStats.readFrom(in);
        return nodeStats;
    }

    static final class Fields {
        static final XContentBuilderString COUNT = new XContentBuilderString("count");
        static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
@ -212,109 +207,104 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
        return builder;
    }

    public static class Counts implements Streamable, ToXContent {
        int total;
        int masterOnly;
        int dataOnly;
        int masterData;
        int client;
    public static class Counts implements Writeable<Counts>, ToXContent {
        static final String COORDINATING_ONLY = "coordinating_only";

        public void addNodeInfo(NodeInfo nodeInfo) {
            total++;
            DiscoveryNode node = nodeInfo.getNode();
            if (node.masterNode()) {
                if (node.dataNode()) {
                    masterData++;
                } else {
                    masterOnly++;
                }
            } else if (node.dataNode()) {
                dataOnly++;
            } else if (node.clientNode()) {
                client++;
        private final int total;
        private final Map<String, Integer> roles;

        @SuppressWarnings("unchecked")
        private Counts(StreamInput in) throws IOException {
            this.total = in.readVInt();
            this.roles = (Map<String, Integer>) in.readGenericValue();
        }

        private Counts(List<NodeInfo> nodeInfos) {
            this.roles = new HashMap<>();
            for (DiscoveryNode.Role role : DiscoveryNode.Role.values()) {
                this.roles.put(role.getRoleName(), 0);
            }
            this.roles.put(COORDINATING_ONLY, 0);

            int total = 0;
            for (NodeInfo nodeInfo : nodeInfos) {
                total++;
                if (nodeInfo.getNode().getRoles().isEmpty()) {
                    Integer count = roles.get(COORDINATING_ONLY);
                    roles.put(COORDINATING_ONLY, ++count);
                } else {
                    for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
                        Integer count = roles.get(role.getRoleName());
                        roles.put(role.getRoleName(), ++count);
                    }
                }
            }
            this.total = total;
        }

        public int getTotal() {
            return total;
        }

        public int getMasterOnly() {
            return masterOnly;
        }

        public int getDataOnly() {
            return dataOnly;
        }

        public int getMasterData() {
            return masterData;
        }

        public int getClient() {
            return client;
        }

        public static Counts readCounts(StreamInput in) throws IOException {
            Counts c = new Counts();
            c.readFrom(in);
            return c;
        public Map<String, Integer> getRoles() {
            return roles;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            total = in.readVInt();
            masterOnly = in.readVInt();
            dataOnly = in.readVInt();
            masterData = in.readVInt();
            client = in.readVInt();
        public Counts readFrom(StreamInput in) throws IOException {
            return new Counts(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(total);
            out.writeVInt(masterOnly);
            out.writeVInt(dataOnly);
            out.writeVInt(masterData);
            out.writeVInt(client);
            out.writeGenericValue(roles);
        }

        static final class Fields {
            static final XContentBuilderString TOTAL = new XContentBuilderString("total");
            static final XContentBuilderString MASTER_ONLY = new XContentBuilderString("master_only");
            static final XContentBuilderString DATA_ONLY = new XContentBuilderString("data_only");
            static final XContentBuilderString MASTER_DATA = new XContentBuilderString("master_data");
            static final XContentBuilderString CLIENT = new XContentBuilderString("client");
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(Fields.TOTAL, total);
            builder.field(Fields.MASTER_ONLY, masterOnly);
            builder.field(Fields.DATA_ONLY, dataOnly);
            builder.field(Fields.MASTER_DATA, masterData);
            builder.field(Fields.CLIENT, client);
            for (Map.Entry<String, Integer> entry : roles.entrySet()) {
                builder.field(entry.getKey(), entry.getValue());
            }
            return builder;
        }
    }

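The rewritten `Counts` swaps five fixed role counters for a name-to-count map, so new node roles need no new fields or wire-format changes. A self-contained sketch of the same counting idiom (role names illustrative; `Map.merge` is the tidier JDK equivalent of the pre-seed-then-increment sequence in the original):

```java
import java.util.*;

public class RoleCountDemo {
    public static void main(String[] args) {
        // Each entry is the role set of one node; an empty set means the
        // node holds no roles and only coordinates requests.
        List<Set<String>> nodeRoles = Arrays.asList(
            new HashSet<>(Arrays.asList("master", "data")),
            new HashSet<>(Arrays.asList("data")),
            Collections.emptySet());

        Map<String, Integer> roles = new HashMap<>();
        int total = 0;
        for (Set<String> r : nodeRoles) {
            total++;
            if (r.isEmpty()) {
                roles.merge("coordinating_only", 1, Integer::sum);
            } else {
                for (String role : r) {
                    roles.merge(role, 1, Integer::sum);
                }
            }
        }
        System.out.println(total + " nodes, roles: " + roles);
    }
}
```
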
    public static class OsStats implements ToXContent, Streamable {

        int availableProcessors;
        int allocatedProcessors;
    public static class OsStats implements ToXContent, Writeable<OsStats> {
        final int availableProcessors;
        final int allocatedProcessors;
        final ObjectIntHashMap<String> names;

        public OsStats() {
            names = new ObjectIntHashMap<>();
        @SuppressWarnings("unchecked")
        private OsStats(StreamInput in) throws IOException {
            this.availableProcessors = in.readVInt();
            this.allocatedProcessors = in.readVInt();
            int size = in.readVInt();
            this.names = new ObjectIntHashMap<>();
            for (int i = 0; i < size; i++) {
                names.addTo(in.readString(), in.readVInt());
            }
        }

        public void addNodeInfo(NodeInfo nodeInfo) {
            availableProcessors += nodeInfo.getOs().getAvailableProcessors();
            allocatedProcessors += nodeInfo.getOs().getAllocatedProcessors();
        private OsStats(List<NodeInfo> nodeInfos) {
            this.names = new ObjectIntHashMap<>();
            int availableProcessors = 0;
            int allocatedProcessors = 0;
            for (NodeInfo nodeInfo : nodeInfos) {
                availableProcessors += nodeInfo.getOs().getAvailableProcessors();
                allocatedProcessors += nodeInfo.getOs().getAllocatedProcessors();

                if (nodeInfo.getOs().getName() != null) {
                    names.addTo(nodeInfo.getOs().getName(), 1);
                if (nodeInfo.getOs().getName() != null) {
                    names.addTo(nodeInfo.getOs().getName(), 1);
                }
            }
            this.availableProcessors = availableProcessors;
            this.allocatedProcessors = allocatedProcessors;
        }

        public int getAvailableProcessors() {
@ -326,14 +316,8 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            availableProcessors = in.readVInt();
            allocatedProcessors = in.readVInt();
            int size = in.readVInt();
            names.clear();
            for (int i = 0; i < size; i++) {
                names.addTo(in.readString(), in.readVInt());
            }
        public OsStats readFrom(StreamInput in) throws IOException {
            return new OsStats(in);
        }

        @Override
@ -347,12 +331,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
            }
        }

        public static OsStats readOsStats(StreamInput in) throws IOException {
            OsStats os = new OsStats();
            os.readFrom(in);
            return os;
        }

        static final class Fields {
            static final XContentBuilderString AVAILABLE_PROCESSORS = new XContentBuilderString("available_processors");
            static final XContentBuilderString ALLOCATED_PROCESSORS = new XContentBuilderString("allocated_processors");
@ -373,35 +351,54 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
                builder.endObject();
            }
            builder.endArray();

            return builder;
        }
    }

    public static class ProcessStats implements ToXContent, Streamable {
    public static class ProcessStats implements ToXContent, Writeable<ProcessStats> {

        int count;
        int cpuPercent;
        long totalOpenFileDescriptors;
        long minOpenFileDescriptors = Long.MAX_VALUE;
        long maxOpenFileDescriptors = Long.MIN_VALUE;
        final int count;
        final int cpuPercent;
        final long totalOpenFileDescriptors;
        final long minOpenFileDescriptors;
        final long maxOpenFileDescriptors;

        public void addNodeStats(NodeStats nodeStats) {
            if (nodeStats.getProcess() == null) {
                return;
        private ProcessStats(StreamInput in) throws IOException {
            this.count = in.readVInt();
            this.cpuPercent = in.readVInt();
            this.totalOpenFileDescriptors = in.readVLong();
            this.minOpenFileDescriptors = in.readLong();
            this.maxOpenFileDescriptors = in.readLong();
        }

        private ProcessStats(List<NodeStats> nodeStatsList) {
            int count = 0;
            int cpuPercent = 0;
            long totalOpenFileDescriptors = 0;
            long minOpenFileDescriptors = Long.MAX_VALUE;
            long maxOpenFileDescriptors = Long.MIN_VALUE;
            for (NodeStats nodeStats : nodeStatsList) {
                if (nodeStats.getProcess() == null) {
                    continue;
                }
                count++;
                if (nodeStats.getProcess().getCpu() != null) {
                    cpuPercent += nodeStats.getProcess().getCpu().getPercent();
                }
                long fd = nodeStats.getProcess().getOpenFileDescriptors();
                if (fd > 0) {
                    // fd can be -1 if not supported on platform
                    totalOpenFileDescriptors += fd;
                }
                // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes.
                minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
                maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
            }
            count++;
            if (nodeStats.getProcess().getCpu() != null) {
                cpuPercent += nodeStats.getProcess().getCpu().getPercent();
            }
            long fd = nodeStats.getProcess().getOpenFileDescriptors();
            if (fd > 0) {
                // fd can be -1 if not supported on platform
                totalOpenFileDescriptors += fd;
            }
            // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes.
            minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
            maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
            this.count = count;
            this.cpuPercent = cpuPercent;
            this.totalOpenFileDescriptors = totalOpenFileDescriptors;
            this.minOpenFileDescriptors = minOpenFileDescriptors;
            this.maxOpenFileDescriptors = maxOpenFileDescriptors;
        }

        /**
@ -433,12 +430,8 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            count = in.readVInt();
            cpuPercent = in.readVInt();
            totalOpenFileDescriptors = in.readVLong();
            minOpenFileDescriptors = in.readLong();
            maxOpenFileDescriptors = in.readLong();
        public ProcessStats readFrom(StreamInput in) throws IOException {
            return new ProcessStats(in);
        }

        @Override
@ -450,12 +443,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
            out.writeLong(maxOpenFileDescriptors);
        }

        public static ProcessStats readStats(StreamInput in) throws IOException {
            ProcessStats cpu = new ProcessStats();
            cpu.readFrom(in);
            return cpu;
        }

        static final class Fields {
            static final XContentBuilderString CPU = new XContentBuilderString("cpu");
            static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
@ -479,20 +466,54 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
        }
    }

    public static class JvmStats implements Streamable, ToXContent {
    public static class JvmStats implements Writeable<JvmStats>, ToXContent {

        ObjectIntHashMap<JvmVersion> versions;
        long threads;
        long maxUptime;
        long heapUsed;
        long heapMax;
        private final ObjectIntHashMap<JvmVersion> versions;
        private final long threads;
        private final long maxUptime;
        private final long heapUsed;
        private final long heapMax;

        JvmStats() {
            versions = new ObjectIntHashMap<>();
            threads = 0;
            maxUptime = 0;
            heapMax = 0;
            heapUsed = 0;
        private JvmStats(StreamInput in) throws IOException {
            int size = in.readVInt();
            this.versions = new ObjectIntHashMap<>(size);
            for (int i = 0; i < size; i++) {
                this.versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
            }
            this.threads = in.readVLong();
            this.maxUptime = in.readVLong();
            this.heapUsed = in.readVLong();
            this.heapMax = in.readVLong();
        }

        private JvmStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
            this.versions = new ObjectIntHashMap<>();
            long threads = 0;
            long maxUptime = 0;
            long heapMax = 0;
            long heapUsed = 0;
            for (NodeInfo nodeInfo : nodeInfos) {
                versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
            }

            for (NodeStats nodeStats : nodeStatsList) {
                org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
                if (js == null) {
                    continue;
                }
                if (js.getThreads() != null) {
                    threads += js.getThreads().getCount();
                }
                maxUptime = Math.max(maxUptime, js.getUptime().millis());
                if (js.getMem() != null) {
                    heapUsed += js.getMem().getHeapUsed().bytes();
                    heapMax += js.getMem().getHeapMax().bytes();
                }
            }
            this.threads = threads;
            this.maxUptime = maxUptime;
            this.heapUsed = heapUsed;
            this.heapMax = heapMax;
        }

        public ObjectIntHashMap<JvmVersion> getVersions() {
@ -527,33 +548,9 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
            return new ByteSizeValue(heapMax);
        }

        public void addNodeInfoStats(NodeInfo nodeInfo, NodeStats nodeStats) {
            versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
            org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
            if (js == null) {
                return;
            }
            if (js.getThreads() != null) {
                threads += js.getThreads().getCount();
            }
            maxUptime = Math.max(maxUptime, js.getUptime().millis());
            if (js.getMem() != null) {
                heapUsed += js.getMem().getHeapUsed().bytes();
                heapMax += js.getMem().getHeapMax().bytes();
            }
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            int size = in.readVInt();
            versions = new ObjectIntHashMap<>(size);
            for (; size > 0; size--) {
                versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
            }
            threads = in.readVLong();
            maxUptime = in.readVLong();
            heapUsed = in.readVLong();
            heapMax = in.readVLong();
        public JvmStats readFrom(StreamInput in) throws IOException {
            return new JvmStats(in);
        }

        @Override
@ -563,19 +560,12 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
                v.key.writeTo(out);
                out.writeVInt(v.value);
            }

            out.writeVLong(threads);
            out.writeVLong(maxUptime);
            out.writeVLong(heapUsed);
            out.writeVLong(heapMax);
        }

        public static JvmStats readJvmStats(StreamInput in) throws IOException {
            JvmStats jvmStats = new JvmStats();
            jvmStats.readFrom(in);
            return jvmStats;
        }

        static final class Fields {
            static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
            static final XContentBuilderString VERSION = new XContentBuilderString("version");
@ -674,6 +664,4 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
            out.writeString(vmVendor);
        }
    }

}
@ -110,7 +110,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
        status = ClusterHealthStatus.fromValue(in.readByte());
    }
    clusterUUID = in.readString();
    nodesStats = ClusterStatsNodes.readNodeStats(in);
    nodesStats = new ClusterStatsNodes(in);
    indicesStats = ClusterStatsIndices.readIndicesStats(in);
}

@ -111,7 +111,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
    }

    ClusterHealthStatus clusterStatus = null;
    if (clusterService.state().nodes().localNodeMaster()) {
    if (clusterService.state().nodes().isLocalNodeElectedMaster()) {
        clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus();
    }

@ -169,7 +169,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
     * The settings to create the index with (either json/yaml/properties format)
     */
    public CreateIndexRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -124,7 +124,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
     * Sets the settings to be updated (either json/yaml/properties format)
     */
    public UpdateSettingsRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -173,7 +173,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon

    @Override
    public void readFrom(StreamInput in) throws IOException {
        node = DiscoveryNode.readNode(in);
        node = new DiscoveryNode(in);
        legacyVersion = in.readLong();
        allocationId = in.readOptionalString();
        allocationStatus = AllocationStatus.readFrom(in);

@ -194,7 +194,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
    }

    private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
        for (ShardRouting shardRouting : routingNodes.node(node.id())) {
        for (ShardRouting shardRouting : routingNodes.node(node.getId())) {
            ShardId shardId = shardRouting.shardId();
            if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
                if (shardRouting.primary()) {

@ -162,7 +162,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     * The settings to create the index template with (either json/yaml/properties format).
     */
    public PutIndexTemplateRequest settings(String source) {
        this.settings = Settings.settingsBuilder().loadFromSource(source).build();
        this.settings = Settings.builder().loadFromSource(source).build();
        return this;
    }

@ -73,7 +73,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
    if (cause.length() == 0) {
        cause = "api";
    }
    final Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
    final Settings.Builder templateSettingsBuilder = Settings.builder();
    templateSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
    indexScopedSettings.validate(templateSettingsBuilder);
    indexTemplateService.putTemplate(new MetaDataIndexTemplateService.PutRequest(cause, request.name())

@ -168,8 +168,6 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
    protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());
        final QueryShardContext queryShardContext = indexService.newQueryShardContext();
        queryShardContext.setTypes(request.types());

        boolean valid;
        String explanation = null;
@ -182,7 +180,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
            parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        SearchContext.setCurrent(searchContext);
        try {
            searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
            searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
            searchContext.preProcess();

            valid = true;
@ -85,8 +85,6 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
        static final String CAUSE_FIELD = "cause";
        static final String STATUS_FIELD = "status";

        public static final Failure PROTOTYPE = new Failure(null, null, null, null);

        private final String index;
        private final String type;
        private final String id;
@ -101,6 +99,26 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
            this.status = ExceptionsHelper.status(t);
        }

        /**
         * Read from a stream.
         */
        public Failure(StreamInput in) throws IOException {
            index = in.readString();
            type = in.readString();
            id = in.readOptionalString();
            cause = in.readThrowable();
            status = ExceptionsHelper.status(cause);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(getIndex());
            out.writeString(getType());
            out.writeOptionalString(getId());
            out.writeThrowable(getCause());
        }

        /**
         * The index name of the action.
         */
@ -143,19 +161,6 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
            return cause;
        }

        @Override
        public Failure readFrom(StreamInput in) throws IOException {
            return new Failure(in.readString(), in.readString(), in.readOptionalString(), in.readThrowable());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(getIndex());
            out.writeString(getType());
            out.writeOptionalString(getId());
            out.writeThrowable(getCause());
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(INDEX_FIELD, index);
@ -305,7 +310,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
    }

    if (in.readBoolean()) {
        failure = Failure.PROTOTYPE.readFrom(in);
        failure = new Failure(in);
    }
}

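`Failure` above illustrates the commit's broader Streamable-to-Writeable migration: instead of a mutable `PROTOTYPE` whose `readFrom` fills fields after construction, a constructor consumes the stream directly, so the fields can be `final`. A stand-alone sketch of the pattern with plain JDK streams (types simplified for illustration; this is not the Elasticsearch API):

```java
import java.io.*;

// "Read from a stream" constructor: deserialization happens in the
// constructor, so fields stay final and no prototype instance is needed.
final class Failure {
    final String index;
    final String id;

    Failure(DataInput in) throws IOException {
        this.index = in.readUTF();   // read order must match write order
        this.id = in.readUTF();
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(index);
        out.writeUTF(id);
    }
}
```
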
@ -32,17 +32,17 @@ import java.util.Map;
 */
public class FieldStatsShardResponse extends BroadcastShardResponse {

    private Map<String, FieldStats> fieldStats;
    private Map<String, FieldStats<?>> fieldStats;

    public FieldStatsShardResponse() {
    }

    public FieldStatsShardResponse(ShardId shardId, Map<String, FieldStats> fieldStats) {
    public FieldStatsShardResponse(ShardId shardId, Map<String, FieldStats<?>> fieldStats) {
        super(shardId);
        this.fieldStats = fieldStats;
    }

    public Map<String, FieldStats> getFieldStats() {
    public Map<String, FieldStats<?>> getFieldStats() {
        return fieldStats;
    }

@ -63,7 +63,7 @@ public class FieldStatsShardResponse extends BroadcastShardResponse {
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(fieldStats.size());
        for (Map.Entry<String, FieldStats> entry : fieldStats.entrySet()) {
        for (Map.Entry<String, FieldStats<?>> entry : fieldStats.entrySet()) {
            out.writeString(entry.getKey());
            entry.getValue().writeTo(out);
        }

@ -19,9 +19,6 @@

package org.elasticsearch.action.fieldstats;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
@ -102,9 +99,9 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastAction
            indicesMergedFieldStats.put(indexName, indexMergedFieldStats = new HashMap<>());
        }

        Map<String, FieldStats> fieldStats = shardResponse.getFieldStats();
        for (Map.Entry<String, FieldStats> entry : fieldStats.entrySet()) {
            FieldStats existing = indexMergedFieldStats.get(entry.getKey());
        Map<String, FieldStats<?>> fieldStats = shardResponse.getFieldStats();
        for (Map.Entry<String, FieldStats<?>> entry : fieldStats.entrySet()) {
            FieldStats<?> existing = indexMergedFieldStats.get(entry.getKey());
            if (existing != null) {
                if (existing.getType() != entry.getValue().getType()) {
                    throw new IllegalStateException(
@ -156,22 +153,20 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastAction
    @Override
    protected FieldStatsShardResponse shardOperation(FieldStatsShardRequest request) {
        ShardId shardId = request.shardId();
        Map<String, FieldStats> fieldStats = new HashMap<>();
        Map<String, FieldStats<?>> fieldStats = new HashMap<>();
        IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
        MapperService mapperService = indexServices.mapperService();
        IndexShard shard = indexServices.getShard(shardId.id());
        try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
            for (String field : request.getFields()) {
                MappedFieldType fieldType = mapperService.fullName(field);
                if (fieldType != null) {
                    IndexReader reader = searcher.reader();
                    Terms terms = MultiFields.getTerms(reader, field);
                    if (terms != null) {
                        fieldStats.put(field, fieldType.stats(terms, reader.maxDoc()));
                    }
                } else {
                if (fieldType == null) {
                    throw new IllegalArgumentException("field [" + field + "] doesn't exist");
                }
                FieldStats<?> stats = fieldType.stats(searcher.reader());
                if (stats != null) {
                    fieldStats.put(field, stats);
                }
            }
        } catch (IOException e) {
            throw ExceptionsHelper.convertToElastic(e);

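Threading `FieldStats<?>` through the maps above replaces raw types with a wildcard: the map still holds stats of mixed element types, but the compiler keeps checking everything that does not depend on the type parameter. A toy illustration (the `Stats` class and its fields are invented for the example):

```java
import java.util.*;

class Stats<T> {
    final T min;
    Stats(T min) { this.min = min; }
}

public class WildcardDemo {
    public static void main(String[] args) {
        // The wildcard keeps generic checking without fixing T per entry:
        Map<String, Stats<?>> byField = new HashMap<>();
        byField.put("age", new Stats<>(3L));       // Stats<Long>
        byField.put("name", new Stats<>("a"));     // Stats<String>
        Object min = byField.get("age").min;       // read as Object; no unchecked cast
        System.out.println(min);
    }
}
```
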
@ -25,52 +25,35 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.ingest.core.IngestDocument;

import java.io.IOException;
import java.util.Collections;

/**
 * Holds the end result of what a pipeline did to a sample document provided via the simulate api.
 */
public final class SimulateDocumentBaseResult implements SimulateDocumentResult<SimulateDocumentBaseResult> {

    private static final SimulateDocumentBaseResult PROTOTYPE = new SimulateDocumentBaseResult(new WriteableIngestDocument(new IngestDocument(Collections.emptyMap(), Collections.emptyMap())));

    private WriteableIngestDocument ingestDocument;
    private Exception failure;
    private final WriteableIngestDocument ingestDocument;
    private final Exception failure;

    public SimulateDocumentBaseResult(IngestDocument ingestDocument) {
        this.ingestDocument = new WriteableIngestDocument(ingestDocument);
    }

    private SimulateDocumentBaseResult(WriteableIngestDocument ingestDocument) {
        this.ingestDocument = ingestDocument;
        failure = null;
    }

    public SimulateDocumentBaseResult(Exception failure) {
        ingestDocument = null;
        this.failure = failure;
    }

    public IngestDocument getIngestDocument() {
        if (ingestDocument == null) {
            return null;
        }
        return ingestDocument.getIngestDocument();
    }

    public Exception getFailure() {
        return failure;
    }

    public static SimulateDocumentBaseResult readSimulateDocumentSimpleResult(StreamInput in) throws IOException {
        return PROTOTYPE.readFrom(in);
    }

    @Override
    public SimulateDocumentBaseResult readFrom(StreamInput in) throws IOException {
    /**
     * Read from a stream.
     */
    public SimulateDocumentBaseResult(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            Exception exception = in.readThrowable();
            return new SimulateDocumentBaseResult(exception);
            ingestDocument = null;
            failure = in.readThrowable();
        } else {
            ingestDocument = new WriteableIngestDocument(in);
            failure = null;
        }
        return new SimulateDocumentBaseResult(new WriteableIngestDocument(in));
    }

    @Override
@ -84,6 +67,17 @@ public final class SimulateDocumentBaseResult implements SimulateDocumentResult<
        }
    }

    public IngestDocument getIngestDocument() {
        if (ingestDocument == null) {
            return null;
        }
        return ingestDocument.getIngestDocument();
    }

    public Exception getFailure() {
        return failure;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();

@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
@ -32,31 +31,21 @@ import java.util.List;
 * this result class holds the intermediate result each processor did to the sample document.
 */
public final class SimulateDocumentVerboseResult implements SimulateDocumentResult<SimulateDocumentVerboseResult> {

    private static final SimulateDocumentVerboseResult PROTOTYPE = new SimulateDocumentVerboseResult(Collections.emptyList());

    private final List<SimulateProcessorResult> processorResults;

    public SimulateDocumentVerboseResult(List<SimulateProcessorResult> processorResults) {
        this.processorResults = processorResults;
    }

    public List<SimulateProcessorResult> getProcessorResults() {
        return processorResults;
    }

    public static SimulateDocumentVerboseResult readSimulateDocumentVerboseResultFrom(StreamInput in) throws IOException {
        return PROTOTYPE.readFrom(in);
    }

    @Override
    public SimulateDocumentVerboseResult readFrom(StreamInput in) throws IOException {
    /**
     * Read from a stream.
     */
    public SimulateDocumentVerboseResult(StreamInput in) throws IOException {
        int size = in.readVInt();
        List<SimulateProcessorResult> processorResults = new ArrayList<>();
        processorResults = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            processorResults.add(new SimulateProcessorResult(in));
        }
        return new SimulateDocumentVerboseResult(processorResults);
    }

    @Override
@ -67,6 +56,10 @@ public final class SimulateDocumentVerboseResult implements SimulateDocumentResu
        }
    }

    public List<SimulateProcessorResult> getProcessorResults() {
        return processorResults;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();

@ -23,13 +23,14 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate;

class SimulateExecutionService {

    private static final String THREAD_POOL_NAME = ThreadPool.Names.MANAGEMENT;
@ -40,40 +41,16 @@ class SimulateExecutionService {
        this.threadPool = threadPool;
    }

    void executeVerboseDocument(Processor processor, IngestDocument ingestDocument, List<SimulateProcessorResult> processorResultList) throws Exception {
        if (processor instanceof CompoundProcessor) {
            CompoundProcessor cp = (CompoundProcessor) processor;
            try {
                for (Processor p : cp.getProcessors()) {
                    executeVerboseDocument(p, ingestDocument, processorResultList);
                }
            } catch (Exception e) {
                for (Processor p : cp.getOnFailureProcessors()) {
                    executeVerboseDocument(p, ingestDocument, processorResultList);
                }
            }
        } else {
            try {
                processor.execute(ingestDocument);
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument)));
            } catch (Exception e) {
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), e));
                throw e;
            }
        }
    }

    SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
        if (verbose) {
            List<SimulateProcessorResult> processorResultList = new ArrayList<>();
            IngestDocument currentIngestDocument = new IngestDocument(ingestDocument);
            CompoundProcessor pipelineProcessor = new CompoundProcessor(pipeline.getProcessors(), pipeline.getOnFailureProcessors());
            CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
            try {
                executeVerboseDocument(pipelineProcessor, currentIngestDocument, processorResultList);
                verbosePipelineProcessor.execute(ingestDocument);
                return new SimulateDocumentVerboseResult(processorResultList);
            } catch (Exception e) {
                return new SimulateDocumentBaseResult(e);
                return new SimulateDocumentVerboseResult(processorResultList);
            }
            return new SimulateDocumentVerboseResult(processorResultList);
        } else {
            try {
                pipeline.execute(ingestDocument);

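The removed `executeVerboseDocument` recursion is replaced by `TrackingResultProcessor.decorate`, which wraps every processor so that simply running the decorated pipeline records per-processor results as a side effect. A self-contained sketch of that decorator idea (the `Processor` interface here is invented for illustration, not the Elasticsearch one):

```java
import java.util.*;

interface Processor {
    void execute(StringBuilder doc) throws Exception;
    String tag();
}

// Decorator: wraps a processor and appends a result entry as a side effect,
// so the caller just executes the decorated pipeline instead of walking it.
final class TrackingProcessor implements Processor {
    private final Processor inner;
    private final List<String> results;
    TrackingProcessor(Processor inner, List<String> results) {
        this.inner = inner;
        this.results = results;
    }
    @Override public void execute(StringBuilder doc) throws Exception {
        try {
            inner.execute(doc);
            results.add(inner.tag() + ": " + doc);
        } catch (Exception e) {
            results.add(inner.tag() + ": failed (" + e.getMessage() + ")");
            throw e;
        }
    }
    @Override public String tag() { return inner.tag(); }
}

public class TrackingDemo {
    public static void main(String[] args) throws Exception {
        List<String> results = new ArrayList<>();
        Processor upper = new Processor() {
            @Override public void execute(StringBuilder doc) {
                doc.replace(0, doc.length(), doc.toString().toUpperCase(Locale.ROOT));
            }
            @Override public String tag() { return "uppercase"; }
        };
        StringBuilder doc = new StringBuilder("hello");
        new TrackingProcessor(upper, results).execute(doc);
        System.out.println(results); // [uppercase: HELLO]
    }
}
```
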
@ -79,9 +79,9 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte
|
||||
for (int i = 0; i < responsesLength; i++) {
|
||||
SimulateDocumentResult<?> simulateDocumentResult;
|
||||
if (verbose) {
|
||||
simulateDocumentResult = SimulateDocumentVerboseResult.readSimulateDocumentVerboseResultFrom(in);
|
||||
simulateDocumentResult = new SimulateDocumentVerboseResult(in);
|
||||
} else {
|
||||
simulateDocumentResult = SimulateDocumentBaseResult.readSimulateDocumentSimpleResult(in);
|
||||
simulateDocumentResult = new SimulateDocumentBaseResult(in);
|
||||
}
|
||||
results.add(simulateDocumentResult);
|
||||
}
|
||||
|
@ -24,11 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
||||
import org.elasticsearch.ingest.core.IngestDocument;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
|
@ -17,23 +17,27 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.query.functionscore.lin;
|
||||
package org.elasticsearch.action.main;
|
||||
|
||||
import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class LinearDecayFunctionParser extends DecayFunctionParser<LinearDecayFunctionBuilder> {
|
||||
public class MainAction extends Action<MainRequest, MainResponse, MainRequestBuilder> {
|
||||
|
||||
public static final String[] NAMES = { "linear" };
|
||||
public static final String NAME = "cluster:monitor/main";
|
||||
public static final MainAction INSTANCE = new MainAction();
|
||||
|
||||
private static final LinearDecayFunctionBuilder PROTOTYPE = new LinearDecayFunctionBuilder("", "", "", "");
|
||||
|
||||
@Override
|
||||
public String[] getNames() {
|
||||
return NAMES;
|
||||
public MainAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LinearDecayFunctionBuilder getBuilderPrototype() {
|
||||
return PROTOTYPE;
|
||||
public MainRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new MainRequestBuilder(client, INSTANCE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MainResponse newResponse() {
|
||||
return new MainResponse();
|
||||
}
|
||||
}
|
@ -17,17 +17,16 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.http;
|
||||
package org.elasticsearch.action.main;
|
||||
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public abstract class HttpChannel extends RestChannel {
|
||||
public class MainRequest extends ActionRequest<MainRequest> {
|
||||
|
||||
protected HttpChannel(RestRequest request, boolean detailedErrorsEnabled) {
|
||||
super(request, detailedErrorsEnabled);
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
@ -17,20 +17,14 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.math;
|
||||
package org.elasticsearch.action.main;
|
||||
|
||||
public enum MathUtils {
|
||||
;
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Return the (positive) remainder of the division of <code>v</code> by <code>mod</code>.
|
||||
*/
|
||||
public static int mod(int v, int m) {
|
||||
int r = v % m;
|
||||
if (r < 0) {
|
||||
r += m;
|
||||
}
|
||||
return r;
|
||||
public class MainRequestBuilder extends ActionRequestBuilder<MainRequest, MainResponse, MainRequestBuilder> {
|
||||
|
||||
public MainRequestBuilder(ElasticsearchClient client, MainAction action) {
|
||||
super(client, action, new MainRequest());
|
||||
}
|
||||
|
||||
}
|
@@ -0,0 +1,108 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.main;

import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

public class MainResponse extends ActionResponse implements ToXContent {

    private String nodeName;
    private Version version;
    private ClusterName clusterName;
    private Build build;
    private boolean available;

    MainResponse() {
    }

    public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) {
        this.nodeName = nodeName;
        this.version = version;
        this.clusterName = clusterName;
        this.build = build;
        this.available = available;
    }

    public String getNodeName() {
        return nodeName;
    }

    public Version getVersion() {
        return version;
    }

    public ClusterName getClusterName() {
        return clusterName;
    }

    public Build getBuild() {
        return build;
    }

    public boolean isAvailable() {
        return available;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(nodeName);
        Version.writeVersion(version, out);
        clusterName.writeTo(out);
        Build.writeBuild(build, out);
        out.writeBoolean(available);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        nodeName = in.readString();
        version = Version.readVersion(in);
        clusterName = ClusterName.readClusterName(in);
        build = Build.readBuild(in);
        available = in.readBoolean();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("name", nodeName);
        builder.field("cluster_name", clusterName.value());
        builder.startObject("version")
            .field("number", version.toString())
            .field("build_hash", build.shortHash())
            .field("build_date", build.date())
            .field("build_snapshot", build.isSnapshot())
            .field("lucene_version", version.luceneVersion.toString())
            .endObject();
        builder.field("tagline", "You Know, for Search");
        builder.endObject();
        return builder;
    }
}
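The `toXContent` method above is what backs the root endpoint; with illustrative values filled in (every value below is a placeholder, only the field layout comes from the code), the body it renders looks roughly like:

```json
{
  "name" : "node-1",
  "cluster_name" : "elasticsearch",
  "version" : {
    "number" : "5.0.0",
    "build_hash" : "abc1234",
    "build_date" : "2016-03-30T09:51:41.449Z",
    "build_snapshot" : true,
    "lucene_version" : "6.0.0"
  },
  "tagline" : "You Know, for Search"
}
```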
@@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.main;

import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class TransportMainAction extends HandledTransportAction<MainRequest, MainResponse> {

    private final ClusterService clusterService;
    private final Version version;

    @Inject
    public TransportMainAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               ClusterService clusterService, Version version) {
        super(settings, MainAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MainRequest::new);
        this.clusterService = clusterService;
        this.version = version;
    }

    @Override
    protected void doExecute(MainRequest request, ActionListener<MainResponse> listener) {
        ClusterState clusterState = clusterService.state();
        assert Node.NODE_NAME_SETTING.exists(settings);
        final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
        listener.onResponse(
            new MainResponse(Node.NODE_NAME_SETTING.get(settings), version, clusterState.getClusterName(), Build.CURRENT, available));
    }
}
@@ -38,14 +38,12 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.PercolatorQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregatorParsers;
@@ -207,7 +205,10 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
            boolQueryBuilder.filter(percolatorQueryBuilder);
            searchSource.field("query", boolQueryBuilder);
        } else {
            searchSource.field("query", percolatorQueryBuilder);
            // wrapping in a constant score query with boost 0 for bwc reasons.
            // the percolator api didn't emit scores before and never included scores
            // for how well percolator queries matched with the document being percolated
            searchSource.field("query", new ConstantScoreQueryBuilder(percolatorQueryBuilder).boost(0f));
        }

        searchSource.endObject();
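The constant-score wrapping in the else branch above is ordinary query-DSL composition; a minimal sketch of the same pattern with the builder API (the term query is a stand-in, any `QueryBuilder` works):

```java
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// every matching document gets a constant score, and boost(0f) pins it to 0,
// matching the old percolator behavior of not scoring matches
QueryBuilder percolatorQuery = QueryBuilders.termQuery("field", "value"); // stand-in query
ConstantScoreQueryBuilder wrapped = QueryBuilders.constantScoreQuery(percolatorQuery).boost(0f);
```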
@@ -107,7 +107,16 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
            request.indices());

        shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
        expectedSuccessfulOps = shardsIts.size();
        final int shardCount = shardsIts.size();
        final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING);
        if (shardCount > shardCountLimit) {
            throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of "
                + shardCountLimit + ". This limit exists because querying many shards at the same time can make the "
                + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
                + "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey()
                + "] to a greater value if you really want to query that many shards at the same time.");
        }
        expectedSuccessfulOps = shardCount;
        // we need to add 1 for non active partition, since we count it in the total!
        expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

@@ -155,7 +164,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>

            @Override
            public void onFailure(Throwable t) {
                onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
                onFirstPhaseResult(shardIndex, shard, node.getId(), shardIt, t);
            }
        });
    }
@@ -31,7 +31,7 @@ import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.index.query.support.InnerHitsBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.sort.SortBuilder;

@@ -75,7 +75,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
        int expectedOps = 0;
        this.nodes = clusterState.nodes();
        if (request.getScrollIds().size() == 1 && "_all".equals(request.getScrollIds().get(0))) {
            expectedOps = nodes.size();
            expectedOps = nodes.getSize();
        } else {
            for (String parsedScrollId : request.getScrollIds()) {
                ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext();
@@ -26,6 +26,8 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
@@ -45,6 +47,10 @@ import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
 */
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {

    /** The maximum number of shards for a single search request. */
    public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
            "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope);

    private final ClusterService clusterService;
    private final SearchTransportService searchTransportService;
    private final SearchPhaseController searchPhaseController;
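Because the setting is registered with `Property.Dynamic`, the limit can be raised on a live cluster without a restart. A hedged sketch using the Java admin client (a connected `client` is assumed to be in scope):

```java
import org.elasticsearch.common.settings.Settings;

// raise the per-request shard limit from the default of 1000 to 2000;
// a transient setting reverts on full cluster restart
client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
                .put("action.search.shard_count.limit", 2000L)
                .build())
        .get();
```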
@@ -22,7 +22,6 @@ package org.elasticsearch.action.support;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -48,9 +47,9 @@ public final class ThreadedActionListener<Response> implements ActionListener<Re
        public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) {
            this.logger = logger;
            this.threadPool = threadPool;
            // Should the action listener be threaded or not by default. Action listeners are automatically threaded for client
            // nodes and transport client in order to make sure client side code is not executed on IO threads.
            this.threadedListener = DiscoveryNode.clientNode(settings) || TransportClient.CLIENT_TYPE.equals(Client.CLIENT_TYPE_SETTING_S.get(settings));
            // Should the action listener be threaded or not by default. Action listeners are automatically threaded for
            // the transport client in order to make sure client side code is not executed on IO threads.
            this.threadedListener = TransportClient.CLIENT_TYPE.equals(Client.CLIENT_TYPE_SETTING_S.get(settings));
        }

        public <Response> ActionListener<Response> wrap(ActionListener<Response> listener) {
@@ -299,7 +299,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                try {
                    NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
                    if (task != null) {
                        nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                        nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
                        taskManager.registerChildTask(task, node.getId());
                    }
                    transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
@@ -330,7 +330,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe

        protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
            if (logger.isTraceEnabled()) {
                logger.trace("received response for [{}] from node [{}]", actionName, node.id());
                logger.trace("received response for [{}] from node [{}]", actionName, node.getId());
            }

            // this is defensive to protect against the possibility of double invocation
@@ -344,7 +344,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
        }

        protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
            String nodeId = node.id();
            String nodeId = node.getId();
            if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
                logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
            }
@@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
@@ -116,10 +115,6 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
            if (task != null) {
                request.setParentTask(clusterService.localNode().getId(), task.getId());
            }
            // TODO do we really need to wrap it in a listener? the handlers should be cheap
            if ((listener instanceof ThreadedActionListener) == false) {
                listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener);
            }
            this.listener = listener;
        }

@@ -131,7 +126,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
        protected void doStart() {
            final ClusterState clusterState = observer.observedState();
            final DiscoveryNodes nodes = clusterState.nodes();
            if (nodes.localNodeMaster() || localExecute(request)) {
            if (nodes.isLocalNodeElectedMaster() || localExecute(request)) {
                // check for block, if blocked, retry, else, execute locally
                final ClusterBlockException blockException = checkBlock(request, clusterState);
                if (blockException != null) {
@@ -168,12 +163,12 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                    });
                }
            } else {
                if (nodes.masterNode() == null) {
                if (nodes.getMasterNode() == null) {
                    logger.debug("no known master node, scheduling a retry");
                    retry(null, MasterNodeChangePredicate.INSTANCE);
                } else {
                    taskManager.registerChildTask(task, nodes.masterNode().getId());
                    transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
                    taskManager.registerChildTask(task, nodes.getMasterNode().getId());
                    transportService.sendRequest(nodes.getMasterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
                        @Override
                        public Response newInstance() {
                            return newResponse();
@@ -185,7 +180,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
                            if (cause instanceof ConnectTransportException) {
                                // we want to retry here a bit to see if a new master is elected
                                logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
                                    actionName, nodes.masterNode(), exp.getDetailedMessage());
                                    actionName, nodes.getMasterNode(), exp.getDetailedMessage());
                                retry(cause, MasterNodeChangePredicate.INSTANCE);
                            } else {
                                listener.onFailure(exp);
@@ -51,7 +51,7 @@ public abstract class BaseNodeResponse extends TransportResponse {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        node = DiscoveryNode.readNode(in);
        node = new DiscoveryNode(in);
    }

    @Override

@@ -81,7 +81,7 @@ public abstract class BaseNodesResponse<TNodeResponse extends BaseNodeResponse>
        if (nodesMap == null) {
            nodesMap = new HashMap<>();
            for (TNodeResponse nodeResponse : nodes) {
                nodesMap.put(nodeResponse.getNode().id(), nodeResponse);
                nodesMap.put(nodeResponse.getNode().getId(), nodeResponse);
            }
        }
        return nodesMap;
@@ -127,7 +127,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
            ClusterState clusterState = clusterService.state();
            String[] nodesIds = resolveNodes(request, clusterState);
            this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
            ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
            ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().getNodes();
            this.nodes = new DiscoveryNode[nodesIds.length];
            for (int i = 0; i < nodesIds.length; i++) {
                this.nodes[i] = nodes.get(nodesIds[i]);
@@ -161,7 +161,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
                } else {
                    ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
                    if (task != null) {
                        nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                        nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
                        taskManager.registerChildTask(task, node.getId());
                    }

@@ -178,7 +178,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

                        @Override
                        public void handleException(TransportException exp) {
                            onFailure(idx, node.id(), exp);
                            onFailure(idx, node.getId(), exp);
                        }

                        @Override
@@ -51,6 +51,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
     */
    protected ShardId shardId;

    long primaryTerm;

    protected TimeValue timeout = DEFAULT_TIMEOUT;
    protected String index;

@@ -148,6 +150,16 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
        return routedBasedOnClusterVersion;
    }

    /** returns the primary term active at the time the operation was performed on the primary shard */
    public long primaryTerm() {
        return primaryTerm;
    }

    /** marks the primary term in which the operation was performed */
    public void primaryTerm(long term) {
        primaryTerm = term;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
@@ -169,6 +181,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
        timeout = TimeValue.readTimeValue(in);
        index = in.readString();
        routedBasedOnClusterVersion = in.readVLong();
        primaryTerm = in.readVLong();
    }

    @Override
@@ -184,6 +197,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
        timeout.writeTo(out);
        out.writeString(index);
        out.writeVLong(routedBasedOnClusterVersion);
        out.writeVLong(primaryTerm);
    }

    @Override
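The new field rides along in the existing readFrom/writeTo pair, so every concrete replication request carries its primary term across the wire. A minimal round-trip sketch, with `IndexRequest` as the concrete subclass and the `BytesStreamOutput` round trip as used in the test suite (both are assumptions, not part of this change):

```java
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

IndexRequest request = new IndexRequest("index", "type", "1");
request.primaryTerm(3L); // stamp the term the primary operated under

BytesStreamOutput out = new BytesStreamOutput();
request.writeTo(out); // writeVLong(primaryTerm) is now part of the stream

IndexRequest copy = new IndexRequest();
copy.readFrom(out.bytes().streamInput());
assert copy.primaryTerm() == 3L; // the term survives serialization
```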
@@ -52,7 +52,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
@@ -80,6 +79,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -359,32 +359,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                    }
                });
            } else {
                try {
                    failReplicaIfNeeded(t);
                } catch (Throwable unexpected) {
                    logger.error("{} unexpected error while failing replica", unexpected, request.shardId().id());
                } finally {
                    responseWithFailure(t);
                }
            }
        }

        private void failReplicaIfNeeded(Throwable t) {
            Index index = request.shardId().getIndex();
            int shardId = request.shardId().id();
            logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
            if (ignoreReplicaException(t) == false) {
                IndexService indexService = indicesService.indexService(index);
                if (indexService == null) {
                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                    return;
                }
                IndexShard indexShard = indexService.getShardOrNull(shardId);
                if (indexShard == null) {
                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                    return;
                }
                indexShard.failShard(actionName + " failed on replica", t);
            }
        }

@@ -401,7 +376,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
        protected void doRun() throws Exception {
            setPhase(task, "replica");
            assert request.shardId() != null : "request shardId must be set";
            try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
            try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId(), request.primaryTerm())) {
                shardOperationOnReplica(request);
                if (logger.isTraceEnabled()) {
                    logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
@@ -475,7 +450,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
            }
            final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
            taskManager.registerChildTask(task, node.getId());
            if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
            if (primary.currentNodeId().equals(state.nodes().getLocalNodeId())) {
                performLocalAction(state, primary, node);
            } else {
                performRemoteAction(state, primary, node);
@@ -546,7 +521,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

        private void handleBlockException(ClusterBlockException blockException) {
            if (blockException.retryable()) {
                logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
                logger.trace("cluster is blocked, scheduling a retry", blockException);
                retry(blockException);
            } else {
                finishAsFailed(blockException);
@@ -575,9 +550,10 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
            public void handleException(TransportException exp) {
                try {
                    // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
                    if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
                        (isPrimaryAction && retryPrimaryException(exp.unwrapCause()))) {
                        logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.id(), request);
                    final Throwable cause = exp.unwrapCause();
                    if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                        (isPrimaryAction && retryPrimaryException(cause))) {
                        logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.getId(), request);
                        retry(exp);
                    } else {
                        finishAsFailed(exp);
@@ -704,10 +680,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                return;
            }
            // closed in finishAsFailed(e) in the case of error
            indexShardReference = getIndexShardReferenceOnPrimary(shardId);
            indexShardReference = getIndexShardReferenceOnPrimary(shardId, request);
            if (indexShardReference.isRelocated() == false) {
                executeLocally();

            } else {
                executeRemotely();
            }
@@ -716,6 +691,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
        private void executeLocally() throws Exception {
            // execute locally
            Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
            primaryResponse.v2().primaryTerm(indexShardReference.opPrimaryTerm());
            if (logger.isTraceEnabled()) {
                logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
            }
@ -822,20 +798,27 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
* returns a new reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
|
||||
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationPhase}).
|
||||
*/
|
||||
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
|
||||
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId, Request request) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
IndexShard indexShard = indexService.getShard(shardId.id());
|
||||
return new IndexShardReferenceImpl(indexShard, true);
|
||||
// we may end up here if the cluster state used to route the primary is so stale that the underlying
|
||||
// index shard was replaced with a replica. For example - in a two node cluster, if the primary fails
|
||||
// the replica will take over and a replica will be assigned to the first node.
|
||||
if (indexShard.routingEntry().primary() == false) {
|
||||
throw new RetryOnPrimaryException(indexShard.shardId(), "actual shard is not a primary " + indexShard.routingEntry());
|
||||
}
|
||||
return IndexShardReferenceImpl.createOnPrimary(indexShard);
|
||||
}
|
||||
|
||||
/**
|
||||
* returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
|
||||
* replication is completed on the node.
|
||||
*/
|
||||
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
|
||||
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId, long primaryTerm) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
IndexShard indexShard = indexService.getShard(shardId.id());
|
||||
return new IndexShardReferenceImpl(indexShard, false);
|
||||
IndexShardReference ref = IndexShardReferenceImpl.createOnReplica(indexShard, primaryTerm);
|
||||
return ref;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -928,12 +911,12 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
|
||||
// we never execute replication operation locally as primary operation has already completed locally
|
||||
// hence, we ignore any local shard for replication
|
||||
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
|
||||
if (nodes.getLocalNodeId().equals(shard.currentNodeId()) == false) {
|
||||
onLocalShard.accept(shard);
|
||||
}
|
||||
// send operation to relocating shard
|
||||
// local shard can be a relocation target of a primary that is in relocated state
|
||||
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
|
||||
if (shard.relocating() && nodes.getLocalNodeId().equals(shard.relocatingNodeId()) == false) {
|
||||
onRelocatingShard.accept(shard);
|
||||
}
|
||||
}
|
||||
@ -1016,30 +999,38 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
|
||||
logger.warn("[{}] {}", exp, shardId, message);
|
||||
shardStateAction.shardFailed(
|
||||
shard,
|
||||
indexShardReference.routingEntry(),
|
||||
message,
|
||||
exp,
|
||||
new ShardStateAction.Listener() {
|
||||
@Override
|
||||
public void onSuccess() {
|
||||
onReplicaFailure(nodeId, exp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable shardFailedError) {
|
||||
if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
|
||||
ShardRouting primaryShard = indexShardReference.routingEntry();
|
||||
String message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard [%s] for [%s]", primaryShard, shard, exp);
|
||||
// we are no longer the primary, fail ourselves and start over
|
||||
indexShardReference.failShard(message, shardFailedError);
|
||||
forceFinishAsFailed(new RetryOnPrimaryException(shardId, message, shardFailedError));
|
||||
} else {
|
||||
assert false : shardFailedError;
|
||||
shard,
|
||||
indexShardReference.routingEntry(),
|
||||
message,
|
||||
exp,
|
||||
new ShardStateAction.Listener() {
|
||||
@Override
|
||||
public void onSuccess() {
|
||||
onReplicaFailure(nodeId, exp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable shardFailedError) {
|
||||
if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
|
||||
String message = "unknown";
|
||||
try {
|
||||
ShardRouting primaryShard = indexShardReference.routingEntry();
|
||||
message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard [%s] for [%s]", primaryShard, shard, exp);
|
||||
// we are no longer the primary, fail ourselves and start over
|
||||
indexShardReference.failShard(message, shardFailedError);
|
||||
} catch (Throwable t) {
|
||||
shardFailedError.addSuppressed(t);
|
||||
}
|
||||
forceFinishAsFailed(new RetryOnPrimaryException(shardId, message, shardFailedError));
|
||||
} else {
|
||||
// these can occur if the node is shutting down and are okay
|
||||
// any other exception here is not expected and merits investigation
|
||||
assert shardFailedError instanceof TransportException ||
|
||||
shardFailedError instanceof NodeClosedException : shardFailedError;
|
||||
onReplicaFailure(nodeId, exp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1098,9 +1089,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
totalShards,
|
||||
success.get(),
|
||||
failuresArray
|
||||
|
||||
)
|
||||
);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("finished replicating action [{}], request [{}], shardInfo [{}]", actionName, replicaRequest,
|
||||
finalResponse.getShardInfo());
|
||||
}
|
||||
|
||||
try {
|
||||
channel.sendResponse(finalResponse);
|
||||
} catch (IOException responseException) {
|
||||
@ -1123,8 +1118,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
|
||||
interface IndexShardReference extends Releasable {
|
||||
boolean isRelocated();
|
||||
|
||||
void failShard(String reason, @Nullable Throwable e);
|
||||
|
||||
ShardRouting routingEntry();
|
||||
|
||||
/** returns the primary term of the current operation */
|
||||
long opPrimaryTerm();
|
||||
}
|
||||
|
||||
static final class IndexShardReferenceImpl implements IndexShardReference {
|
||||
@ -1132,15 +1132,23 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
private final IndexShard indexShard;
|
||||
private final Releasable operationLock;
|
||||
|
||||
IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
|
||||
private IndexShardReferenceImpl(IndexShard indexShard, long primaryTerm) {
|
||||
this.indexShard = indexShard;
|
||||
if (primaryAction) {
|
||||
if (primaryTerm < 0) {
|
||||
operationLock = indexShard.acquirePrimaryOperationLock();
|
||||
} else {
|
||||
operationLock = indexShard.acquireReplicaOperationLock();
|
||||
operationLock = indexShard.acquireReplicaOperationLock(primaryTerm);
|
||||
}
|
||||
}
|
||||
|
||||
static IndexShardReferenceImpl createOnPrimary(IndexShard indexShard) {
|
||||
return new IndexShardReferenceImpl(indexShard, -1);
|
||||
}
|
||||
|
||||
static IndexShardReferenceImpl createOnReplica(IndexShard indexShard, long primaryTerm) {
|
||||
return new IndexShardReferenceImpl(indexShard, primaryTerm);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
operationLock.close();
|
||||
@ -1160,6 +1168,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
public ShardRouting routingEntry() {
|
||||
return indexShard.routingEntry();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long opPrimaryTerm() {
|
||||
return indexShard.getPrimaryTerm();
|
||||
}
|
||||
}
|
||||
|
||||
protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) {
|
||||
|
@ -228,7 +228,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
|
||||
|
||||
@Override
|
||||
public void onClusterServiceClose() {
|
||||
listener.onFailure(new NodeClosedException(nodes.localNode()));
|
||||
listener.onFailure(new NodeClosedException(nodes.getLocalNode()));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -42,8 +42,6 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
|
||||
|
||||
public static final String[] ALL_NODES = Strings.EMPTY_ARRAY;
|
||||
|
||||
public static final long ALL_TASKS = -1L;
|
||||
|
||||
private String[] nodesIds = ALL_NODES;
|
||||
|
||||
private TimeValue timeout;
|
||||
|
@ -113,10 +113,10 @@ public abstract class TransportTasksAction<
|
||||
results.add(response);
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
|
||||
exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), task.getId(), ex));
|
||||
}
|
||||
});
|
||||
return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
|
||||
return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions);
|
||||
}
|
||||
|
||||
protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
|
||||
@ -205,10 +205,10 @@ public abstract class TransportTasksAction<
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] nodesIds = resolveNodes(request, clusterState);
|
||||
this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
|
||||
ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
|
||||
ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().getNodes();
|
||||
this.nodes = new DiscoveryNode[nodesIds.length];
|
||||
for (int i = 0; i < nodesIds.length; i++) {
|
||||
this.nodes[i] = nodes.get(nodesIds[i]);
|
||||
for (int i = 0; i < this.nodesIds.length; i++) {
|
||||
this.nodes[i] = nodes.get(this.nodesIds[i]);
|
||||
}
|
||||
this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
|
||||
}
|
||||
@ -237,7 +237,7 @@ public abstract class TransportTasksAction<
|
||||
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
||||
} else {
|
||||
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
|
||||
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
|
||||
nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId());
|
||||
taskManager.registerChildTask(task, node.getId());
|
||||
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
|
||||
new BaseTransportResponseHandler<NodeTasksResponse>() {
|
||||
@ -253,7 +253,7 @@ public abstract class TransportTasksAction<
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
onFailure(idx, node.id(), exp);
|
||||
onFailure(idx, node.getId(), exp);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -133,9 +133,8 @@ final class Bootstrap {
            // we've already logged this.
        }

        JNANatives.trySetMaxNumberOfThreads();

        JNANatives.trySetMaxSizeVirtualMemory();
        Natives.trySetMaxNumberOfThreads();
        Natives.trySetMaxSizeVirtualMemory();

        // init lucene random seed. it will use /dev/urandom where available:
        StringHelper.randomId();
@@ -180,7 +179,7 @@ final class Bootstrap {
        // We do not need to reload system properties here as we have already applied them in building the settings and
        // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
        // placeholder
        Settings nodeSettings = Settings.settingsBuilder()
        Settings nodeSettings = Settings.builder()
            .put(settings)
            .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
            .build();
@@ -25,6 +25,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.transport.TransportSettings;

@@ -39,7 +40,6 @@ import java.util.Set;
/**
 * We enforce limits once any network host is configured. In this case we assume the node is running in production
 * and all production limit checks must pass. This should be extended as we go to settings like:
 * - discovery.zen.minimum_master_nodes
 * - discovery.zen.ping.unicast.hosts is set if we use zen disco
 * - ensure we can write in all data directories
 * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
@@ -114,10 +114,10 @@ final class BootstrapCheck {
    }

    // the list of checks to execute
    private static List<Check> checks(final Settings settings) {
    static List<Check> checks(final Settings settings) {
        final List<Check> checks = new ArrayList<>();
        final FileDescriptorCheck fileDescriptorCheck
            = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
                = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
        checks.add(fileDescriptorCheck);
        checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
        if (Constants.LINUX) {
@@ -126,6 +126,7 @@ final class BootstrapCheck {
        if (Constants.LINUX || Constants.MAC_OS_X) {
            checks.add(new MaxSizeVirtualMemoryCheck());
        }
        checks.add(new MinMasterNodesCheck(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(settings)));
        return Collections.unmodifiableList(checks);
    }

@@ -186,10 +187,10 @@ final class BootstrapCheck {
        @Override
        public final String errorMessage() {
            return String.format(
                Locale.ROOT,
                "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
                getMaxFileDescriptorCount(),
                limit
                    Locale.ROOT,
                    "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
                    getMaxFileDescriptorCount(),
                    limit
            );
        }

@@ -226,6 +227,26 @@ final class BootstrapCheck {

    }

    static class MinMasterNodesCheck implements Check {

        final boolean minMasterNodesIsSet;

        MinMasterNodesCheck(boolean minMasterNodesIsSet) {
            this.minMasterNodesIsSet = minMasterNodesIsSet;
        }

        @Override
        public boolean check() {
            return minMasterNodesIsSet == false;
        }

        @Override
        public String errorMessage() {
            return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
                "] to a majority of the number of master eligible nodes in your cluster.";
        }
    }

    static class MaxNumberOfThreadsCheck implements Check {

        private final long maxNumberOfThreadsThreshold = 1 << 11;
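MinMasterNodesCheck fires when the setting is absent, so a node that binds to a non-local network host now has to declare it explicitly. A hedged configuration sketch: with three master-eligible nodes, the majority is 3/2 + 1 = 2.

```yaml
# elasticsearch.yml (illustrative): quorum for a cluster with 3 master-eligible nodes
discovery.zen.minimum_master_nodes: 2
```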
@@ -90,7 +90,7 @@ final class Natives {
        }
        return JNANatives.LOCAL_MLOCKALL;
    }

    static void trySeccomp(Path tmpFile) {
        if (!JNA_AVAILABLE) {
            logger.warn("cannot install syscall filters because JNA is not available");
@@ -98,7 +98,23 @@ final class Natives {
        }
        JNANatives.trySeccomp(tmpFile);
    }

    static void trySetMaxNumberOfThreads() {
        if (!JNA_AVAILABLE) {
            logger.warn("cannot getrlimit RLIMIT_NPROC because JNA is not available");
            return;
        }
        JNANatives.trySetMaxNumberOfThreads();
    }

    static void trySetMaxSizeVirtualMemory() {
        if (!JNA_AVAILABLE) {
            logger.warn("cannot getrlimit RLIMIT_AS because JNA is not available");
            return;
        }
        JNANatives.trySetMaxSizeVirtualMemory();
    }

    static boolean isSeccompInstalled() {
        if (!JNA_AVAILABLE) {
            return false;
@@ -21,6 +21,9 @@ package org.elasticsearch.client;

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -572,4 +575,19 @@ public interface ClusterAdminClient extends ElasticsearchClient {
     * Simulates an ingest pipeline
     */
    SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source);

    /**
     * Explain the allocation of a shard
     */
    void allocationExplain(ClusterAllocationExplainRequest request, ActionListener<ClusterAllocationExplainResponse> listener);

    /**
     * Explain the allocation of a shard
     */
    ActionFuture<ClusterAllocationExplainResponse> allocationExplain(ClusterAllocationExplainRequest request);

    /**
     * Explain the allocation of a shard
     */
    ClusterAllocationExplainRequestBuilder prepareAllocationExplain();
}
@@ -25,6 +25,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
@@ -1245,6 +1249,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source) {
            return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source);
        }

        @Override
        public void allocationExplain(ClusterAllocationExplainRequest request, ActionListener<ClusterAllocationExplainResponse> listener) {
            execute(ClusterAllocationExplainAction.INSTANCE, request, listener);
        }

        @Override
        public ActionFuture<ClusterAllocationExplainResponse> allocationExplain(ClusterAllocationExplainRequest request) {
            return execute(ClusterAllocationExplainAction.INSTANCE, request);
        }

        @Override
        public ClusterAllocationExplainRequestBuilder prepareAllocationExplain() {
            return new ClusterAllocationExplainRequestBuilder(this, ClusterAllocationExplainAction.INSTANCE);
        }
    }

    static class IndicesAdmin implements IndicesAdminClient {
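With those three overloads, callers can pick listener, future, or builder style. A sketch of the builder form; the `setIndex`/`setShard`/`setPrimary` setters and the index name are assumptions not shown in this hunk, and a connected `client` is assumed in scope:

```java
// ask the master to explain the allocation of primary shard 0 of "my-index"
ClusterAllocationExplainResponse explanation = client.admin().cluster()
        .prepareAllocationExplain()
        .setIndex("my-index") // hypothetical index name
        .setShard(0)
        .setPrimary(true)
        .get();
```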
@@ -43,7 +43,6 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
@@ -58,8 +57,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**
 * The transport client allows to create a client that is not part of the cluster, but simply connects to one
 * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
@@ -108,11 +105,10 @@ public class TransportClient extends AbstractClient {
    }

    private PluginsService newPluginService(final Settings settings) {
        final Settings.Builder settingsBuilder = settingsBuilder()
        final Settings.Builder settingsBuilder = Settings.builder()
            .put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
            .put(InternalSettingsPreparer.prepareSettings(settings))
            .put(NetworkService.NETWORK_SERVER.getKey(), false)
            .put(Node.NODE_CLIENT_SETTING.getKey(), true)
            .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
        return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
    }
@@ -138,7 +138,7 @@ public class TransportClientNodesService extends AbstractComponent {
    public List<TransportAddress> transportAddresses() {
        List<TransportAddress> lstBuilder = new ArrayList<>();
        for (DiscoveryNode listedNode : listedNodes) {
            lstBuilder.add(listedNode.address());
            lstBuilder.add(listedNode.getAddress());
        }
        return Collections.unmodifiableList(lstBuilder);
    }
@@ -164,7 +164,7 @@ public class TransportClientNodesService extends AbstractComponent {
        for (TransportAddress transportAddress : transportAddresses) {
            boolean found = false;
            for (DiscoveryNode otherNode : listedNodes) {
                if (otherNode.address().equals(transportAddress)) {
                if (otherNode.getAddress().equals(transportAddress)) {
                    found = true;
                    logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
                    break;
@@ -180,7 +180,8 @@ public class TransportClientNodesService extends AbstractComponent {
        List<DiscoveryNode> builder = new ArrayList<>();
        builder.addAll(listedNodes());
        for (TransportAddress transportAddress : filtered) {
            DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress, minCompatibilityVersion);
            DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(),
                transportAddress, Collections.emptyMap(), Collections.emptySet(), minCompatibilityVersion);
            logger.debug("adding address [{}]", node);
            builder.add(node);
        }
@@ -197,7 +198,7 @@ public class TransportClientNodesService extends AbstractComponent {
        }
        List<DiscoveryNode> builder = new ArrayList<>();
        for (DiscoveryNode otherNode : listedNodes) {
            if (!otherNode.address().equals(transportAddress)) {
            if (!otherNode.getAddress().equals(transportAddress)) {
                builder.add(otherNode);
            } else {
                logger.debug("removing address [{}]", otherNode);
@@ -231,7 +232,8 @@ public class TransportClientNodesService extends AbstractComponent {

        private volatile int i;

        public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener, List<DiscoveryNode> nodes, int index) {
        public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener,
                             List<DiscoveryNode> nodes, int index) {
            this.callback = callback;
            this.listener = listener;
            this.nodes = nodes;
@@ -378,11 +380,15 @@ public class TransportClientNodesService extends AbstractComponent {
                        logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
                        newFilteredNodes.add(listedNode);
                    } else if (livenessResponse.getDiscoveryNode() != null) {
                        // use discovered information but do keep the original transport address, so people can control which address is exactly used.
                        // use discovered information but do keep the original transport address,
                        // so people can control which address is exactly used.
                        DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
                        newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(), nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(), nodeWithInfo.version()));
                        newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), nodeWithInfo.getHostName(),
                            nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(),
                            nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
                    } else {
                        // although we asked for one node, our target may not have completed initialization yet and doesn't have cluster nodes
                        // although we asked for one node, our target may not have completed
                        // initialization yet and doesn't have cluster nodes
                        logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", listedNode);
                        newNodes.add(listedNode);
                    }
@@ -436,8 +442,10 @@ public class TransportClientNodesService extends AbstractComponent {
                        return;
                    }
                }
                transportService.sendRequest(listedNode, ClusterStateAction.NAME, Requests.clusterStateRequest().clear().nodes(true).local(true),
                    TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).withTimeout(pingTimeout).build(),
                transportService.sendRequest(listedNode, ClusterStateAction.NAME,
                    Requests.clusterStateRequest().clear().nodes(true).local(true),
                    TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE)
                        .withTimeout(pingTimeout).build(),
                    new BaseTransportResponseHandler<ClusterStateResponse>() {

                        @Override
@@ -482,11 +490,12 @@ public class TransportClientNodesService extends AbstractComponent {
            HashSet<DiscoveryNode> newFilteredNodes = new HashSet<>();
            for (Map.Entry<DiscoveryNode, ClusterStateResponse> entry : clusterStateResponses.entrySet()) {
                if (!ignoreClusterName && !clusterName.equals(entry.getValue().getClusterName())) {
                    logger.warn("node {} not part of the cluster {}, ignoring...", entry.getValue().getState().nodes().localNode(), clusterName);
                    logger.warn("node {} not part of the cluster {}, ignoring...",
                        entry.getValue().getState().nodes().getLocalNode(), clusterName);
                    newFilteredNodes.add(entry.getKey());
                    continue;
                }
                for (ObjectCursor<DiscoveryNode> cursor : entry.getValue().getState().nodes().dataNodes().values()) {
                for (ObjectCursor<DiscoveryNode> cursor : entry.getValue().getState().nodes().getDataNodes().values()) {
                    newNodes.add(cursor.value);
                }
            }
@@ -186,7 +186,7 @@ public class ClusterChangedEvent {
     * Returns <code>true</code> iff the local node is the master node of the cluster.
     */
    public boolean localNodeMaster() {
        return state.nodes().localNodeMaster();
        return state.nodes().isLocalNodeElectedMaster();
    }

    /**
@@ -63,7 +63,7 @@ import java.util.Set;

/**
 * Represents the current state of the cluster.
 *
 * <p>
 * The cluster state object is immutable with an
 * exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable},
 * and cluster state {@link #status}, which is updated during cluster state publishing and applying
@@ -74,7 +74,7 @@ import java.util.Set;
 * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
 * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
 * publishing mechanism can be overridden by other discovery.
 *
 * <p>
 * The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state
 * differences instead of the entire state on each change. The publishing mechanism should only send differences
 * to a node if this node was present in the previous version of the cluster state. If a node is not present was
@ -135,7 +135,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
|
||||
public static <T extends Custom> T lookupPrototypeSafe(String type) {
|
||||
@SuppressWarnings("unchecked")
|
||||
T proto = (T)customPrototypes.get(type);
|
||||
T proto = (T) customPrototypes.get(type);
|
||||
if (proto == null) {
|
||||
throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
|
||||
}

@ -281,6 +281,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
sb.append("state uuid: ").append(stateUUID).append("\n");
sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
sb.append("meta data version: ").append(metaData.version()).append("\n");
for (IndexMetaData indexMetaData : metaData) {
final String TAB = " ";
sb.append(TAB).append(indexMetaData.getIndex());
sb.append(": v[").append(indexMetaData.getVersion()).append("]\n");
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
sb.append(TAB).append(TAB).append(shard).append(": ");
sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], ");
sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n");
}
}
sb.append(blocks().prettyPrint());
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());

@ -307,7 +317,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
* In essence that means that all the changes from the other cluster state are also reflected by the current one
*/
public boolean supersedes(ClusterState other) {
return this.nodes().masterNodeId() != null && this.nodes().masterNodeId().equals(other.nodes().masterNodeId()) && this.version() > other.version();
return this.nodes().getMasterNodeId() != null && this.nodes().getMasterNodeId().equals(other.nodes().getMasterNodeId()) && this.version() > other.version();

}

@ -372,7 +382,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}

if (metrics.contains(Metric.MASTER_NODE)) {
builder.field("master_node", nodes().masterNodeId());
builder.field("master_node", nodes().getMasterNodeId());
}

if (metrics.contains(Metric.BLOCKS)) {

@ -477,6 +487,12 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endArray();

builder.startObject(IndexMetaData.KEY_PRIMARY_TERMS);
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
builder.field(Integer.toString(shard), indexMetaData.primaryTerm(shard));
}
builder.endObject();

builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));

@ -487,6 +503,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endObject();

// index metadata
builder.endObject();
}
builder.endObject();

@ -683,16 +700,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}

/**
* @param data input bytes
* @param localNode used to set the local node in the cluster state.
* @param data      input bytes
* @param localNode used to set the local node in the cluster state.
*/
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(StreamInput.wrap(data), localNode);
}

/**
* @param in input stream
* @param localNode used to set the local node in the cluster state. can be null.
* @param in        input stream
* @param localNode used to set the local node in the cluster state. can be null.
*/
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
return PROTO.readFrom(in, localNode);
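A hedged round-trip sketch for these helpers (`state` and `localNode` are hypothetical variables; `ClusterState.Builder.toBytes(...)` is assumed to be the matching serializer in this class):

```java
// Serialize a cluster state and restore it with the helpers shown above.
byte[] bytes = ClusterState.Builder.toBytes(state);
ClusterState restored = ClusterState.fromBytes(bytes, localNode);
```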

@ -730,7 +747,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

@Override
public ClusterState readFrom(StreamInput in) throws IOException {
return readFrom(in, nodes.localNode());
return readFrom(in, nodes.getLocalNode());
}

@Override

@ -791,17 +808,17 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
metaData = proto.metaData.readDiffFrom(in);
blocks = proto.blocks.readDiffFrom(in);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}

@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
}

@Override

@ -19,6 +19,7 @@

package org.elasticsearch.cluster;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;

@ -29,7 +30,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;

@ -166,7 +170,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
// Check whether it was a data node that was added
boolean dataNodeAdded = false;
for (DiscoveryNode addedNode : event.nodesDelta().addedNodes()) {
if (addedNode.dataNode()) {
if (addedNode.isDataNode()) {
dataNodeAdded = true;
break;
}

@ -181,7 +185,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu

if (this.isMaster && event.nodesRemoved()) {
for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) {
if (removedNode.dataNode()) {
if (removedNode.isDataNode()) {
if (logger.isTraceEnabled()) {
logger.trace("Removing node from cluster info: {}", removedNode.getId());
}

@ -307,7 +311,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
@Override
public void onFailure(Throwable e) {
if (e instanceof ReceiveTimeoutTransportException) {
logger.error("NodeStatsAction timed out for ClusterInfoUpdateJob (reason [{}])", e.getMessage());
logger.error("NodeStatsAction timed out for ClusterInfoUpdateJob", e);
} else {
if (e instanceof ClusterBlockException) {
if (logger.isTraceEnabled()) {

@ -329,7 +333,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
ShardStats[] stats = indicesStatsResponse.getShards();
ImmutableOpenMap.Builder<String, Long> newShardSizes = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath = ImmutableOpenMap.builder();
buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath);
buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath, clusterService.state());
shardSizes = newShardSizes.build();
shardRoutingToDataPath = newShardRoutingToDataPath.build();
}

@ -337,7 +341,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
@Override
public void onFailure(Throwable e) {
if (e instanceof ReceiveTimeoutTransportException) {
logger.error("IndicesStatsAction timed out for ClusterInfoUpdateJob (reason [{}])", e.getMessage());
logger.error("IndicesStatsAction timed out for ClusterInfoUpdateJob", e);
} else {
if (e instanceof ClusterBlockException) {
if (logger.isTraceEnabled()) {

@ -378,14 +382,24 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}

static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, ImmutableOpenMap.Builder<String, Long> newShardSizes,
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath) {
ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
MetaData meta = state.getMetaData();
for (ShardStats s : stats) {
IndexMetaData indexMeta = meta.index(s.getShardRouting().index());
Settings indexSettings = indexMeta == null ? null : indexMeta.getSettings();
newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath());
long size = s.getStats().getStore().sizeInBytes();
String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting());
if (logger.isTraceEnabled()) {
logger.trace("shard: {} size: {}", sid, size);
}
if (indexSettings != null && IndexMetaData.isIndexUsingShadowReplicas(indexSettings)) {
// Shards on a shared filesystem should be considered of size 0
if (logger.isTraceEnabled()) {
logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid);
}
size = 0;
}
newShardSizes.put(sid, size);
}
}
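The shadow-replica special case above encodes a simple sizing rule. As a hedged, self-contained restatement (a hypothetical helper, not part of the codebase):

```java
// Shards of a shadow-replica index share one copy on a shared filesystem,
// so counting store bytes per shard would overstate disk usage.
static long effectiveShardSize(boolean usesShadowReplicas, long storeSizeInBytes) {
    return usesShadowReplicas ? 0L : storeSizeInBytes;
}
```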

@ -395,7 +409,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {
if (nodeStats.getFs() == null) {
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().getName());
} else {
FsInfo.Path leastAvailablePath = null;
FsInfo.Path mostAvailablePath = null;

@ -409,7 +423,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
mostAvailablePath = info;
}
}
String nodeId = nodeStats.getNode().id();
String nodeId = nodeStats.getNode().getId();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",

@ -30,7 +30,7 @@ public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePred
ClusterState.ClusterStateStatus newStatus) {
// checking if the masterNodeId changed is insufficient as the
// same master node might get re-elected after a disruption
return newState.nodes().masterNodeId() != null && newState != previousState;
return newState.nodes().getMasterNodeId() != null && newState != previousState;
}

@Override

@ -78,9 +78,9 @@ public class NodeIndexDeletedAction extends AbstractComponent {

public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) {
final DiscoveryNodes nodes = clusterState.nodes();
transportService.sendRequest(clusterState.nodes().masterNode(),
transportService.sendRequest(clusterState.nodes().getMasterNode(),
INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
if (nodes.localNode().isDataNode() == false) {
if (nodes.getLocalNode().isDataNode() == false) {
logger.trace("[{}] not acking store deletion (not a data node)", index);
return;
}

@ -104,7 +104,7 @@ public class NodeIndexDeletedAction extends AbstractComponent {
// due to a "currently canceled recovery" or so. The shard will delete itself BEFORE the lock is released so it's guaranteed to be
// deleted by the time we get the lock
indicesService.processPendingDeletes(indexSettings.getIndex(), indexSettings, new TimeValue(30, TimeUnit.MINUTES));
transportService.sendRequest(clusterState.nodes().masterNode(),
transportService.sendRequest(clusterState.nodes().getMasterNode(),
INDEX_STORE_DELETED_ACTION_NAME, new NodeIndexStoreDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
} catch (LockObtainFailedException exc) {
logger.warn("[{}] failed to lock all shards for index - timed out after 30 seconds", index);

@ -60,11 +60,11 @@ public class NodeMappingRefreshAction extends AbstractComponent {

public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
final DiscoveryNodes nodes = state.nodes();
if (nodes.masterNode() == null) {
if (nodes.getMasterNode() == null) {
logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
return;
}
transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
transportService.sendRequest(nodes.getMasterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}

private class NodeMappingRefreshTransportHandler implements TransportRequestHandler<NodeMappingRefreshRequest> {

@ -53,6 +53,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.NodeDisconnectedException;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;

@ -92,7 +93,7 @@ public class ShardStateAction extends AbstractComponent {
}

private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardRoutingEntry shardRoutingEntry, final Listener listener) {
DiscoveryNode masterNode = observer.observedState().nodes().masterNode();
DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode();
if (masterNode == null) {
logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);

@ -111,7 +112,7 @@ public class ShardStateAction extends AbstractComponent {
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard [{}]", exp, shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry);
listener.onFailure(exp.getCause());
listener.onFailure(exp instanceof RemoteTransportException ? exp.getCause() : exp);
}
}
});

@ -91,8 +91,8 @@ public final class ClusterStateHealth implements Iterable<ClusterIndexHealth>, S
public ClusterStateHealth(ClusterState clusterState, String[] concreteIndices) {
RoutingTableValidation validation = clusterState.routingTable().validate(clusterState.metaData());
validationFailures = validation.failures();
numberOfNodes = clusterState.nodes().size();
numberOfDataNodes = clusterState.nodes().dataNodes().size();
numberOfNodes = clusterState.nodes().getSize();
numberOfDataNodes = clusterState.nodes().getDataNodes().size();

for (String index : concreteIndices) {
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);

@ -19,6 +19,7 @@

package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

@ -29,6 +30,8 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;

@ -56,6 +59,7 @@ import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.text.ParseException;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;

@ -68,7 +72,6 @@ import java.util.function.Function;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;

/**

@ -217,6 +220,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
.numberOfShards(1).numberOfReplicas(0).build();

public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
static final String KEY_VERSION = "version";
static final String KEY_SETTINGS = "settings";
static final String KEY_STATE = "state";
static final String KEY_MAPPINGS = "mappings";
static final String KEY_ALIASES = "aliases";
public static final String KEY_PRIMARY_TERMS = "primary_terms";

public static final String INDEX_STATE_FILE_PREFIX = "state-";

private final int numberOfShards;

@ -224,6 +234,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

private final Index index;
private final long version;
private final long[] primaryTerms;

private final State state;

@ -247,7 +258,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Version indexUpgradedVersion;
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;

private IndexMetaData(Index index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,

@ -255,6 +266,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

this.index = index;
this.version = version;
this.primaryTerms = primaryTerms;
assert primaryTerms.length == numberOfShards;
this.state = state;
this.numberOfShards = numberOfShards;
this.numberOfReplicas = numberOfReplicas;

@ -296,6 +309,16 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this.version;
}

/**
* The term of the current selected primary. This is a non-negative number incremented when
* a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary
* See {@link AllocationService#updateMetaDataWithRoutingTable(MetaData, RoutingTable, RoutingTable)}.
**/
public long primaryTerm(int shardId) {
return this.primaryTerms[shardId];
}
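A quick usage sketch of the accessor added above (`indexMetaData` is a hypothetical `IndexMetaData` instance, not part of the diff):

```java
// Dump the primary term of every shard of an index.
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
    long term = indexMetaData.primaryTerm(shard);
    System.out.println("shard " + shard + " primary term: " + term);
}
```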

/**
* Return the {@link Version} on which this index has been created. This
* information is typically useful for backward compatibility.

@ -416,6 +439,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

IndexMetaData that = (IndexMetaData) o;

if (version != that.version) {
return false;
}

if (!aliases.equals(that.aliases)) {
return false;
}

@ -434,6 +461,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (!customs.equals(that.customs)) {
return false;
}

if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
return false;
}
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
return false;
}

@ -443,14 +474,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
@Override
public int hashCode() {
int result = index.hashCode();
result = 31 * result + Long.hashCode(version);
result = 31 * result + state.hashCode();
result = 31 * result + aliases.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode();
result = 31 * result + customs.hashCode();
result = 31 * result + Arrays.hashCode(primaryTerms);
result = 31 * result + activeAllocationIds.hashCode();
return result;
}

@Override
public Diff<IndexMetaData> diff(IndexMetaData previousState) {
return new IndexMetaDataDiff(previousState, this);

@ -476,6 +511,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

private final String index;
private final long version;
private final long[] primaryTerms;
private final State state;
private final Settings settings;
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;

@ -488,11 +524,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
version = after.version;
state = after.state;
settings = after.settings;
primaryTerms = after.primaryTerms;
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
}

public IndexMetaDataDiff(StreamInput in) throws IOException {

@ -500,22 +537,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
version = in.readLong();
state = State.fromId(in.readByte());
settings = Settings.readSettingsFromStream(in);
primaryTerms = in.readVLongArray();
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}

@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
DiffableUtils.StringSetValueSerializer.getInstance());
}

@Override

@ -524,6 +562,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeLong(version);
out.writeByte(state.id);
Settings.writeSettingsToStream(settings, out);
out.writeVLongArray(primaryTerms);
mappings.writeTo(out);
aliases.writeTo(out);
customs.writeTo(out);

@ -536,6 +575,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.version(version);
builder.state(state);
builder.settings(settings);
builder.primaryTerms(primaryTerms);
builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs));

@ -550,6 +590,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.version(in.readLong());
builder.state(State.fromId(in.readByte()));
builder.settings(readSettingsFromStream(in));
builder.primaryTerms(in.readVLongArray());
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in);

@ -581,6 +622,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeLong(version);
out.writeByte(state.id());
writeSettingsToStream(settings, out);
out.writeVLongArray(primaryTerms);
out.writeVInt(mappings.size());
for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
cursor.value.writeTo(out);

@ -614,6 +656,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private String index;
private State state = State.OPEN;
private long version = 1;
private long[] primaryTerms = null;
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;

@ -633,6 +676,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.state = indexMetaData.state;
this.version = indexMetaData.version;
this.settings = indexMetaData.getSettings();
this.primaryTerms = indexMetaData.primaryTerms.clone();
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);

@ -649,7 +693,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}

public Builder numberOfShards(int numberOfShards) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
settings = Settings.builder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
return this;
}

@ -658,7 +702,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}

public Builder numberOfReplicas(int numberOfReplicas) {
settings = settingsBuilder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
settings = Settings.builder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
return this;
}

@ -667,13 +711,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}

public Builder creationDate(long creationDate) {
settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
settings = Settings.builder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
return this;
}

public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
return settings(settings.build());
}

public Builder settings(Settings settings) {

@ -741,6 +784,42 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}

/**
* returns the primary term for the given shard.
* See {@link IndexMetaData#primaryTerm(int)} for more information.
*/
public long primaryTerm(int shardId) {
if (primaryTerms == null) {
initializePrimaryTerms();
}
return this.primaryTerms[shardId];
}

/**
* sets the primary term for the given shard.
* See {@link IndexMetaData#primaryTerm(int)} for more information.
*/
public Builder primaryTerm(int shardId, long primaryTerm) {
if (primaryTerms == null) {
initializePrimaryTerms();
}
this.primaryTerms[shardId] = primaryTerm;
return this;
}

private void primaryTerms(long[] primaryTerms) {
this.primaryTerms = primaryTerms.clone();
}

private void initializePrimaryTerms() {
assert primaryTerms == null;
if (numberOfShards() < 0) {
throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
}
primaryTerms = new long[numberOfShards()];
}
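As a usage sketch (a hypothetical builder chain, not taken from the diff), the ordering constraint enforced by `initializePrimaryTerms()` means the shard count must be set before any per-shard term:

```java
// Hedged sketch: number of shards must be set before primary terms,
// otherwise initializePrimaryTerms() cannot size the array.
IndexMetaData indexMetaData = IndexMetaData.builder("my_index")
        .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
        .numberOfShards(2)
        .numberOfReplicas(1)
        .primaryTerm(0, 3L)   // e.g. shard 0 saw three primary reassignments
        .primaryTerm(1, 1L)
        .build();
```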

public IndexMetaData build() {
ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
Settings tmpSettings = settings;

@ -815,27 +894,34 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
minimumCompatibleLuceneVersion = null;
}

if (primaryTerms == null) {
initializePrimaryTerms();
} else if (primaryTerms.length != numberOfShards) {
throw new IllegalStateException("primaryTerms length is [" + primaryTerms.length
+ "] but should be equal to number of shards [" + numberOfShards() + "]");
}

final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
}

public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexMetaData.getIndex().getName(), XContentBuilder.FieldCaseConversion.NONE);

builder.field("version", indexMetaData.getVersion());
builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
builder.field(KEY_VERSION, indexMetaData.getVersion());
builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));

boolean binary = params.paramAsBoolean("binary", false);

builder.startObject("settings");
builder.startObject(KEY_SETTINGS);
for (Map.Entry<String, String> entry : indexMetaData.getSettings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();

builder.startArray("mappings");
builder.startArray(KEY_MAPPINGS);
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
if (binary) {
builder.value(cursor.value.source().compressed());

@ -855,12 +941,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.endObject();
}

builder.startObject("aliases");
builder.startObject(KEY_ALIASES);
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();

builder.startArray(KEY_PRIMARY_TERMS);
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
builder.value(indexMetaData.primaryTerm(i));
}
builder.endArray();

builder.startObject(KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
builder.startArray(String.valueOf(cursor.key));

@ -895,9 +987,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
builder.settings(Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
} else if ("mappings".equals(currentFieldName)) {
if (KEY_SETTINGS.equals(currentFieldName)) {
builder.settings(Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
} else if (KEY_MAPPINGS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();

@ -909,7 +1001,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
throw new IllegalArgumentException("Unexpected token: " + token);
}
}
} else if ("aliases".equals(currentFieldName)) {
} else if (KEY_ALIASES.equals(currentFieldName)) {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}

@ -949,7 +1041,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
if (KEY_MAPPINGS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
builder.putMapping(new MappingMetaData(new CompressedXContent(parser.binaryValue())));

@ -961,13 +1053,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
}
} else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) {
LongArrayList list = new LongArrayList();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_NUMBER) {
list.add(parser.longValue());
} else {
throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]");
}
}
builder.primaryTerms(list.toArray());
} else {
throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName);
}
} else if (token.isValue()) {
if ("state".equals(currentFieldName)) {
if (KEY_STATE.equals(currentFieldName)) {
builder.state(State.fromString(parser.text()));
} else if ("version".equals(currentFieldName)) {
} else if (KEY_VERSION.equals(currentFieldName)) {
builder.version(parser.longValue());
} else {
throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");

@ -376,7 +376,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
Settings.Builder templateSettingsBuilder = Settings.settingsBuilder();
Settings.Builder templateSettingsBuilder = Settings.builder();
templateSettingsBuilder.put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
builder.settings(templateSettingsBuilder.build());
} else if ("mappings".equals(currentFieldName)) {

@ -76,7 +76,6 @@ import java.util.TreeMap;

import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.util.set.Sets.newHashSet;

@ -181,7 +180,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
this.version = version;
this.transientSettings = transientSettings;
this.persistentSettings = persistentSettings;
this.settings = Settings.settingsBuilder().put(persistentSettings).put(transientSettings).build();
this.settings = Settings.builder().put(persistentSettings).put(transientSettings).build();
this.indices = indices;
this.customs = customs;
this.templates = templates;

@ -925,7 +924,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
throw new IndexNotFoundException(index);
}
put(IndexMetaData.builder(indexMetaData)
.settings(settingsBuilder().put(indexMetaData.getSettings()).put(settings)));
.settings(Settings.builder().put(indexMetaData.getSettings()).put(settings)));
}
return this;
}

@ -1124,7 +1123,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
builder.persistentSettings(Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build());
builder.persistentSettings(Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build());
} else if ("indices".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
builder.put(IndexMetaData.Builder.fromXContent(parser), false);

@ -86,7 +86,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUI
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**
* Service responsible for submitting create index requests

@ -175,7 +174,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}

public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
Settings.Builder updatedSettingsBuilder = Settings.builder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(updatedSettingsBuilder);
request.settings(updatedSettingsBuilder.build());

@ -265,7 +264,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
}

Settings.Builder indexSettingsBuilder = settingsBuilder();
Settings.Builder indexSettingsBuilder = Settings.builder();
// apply templates, here, in reverse order, since first ones are better matching
for (int i = templates.size() - 1; i >= 0; i--) {
indexSettingsBuilder.put(templates.get(i).settings());

@ -294,7 +293,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {

if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
DiscoveryNodes nodes = currentState.nodes();
final Version createdVersion = Version.smallest(version, nodes.smallestNonClientNodeVersion());
final Version createdVersion = Version.smallest(version, nodes.getSmallestNonClientNodeVersion());
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
}

@ -101,9 +101,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
metaDataBuilder.remove(indexName);
}
// wait for events from all nodes that it has been removed from their respective metadata...
int count = currentState.nodes().size();
int count = currentState.nodes().getSize();
// add the notifications that the store was deleted from *data* nodes
count += currentState.nodes().dataNodes().size();
count += currentState.nodes().getDataNodes().size();
final AtomicInteger counter = new AtomicInteger(count * indices.size());

// this listener will be notified once we get back a notification based on the cluster state change below.

@ -106,7 +106,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}

public void putTemplate(final PutRequest request, final PutListener listener) {
Settings.Builder updatedSettingsBuilder = Settings.settingsBuilder();
Settings.Builder updatedSettingsBuilder = Settings.builder();
updatedSettingsBuilder.put(request.settings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
request.settings(updatedSettingsBuilder.build());

@ -296,7 +296,7 @@ public class MetaDataMappingService extends AbstractComponent {
}
assert mappingType != null;

if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorFieldMapper.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
MetaData.Builder builder = MetaData.builder(metaData);

@ -52,8 +52,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**
* Service responsible for submitting update index settings requests
*/

@ -79,11 +77,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
@Override
public void clusterChanged(ClusterChangedEvent event) {
// update an index with number of replicas based on data nodes if possible
if (!event.state().nodes().localNodeMaster()) {
if (!event.state().nodes().isLocalNodeElectedMaster()) {
return;
}
// we will want to know this for translating "all" to a number
final int dataNodeCount = event.state().nodes().dataNodes().size();
final int dataNodeCount = event.state().nodes().getDataNodes().size();

Map<Integer, List<Index>> nrReplicasChanged = new HashMap<>();
// we need to do this each time in case it was changed by update settings

@ -124,7 +122,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
if (nrReplicasChanged.size() > 0) {
// update settings and kick of a reroute (implicit) for them to take effect
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
Settings settings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<Index> indices = nrReplicasChanged.get(fNumberOfReplicas);

UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest()

@ -152,7 +150,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}

public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
final Settings normalizedSettings = Settings.settingsBuilder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
final Settings normalizedSettings = Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
Settings.Builder settingsForClosedIndices = Settings.builder();
Settings.Builder settingsForOpenIndices = Settings.builder();
Settings.Builder skipppedSettings = Settings.builder();

@ -316,7 +314,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
if (Version.CURRENT.equals(indexMetaData.getCreationVersion()) == false) {
// No reason to pollute the settings, we didn't really upgrade anything
metaDataBuilder.put(IndexMetaData.builder(indexMetaData)
.settings(settingsBuilder().put(indexMetaData.getSettings())
.settings(Settings.builder().put(indexMetaData.getSettings())
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue().v2())
.put(IndexMetaData.SETTING_VERSION_UPGRADED, entry.getValue().v1())
)

@ -154,7 +154,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse repository [{}], incompatible params", name);
}
settings = Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build();
settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build();
} else {
throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", name, currentFieldName);
}

@ -19,14 +19,11 @@

package org.elasticsearch.cluster.node;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.transport.TransportAddressSerializers;

@ -36,22 +33,20 @@ import org.elasticsearch.node.Node;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;

/**
* A discovery node represents a node that is part of the cluster.
*/
public class DiscoveryNode implements Streamable, ToXContent {
public class DiscoveryNode implements Writeable<DiscoveryNode>, ToXContent {

public static final String DATA_ATTR = "data";
public static final String MASTER_ATTR = "master";
public static final String CLIENT_ATTR = "client";
public static final String INGEST_ATTR = "ingest";

public static boolean localNode(Settings settings) {
public static boolean isLocalNode(Settings settings) {
if (Node.NODE_LOCAL_SETTING.exists(settings)) {
return Node.NODE_LOCAL_SETTING.get(settings);
}

@ -69,66 +64,82 @@ public class DiscoveryNode implements Streamable, ToXContent {
}

public static boolean nodeRequiresLocalStorage(Settings settings) {
return (Node.NODE_CLIENT_SETTING.get(settings) || (Node.NODE_DATA_SETTING.get(settings) == false && Node.NODE_MASTER_SETTING.get(settings) == false)) == false;
return Node.NODE_DATA_SETTING.get(settings) || Node.NODE_MASTER_SETTING.get(settings);
}

public static boolean clientNode(Settings settings) {
return Node.NODE_CLIENT_SETTING.get(settings);
public static boolean isMasterNode(Settings settings) {
return Node.NODE_MASTER_SETTING.get(settings);
}

public static boolean masterNode(Settings settings) {
if (Node.NODE_MASTER_SETTING.exists(settings)) {
return Node.NODE_MASTER_SETTING.get(settings);
}
return clientNode(settings) == false;
public static boolean isDataNode(Settings settings) {
return Node.NODE_DATA_SETTING.get(settings);
}

public static boolean dataNode(Settings settings) {
if (Node.NODE_DATA_SETTING.exists(settings)) {
return Node.NODE_DATA_SETTING.get(settings);
}
return clientNode(settings) == false;
}

public static boolean ingestNode(Settings settings) {
public static boolean isIngestNode(Settings settings) {
return Node.NODE_INGEST_SETTING.get(settings);
}
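A small usage sketch of the renamed role predicates (hypothetical setting values; `Settings.builder()` as used elsewhere in this commit):

```java
// Hedged sketch: a master-eligible, non-data node.
Settings settings = Settings.builder()
        .put("node.master", true)
        .put("node.data", false)
        .build();
boolean master = DiscoveryNode.isMasterNode(settings);              // true
boolean data = DiscoveryNode.isDataNode(settings);                  // false
boolean storage = DiscoveryNode.nodeRequiresLocalStorage(settings); // true: master or data implies local storage
```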

public static final List<DiscoveryNode> EMPTY_LIST = Collections.emptyList();
private final String nodeName;
private final String nodeId;
private final String hostName;
private final String hostAddress;
private final TransportAddress address;
private final Map<String, String> attributes;
private final Version version;
private final Set<Role> roles;

private String nodeName = "";
private String nodeId;
private String hostName;
private String hostAddress;
private TransportAddress address;
private ImmutableOpenMap<String, String> attributes;
private Version version = Version.CURRENT;

DiscoveryNode() {
/**
* Creates a new {@link DiscoveryNode} by reading from the stream provided as argument
* @param in the stream
* @throws IOException if there is an error while reading from the stream
*/
public DiscoveryNode(StreamInput in) throws IOException {
this.nodeName = in.readString().intern();
this.nodeId = in.readString().intern();
this.hostName = in.readString().intern();
this.hostAddress = in.readString().intern();
this.address = TransportAddressSerializers.addressFromStream(in);
int size = in.readVInt();
this.attributes = new HashMap<>(size);
for (int i = 0; i < size; i++) {
this.attributes.put(in.readString(), in.readString());
}
int rolesSize = in.readVInt();
this.roles = EnumSet.noneOf(Role.class);
for (int i = 0; i < rolesSize; i++) {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= Role.values().length) {
throw new IOException("Unknown Role ordinal [" + ordinal + "]");
}
this.roles.add(Role.values()[ordinal]);
}
this.version = Version.readVersion(in);
}
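Because `DiscoveryNode` now implements `Writeable<DiscoveryNode>`, wire round-trips pair this stream constructor with `writeTo`. A hedged sketch (`node` is a hypothetical instance; `BytesStreamOutput` and `StreamInput.wrap(...)` are assumed to be the in-memory stream helpers of this codebase):

```java
// Hedged round-trip sketch for the stream constructor above.
BytesStreamOutput out = new BytesStreamOutput();
node.writeTo(out);                                                  // serialize
DiscoveryNode copy = new DiscoveryNode(StreamInput.wrap(out.bytes().toBytes())); // deserialize
```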

/**
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
* it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
*
* @param nodeId the nodes unique id.
* @param address the nodes transport address
* @param version the version of the node.
* @param nodeId the nodes unique id.
* @param address the nodes transport address
* @param attributes node attributes
* @param roles node roles
* @param version the version of the node.
*/
public DiscoveryNode(String nodeId, TransportAddress address, Version version) {
this("", nodeId, address, Collections.emptyMap(), version);
public DiscoveryNode(String nodeId, TransportAddress address, Map<String, String> attributes, Set<Role> roles, Version version) {
this("", nodeId, address.getHost(), address.getAddress(), address, attributes, roles, version);
}

/**
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
* it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>

@ -137,17 +148,19 @@ public class DiscoveryNode implements Streamable, ToXContent {
* @param nodeId the nodes unique id.
* @param address the nodes transport address
* @param attributes node attributes
* @param roles node roles
* @param version the version of the node.
*/
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map<String, String> attributes, Version version) {
this(nodeName, nodeId, address.getHost(), address.getAddress(), address, attributes, version);
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map<String, String> attributes,
Set<Role> roles, Version version) {
this(nodeName, nodeId, address.getHost(), address.getAddress(), address, attributes, roles, version);
}

/**
* Creates a new {@link DiscoveryNode}.
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
* it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>

@ -158,170 +171,97 @@ public class DiscoveryNode implements Streamable, ToXContent {
* @param hostAddress the nodes host address
* @param address the nodes transport address
* @param attributes node attributes
* @param roles node roles
* @param version the version of the node.
*/
public DiscoveryNode(String nodeName, String nodeId, String hostName, String hostAddress, TransportAddress address, Map<String, String> attributes, Version version) {
public DiscoveryNode(String nodeName, String nodeId, String hostName, String hostAddress, TransportAddress address,
Map<String, String> attributes, Set<Role> roles, Version version) {
if (nodeName != null) {
this.nodeName = nodeName.intern();
} else {
this.nodeName = "";
}
ImmutableOpenMap.Builder<String, String> builder = ImmutableOpenMap.builder();
for (Map.Entry<String, String> entry : attributes.entrySet()) {
builder.put(entry.getKey().intern(), entry.getValue().intern());
}
this.attributes = builder.build();
this.nodeId = nodeId.intern();
this.hostName = hostName.intern();
this.hostAddress = hostAddress.intern();
this.address = address;
this.version = version;
}

/**
* Creates a new {@link DiscoveryNode}.
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
* it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
*
* @param nodeName the nodes name
* @param nodeId the nodes unique id.
* @param hostName the nodes hostname
* @param hostAddress the nodes host address
* @param address the nodes transport address
* @param attributes node attributes
* @param version the version of the node.
*/
public DiscoveryNode(String nodeName, String nodeId, String hostName, String hostAddress, TransportAddress address, ImmutableOpenMap<String, String> attributes, Version version) {
if (nodeName != null) {
this.nodeName = nodeName.intern();
if (version == null) {
this.version = Version.CURRENT;
} else {
this.version = version;
}
ImmutableOpenMap.Builder<String, String> builder = ImmutableOpenMap.builder();
for (ObjectObjectCursor<String, String> entry : attributes) {
builder.put(entry.key.intern(), entry.value.intern());
}
this.attributes = builder.build();
this.nodeId = nodeId.intern();
this.hostName = hostName.intern();
this.hostAddress = hostAddress.intern();
this.address = address;
this.version = version;
}

/**
* The address that the node can be communicated with.
*/
public TransportAddress address() {
return address;
this.attributes = Collections.unmodifiableMap(attributes);
//verify that no node roles are being provided as attributes
Predicate<Map<String, String>> predicate = (attrs) -> {
for (Role role : Role.values()) {
|
||||
assert attrs.containsKey(role.getRoleName()) == false;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
assert predicate.test(attributes);
|
||||
Set<Role> rolesSet = EnumSet.noneOf(Role.class);
|
||||
rolesSet.addAll(roles);
|
||||
this.roles = Collections.unmodifiableSet(rolesSet);
|
||||
}
|
||||
|
||||
/**
|
||||
* The address that the node can be communicated with.
|
||||
*/
|
||||
public TransportAddress getAddress() {
|
||||
return address();
|
||||
}
|
||||
|
||||
/**
|
||||
* The unique id of the node.
|
||||
*/
|
||||
public String id() {
|
||||
return nodeId;
|
||||
return address;
|
||||
}
|
||||
|
||||
/**
|
||||
* The unique id of the node.
|
||||
*/
|
||||
public String getId() {
|
||||
return id();
|
||||
}
|
||||
|
||||
/**
|
||||
* The name of the node.
|
||||
*/
|
||||
public String name() {
|
||||
return this.nodeName;
|
||||
return nodeId;
|
||||
}
|
||||
|
||||
/**
|
||||
* The name of the node.
|
||||
*/
|
||||
public String getName() {
|
||||
return name();
|
||||
return this.nodeName;
|
||||
}
|
||||
|
||||
/**
|
||||
* The node attributes.
|
||||
*/
|
||||
public ImmutableOpenMap<String, String> attributes() {
|
||||
public Map<String, String> getAttributes() {
|
||||
return this.attributes;
|
||||
}
|
||||
|
||||
/**
|
||||
* The node attributes.
|
||||
*/
|
||||
public ImmutableOpenMap<String, String> getAttributes() {
|
||||
return attributes();
|
||||
}
|
||||
|
||||
/**
|
||||
* Should this node hold data (shards) or not.
|
||||
*/
|
||||
public boolean dataNode() {
|
||||
String data = attributes.get(DATA_ATTR);
|
||||
if (data == null) {
|
||||
return !clientNode();
|
||||
}
|
||||
return Booleans.parseBooleanExact(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Should this node hold data (shards) or not.
|
||||
*/
|
||||
public boolean isDataNode() {
|
||||
return dataNode();
|
||||
}
|
||||
|
||||
/**
|
||||
* Is the node a client node or not.
|
||||
*/
|
||||
public boolean clientNode() {
|
||||
String client = attributes.get(CLIENT_ATTR);
|
||||
return client != null && Booleans.parseBooleanExact(client);
|
||||
}
|
||||
|
||||
public boolean isClientNode() {
|
||||
return clientNode();
|
||||
}
|
||||
|
||||
/**
|
||||
* Can this node become master or not.
|
||||
*/
|
||||
public boolean masterNode() {
|
||||
String master = attributes.get(MASTER_ATTR);
|
||||
if (master == null) {
|
||||
return !clientNode();
|
||||
}
|
||||
return Booleans.parseBooleanExact(master);
|
||||
return roles.contains(Role.DATA);
|
||||
}
|
||||
|
||||
/**
|
||||
* Can this node become master or not.
|
||||
*/
|
||||
public boolean isMasterNode() {
|
||||
return masterNode();
|
||||
return roles.contains(Role.MASTER);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a boolean that tells whether this an ingest node or not
|
||||
*/
|
||||
public boolean isIngestNode() {
|
||||
String ingest = attributes.get(INGEST_ATTR);
|
||||
return ingest == null ? true : Booleans.parseBooleanExact(ingest);
|
||||
return roles.contains(Role.INGEST);
|
||||
}
|
||||
|
||||
public Version version() {
|
||||
/**
|
||||
* Returns a set of all the roles that the node fulfills.
|
||||
* If the node doesn't have any specific role, the set is returned empty, which means that the node is a coordinating only node.
|
||||
*/
|
||||
public Set<Role> getRoles() {
|
||||
return roles;
|
||||
}
|
||||
|
||||
public Version getVersion() {
|
||||
return this.version;
|
||||
}
|
||||
|
||||
@ -333,30 +273,9 @@ public class DiscoveryNode implements Streamable, ToXContent {
|
||||
return this.hostAddress;
|
||||
}
|
||||
|
||||
public Version getVersion() {
|
||||
return this.version;
|
||||
}
|
||||
|
||||
public static DiscoveryNode readNode(StreamInput in) throws IOException {
|
||||
DiscoveryNode node = new DiscoveryNode();
|
||||
node.readFrom(in);
|
||||
return node;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
nodeName = in.readString().intern();
|
||||
nodeId = in.readString().intern();
|
||||
hostName = in.readString().intern();
|
||||
hostAddress = in.readString().intern();
|
||||
address = TransportAddressSerializers.addressFromStream(in);
|
||||
int size = in.readVInt();
|
||||
ImmutableOpenMap.Builder<String, String> attributes = ImmutableOpenMap.builder(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
attributes.put(in.readString().intern(), in.readString().intern());
|
||||
}
|
||||
this.attributes = attributes.build();
|
||||
version = Version.readVersion(in);
|
||||
public DiscoveryNode readFrom(StreamInput in) throws IOException {
|
||||
return new DiscoveryNode(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -367,9 +286,13 @@ public class DiscoveryNode implements Streamable, ToXContent {
|
||||
out.writeString(hostAddress);
|
||||
addressToStream(out, address);
|
||||
out.writeVInt(attributes.size());
|
||||
for (ObjectObjectCursor<String, String> entry : attributes) {
|
||||
out.writeString(entry.key);
|
||||
out.writeString(entry.value);
|
||||
for (Map.Entry<String, String> entry : attributes.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
out.writeString(entry.getValue());
|
||||
}
|
||||
out.writeVInt(roles.size());
|
||||
for (Role role : roles) {
|
||||
out.writeVInt(role.ordinal());
|
||||
}
|
||||
Version.writeVersion(version, out);
|
||||
}
|
||||
@ -412,17 +335,43 @@ public class DiscoveryNode implements Streamable, ToXContent {
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(id(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("name", name());
|
||||
builder.field("transport_address", address().toString());
|
||||
builder.startObject(getId(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("name", getName());
|
||||
builder.field("transport_address", getAddress().toString());
|
||||
|
||||
builder.startObject("attributes");
|
||||
for (ObjectObjectCursor<String, String> attr : attributes) {
|
||||
builder.field(attr.key, attr.value);
|
||||
for (Map.Entry<String, String> entry : attributes.entrySet()) {
|
||||
builder.field(entry.getKey(), entry.getValue());
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enum that holds all the possible roles that that a node can fulfill in a cluster.
|
||||
* Each role has its name and a corresponding abbreviation used by cat apis.
|
||||
*/
|
||||
public enum Role {
|
||||
MASTER("master", "m"),
|
||||
DATA("data", "d"),
|
||||
INGEST("ingest", "i");
|
||||
|
||||
private final String roleName;
|
||||
private final String abbreviation;
|
||||
|
||||
Role(String roleName, String abbreviation) {
|
||||
this.roleName = roleName;
|
||||
this.abbreviation = abbreviation;
|
||||
}
|
||||
|
||||
public String getRoleName() {
|
||||
return roleName;
|
||||
}
|
||||
|
||||
public String getAbbreviation() {
|
||||
return abbreviation;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
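The DiscoveryNode diff above replaces the attribute-driven `data`/`master`/`client` flags with an explicit `Role` set. A minimal usage sketch (not part of this commit) of the new constructor and accessors; the use of the era's `LocalTransportAddress` as a stand-in transport address is an assumption:

```java
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.transport.LocalTransportAddress;

import java.util.Collections;
import java.util.EnumSet;

public class DiscoveryNodeRolesSketch {
    public static void main(String[] args) {
        // Roles are passed explicitly instead of being encoded in node attributes.
        DiscoveryNode node = new DiscoveryNode("node-1", new LocalTransportAddress("node-1"),
                Collections.emptyMap(),
                EnumSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA),
                Version.CURRENT);

        System.out.println(node.isMasterNode()); // true
        System.out.println(node.isDataNode());   // true
        System.out.println(node.isIngestNode()); // false -- INGEST was not granted
        System.out.println(node.getRoles());     // an empty set would mean coordinating-only
    }
}
```
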
@ -33,13 +33,11 @@ import java.util.Map;
*/
public class DiscoveryNodeFilters {

public static enum OpType {
public enum OpType {
AND,
OR
}

;

public static DiscoveryNodeFilters buildFromSettings(OpType opType, String prefix, Settings settings) {
return buildFromKeyValue(opType, settings.getByPrefix(prefix).getAsMap());
}
@ -84,8 +82,8 @@ public class DiscoveryNodeFilters {
if ("_ip".equals(attr)) {
// We check both the host_ip and the publish_ip
String publishAddress = null;
if (node.address() instanceof InetSocketTransportAddress) {
publishAddress = NetworkAddress.format(((InetSocketTransportAddress) node.address()).address().getAddress());
if (node.getAddress() instanceof InetSocketTransportAddress) {
publishAddress = NetworkAddress.format(((InetSocketTransportAddress) node.getAddress()).address().getAddress());
}

boolean match = matchByIP(values, node.getHostAddress(), publishAddress);
@ -118,8 +116,8 @@ public class DiscoveryNodeFilters {
} else if ("_publish_ip".equals(attr)) {
// We check explicitly only the publish_ip
String address = null;
if (node.address() instanceof InetSocketTransportAddress) {
address = NetworkAddress.format(((InetSocketTransportAddress) node.address()).address().getAddress());
if (node.getAddress() instanceof InetSocketTransportAddress) {
address = NetworkAddress.format(((InetSocketTransportAddress) node.getAddress()).address().getAddress());
}

boolean match = matchByIP(values, address, null);
@ -157,7 +155,7 @@ public class DiscoveryNodeFilters {
}
} else if ("_id".equals(attr)) {
for (String value : values) {
if (node.id().equals(value)) {
if (node.getId().equals(value)) {
if (opType == OpType.OR) {
return true;
}
@ -169,7 +167,7 @@ public class DiscoveryNodeFilters {
}
} else if ("_name".equals(attr) || "name".equals(attr)) {
for (String value : values) {
if (Regex.simpleMatch(value, node.name())) {
if (Regex.simpleMatch(value, node.getName())) {
if (opType == OpType.OR) {
return true;
}
@ -180,7 +178,7 @@ public class DiscoveryNodeFilters {
}
}
} else {
String nodeAttributeValue = node.attributes().get(attr);
String nodeAttributeValue = node.getAttributes().get(attr);
if (nodeAttributeValue == null) {
if (opType == OpType.AND) {
return false;

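For context, a hedged sketch of how these filters are typically built and applied. Only `buildFromSettings` and the `_ip`/`_id`/`_name` attributes appear in the hunks above; `Settings.builder()` and the `match` method name are assumptions from the era's API:

```java
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.common.settings.Settings;

public class NodeFilterSketch {
    // Returns true when the node satisfies every "require" filter (AND semantics).
    static boolean matchesRequireFilters(DiscoveryNode node) {
        Settings settings = Settings.builder()
                .put("cluster.routing.allocation.require._name", "hot-*")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(
                DiscoveryNodeFilters.OpType.AND, "cluster.routing.allocation.require.", settings);
        return filters.match(node); // assumed accessor name
    }
}
```
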
@ -20,6 +20,7 @@
package org.elasticsearch.cluster.node;

import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
@ -28,11 +29,14 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.node.Node;

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;

/**
@ -62,20 +66,17 @@ public class DiscoveryNodeService extends AbstractComponent {
}

public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
Map<String, String> attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap());
attributes.remove("name"); // name is extracted in other places
if (attributes.containsKey("client")) {
if (attributes.get("client").equals("false")) {
attributes.remove("client"); // this is the default
} else {
// if we are client node, don't store data ...
attributes.put("data", "false");
}
final String nodeId = generateNodeId(settings);
Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap());
Set<DiscoveryNode.Role> roles = new HashSet<>();
if (Node.NODE_INGEST_SETTING.get(settings)) {
roles.add(DiscoveryNode.Role.INGEST);
}
if (attributes.containsKey("data")) {
if (attributes.get("data").equals("true")) {
attributes.remove("data");
}
if (Node.NODE_MASTER_SETTING.get(settings)) {
roles.add(DiscoveryNode.Role.MASTER);
}
if (Node.NODE_DATA_SETTING.get(settings)) {
roles.add(DiscoveryNode.Role.DATA);
}

for (CustomAttributesProvider provider : customAttributesProviders) {
@ -92,9 +93,8 @@ public class DiscoveryNodeService extends AbstractComponent {
logger.warn("failed to build custom attributes from provider [{}]", e, provider);
}
}

final String nodeId = generateNodeId(settings);
return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version);
return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes,
roles, version);
}

public interface CustomAttributesProvider {

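The buildLocalNode change above derives roles from typed settings instead of ad-hoc `node.*` attribute parsing. A small sketch of that mapping (assuming the `node.master`/`node.data`/`node.ingest` boolean settings behind the `Node.NODE_*_SETTING` constants, with ingest defaulting to true as shown earlier in the DiscoveryNode diff):

```java
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;

import java.util.EnumSet;
import java.util.Set;

public class RoleMappingSketch {
    // Mirrors the role derivation in buildLocalNode: one typed boolean setting per role.
    static Set<DiscoveryNode.Role> rolesFor(Settings settings) {
        Set<DiscoveryNode.Role> roles = EnumSet.noneOf(DiscoveryNode.Role.class);
        if (Node.NODE_MASTER_SETTING.get(settings)) {
            roles.add(DiscoveryNode.Role.MASTER);
        }
        if (Node.NODE_DATA_SETTING.get(settings)) {
            roles.add(DiscoveryNode.Role.DATA);
        }
        if (Node.NODE_INGEST_SETTING.get(settings)) {
            roles.add(DiscoveryNode.Role.INGEST);
        }
        return roles;
    }

    public static void main(String[] args) {
        // A dedicated master-eligible node: node.data=false, ingest left at its default.
        Settings settings = Settings.builder()
                .put("node.master", true)
                .put("node.data", false)
                .build();
        System.out.println(rolesFor(settings)); // expected: [MASTER, INGEST]
    }
}
```
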
@ -78,17 +78,9 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}

/**
* Is this a valid nodes that has the minimal information set. The minimal set is defined
* by the localNodeId being set.
* Returns <tt>true</tt> if the local node is the elected master node.
*/
public boolean valid() {
return localNodeId != null;
}

/**
* Returns <tt>true</tt> if the local node is the master node.
*/
public boolean localNodeMaster() {
public boolean isLocalNodeElectedMaster() {
if (localNodeId == null) {
// we don't know yet the local node id, return false
return false;
@ -96,31 +88,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return localNodeId.equals(masterNodeId);
}

/**
* Get the number of known nodes
*
* @return number of nodes
*/
public int size() {
return nodes.size();
}

/**
* Get the number of known nodes
*
* @return number of nodes
*/
public int getSize() {
return size();
}

/**
* Get a {@link Map} of the discovered nodes arranged by their ids
*
* @return {@link Map} of the discovered nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> nodes() {
return this.nodes;
return nodes.size();
}

/**
@ -129,16 +103,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return {@link Map} of the discovered nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getNodes() {
return nodes();
}

/**
* Get a {@link Map} of the discovered data nodes arranged by their ids
*
* @return {@link Map} of the discovered data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> dataNodes() {
return this.dataNodes;
return this.nodes;
}

/**
@ -147,16 +112,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return {@link Map} of the discovered data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getDataNodes() {
return dataNodes();
}

/**
* Get a {@link Map} of the discovered master nodes arranged by their ids
*
* @return {@link Map} of the discovered master nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> masterNodes() {
return this.masterNodes;
return this.dataNodes;
}

/**
@ -165,7 +121,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return {@link Map} of the discovered master nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getMasterNodes() {
return masterNodes();
return this.masterNodes;
}

/**
@ -180,7 +136,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
*
* @return {@link Map} of the discovered master and data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> masterAndDataNodes() {
public ImmutableOpenMap<String, DiscoveryNode> getMasterAndDataNodes() {
ImmutableOpenMap.Builder<String, DiscoveryNode> nodes = ImmutableOpenMap.builder(dataNodes);
nodes.putAll(masterNodes);
return nodes.build();
@ -206,31 +162,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return nodes.containsKey(nodeId);
}

/**
* Get the id of the master node
*
* @return id of the master
*/
public String masterNodeId() {
return this.masterNodeId;
}

/**
* Get the id of the master node
*
* @return id of the master
*/
public String getMasterNodeId() {
return masterNodeId();
}

/**
* Get the id of the local node
*
* @return id of the local node
*/
public String localNodeId() {
return this.localNodeId;
return this.masterNodeId;
}

/**
@ -239,16 +177,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return id of the local node
*/
public String getLocalNodeId() {
return localNodeId();
}

/**
* Get the local node
*
* @return local node
*/
public DiscoveryNode localNode() {
return nodes.get(localNodeId);
return this.localNodeId;
}

/**
@ -257,16 +186,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return local node
*/
public DiscoveryNode getLocalNode() {
return localNode();
}

/**
* Get the master node
*
* @return master node
*/
public DiscoveryNode masterNode() {
return nodes.get(masterNodeId);
return nodes.get(localNodeId);
}

/**
@ -275,7 +195,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* @return master node
*/
public DiscoveryNode getMasterNode() {
return masterNode();
return nodes.get(masterNodeId);
}

/**
@ -287,7 +207,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public DiscoveryNode findByAddress(TransportAddress address) {
for (ObjectCursor<DiscoveryNode> cursor : nodes.values()) {
DiscoveryNode node = cursor.value;
if (node.address().equals(address)) {
if (node.getAddress().equals(address)) {
return node;
}
}
@ -304,7 +224,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
*
* @return the oldest version in the cluster
*/
public Version smallestVersion() {
public Version getSmallestVersion() {
return minNodeVersion;
}

@ -313,7 +233,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
*
* @return the oldest version in the cluster
*/
public Version smallestNonClientNodeVersion() {
public Version getSmallestNonClientNodeVersion() {
return minNonClientNodeVersion;
}

@ -340,19 +260,19 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
int index = 0;
nodesIds = new String[nodes.size()];
for (DiscoveryNode node : this) {
nodesIds[index++] = node.id();
nodesIds[index++] = node.getId();
}
return nodesIds;
} else {
ObjectHashSet<String> resolvedNodesIds = new ObjectHashSet<>(nodesIds.length);
for (String nodeId : nodesIds) {
if (nodeId.equals("_local")) {
String localNodeId = localNodeId();
String localNodeId = getLocalNodeId();
if (localNodeId != null) {
resolvedNodesIds.add(localNodeId);
}
} else if (nodeId.equals("_master")) {
String masterNodeId = masterNodeId();
String masterNodeId = getMasterNodeId();
if (masterNodeId != null) {
resolvedNodesIds.add(masterNodeId);
}
@ -361,40 +281,46 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
} else {
// not a node id, try and search by name
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.name())) {
resolvedNodesIds.add(node.id());
if (Regex.simpleMatch(nodeId, node.getName())) {
resolvedNodesIds.add(node.getId());
}
}
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
} else if (Regex.simpleMatch(nodeId, node.getHostName())) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
}
}
int index = nodeId.indexOf(':');
if (index != -1) {
String matchAttrName = nodeId.substring(0, index);
String matchAttrValue = nodeId.substring(index + 1);
if ("data".equals(matchAttrName)) {
if (DiscoveryNode.Role.DATA.getRoleName().equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(dataNodes.keys());
} else {
resolvedNodesIds.removeAll(dataNodes.keys());
}
} else if ("master".equals(matchAttrName)) {
} else if (DiscoveryNode.Role.MASTER.getRoleName().equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(masterNodes.keys());
} else {
resolvedNodesIds.removeAll(masterNodes.keys());
}
} else if (DiscoveryNode.Role.INGEST.getRoleName().equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(ingestNodes.keys());
} else {
resolvedNodesIds.removeAll(ingestNodes.keys());
}
} else {
for (DiscoveryNode node : this) {
for (ObjectObjectCursor<String, String> entry : node.attributes()) {
String attrName = entry.key;
String attrValue = entry.value;
for (Map.Entry<String, String> entry : node.getAttributes().entrySet()) {
String attrName = entry.getKey();
String attrValue = entry.getValue();
if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
resolvedNodesIds.add(node.id());
resolvedNodesIds.add(node.getId());
}
}
}
@ -409,7 +335,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
for (DiscoveryNode node : this) {
if (newNodes.contains(node.id())) {
if (newNodes.contains(node.getId())) {
builder.put(node);
}
}
@ -427,12 +353,12 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
List<DiscoveryNode> removed = new ArrayList<>();
List<DiscoveryNode> added = new ArrayList<>();
for (DiscoveryNode node : other) {
if (!this.nodeExists(node.id())) {
if (!this.nodeExists(node.getId())) {
removed.add(node);
}
}
for (DiscoveryNode node : this) {
if (!other.nodeExists(node.id())) {
if (!other.nodeExists(node.getId())) {
added.add(node);
}
}
@ -440,8 +366,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
DiscoveryNode newMasterNode = null;
if (masterNodeId != null) {
if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
previousMasterNode = other.masterNode();
newMasterNode = masterNode();
previousMasterNode = other.getMasterNode();
newMasterNode = getMasterNode();
}
}
return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added));
@ -463,10 +389,10 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
sb.append("nodes: \n");
for (DiscoveryNode node : this) {
sb.append(" ").append(node);
if (node == localNode()) {
if (node == getLocalNode()) {
sb.append(", local");
}
if (node == masterNode()) {
if (node == getMasterNode()) {
sb.append(", master");
}
sb.append("\n");
@ -474,10 +400,6 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return sb.toString();
}

public Delta emptyDelta() {
return new Delta(null, null, localNodeId, DiscoveryNode.EMPTY_LIST, DiscoveryNode.EMPTY_LIST);
}

public static class Delta {

private final String localNodeId;
@ -533,7 +455,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public String shortSummary() {
StringBuilder sb = new StringBuilder();
if (!removed() && masterNodeChanged()) {
if (newMasterNode.id().equals(localNodeId)) {
if (newMasterNode.getId().equals(localNodeId)) {
// we are the master, no nodes we removed, we are actually the first master
sb.append("new_master ").append(newMasterNode());
} else {
@ -561,13 +483,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
if (added()) {
// don't print if there is one added, and it is us
if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) {
if (removed() || masterNodeChanged()) {
sb.append(", ");
}
sb.append("added {");
for (DiscoveryNode node : addedNodes()) {
if (!node.id().equals(localNodeId)) {
if (!node.getId().equals(localNodeId)) {
// don't print ourself
sb.append(node).append(',');
}
@ -593,18 +515,18 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
}

public DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
private DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
Builder builder = new Builder();
if (in.readBoolean()) {
builder.masterNodeId(in.readString());
}
if (localNode != null) {
builder.localNodeId(localNode.id());
builder.localNodeId(localNode.getId());
}
int size = in.readVInt();
for (int i = 0; i < size; i++) {
DiscoveryNode node = DiscoveryNode.readNode(in);
if (localNode != null && node.id().equals(localNode.id())) {
DiscoveryNode node = new DiscoveryNode(in);
if (localNode != null && node.getId().equals(localNode.getId())) {
// reuse the same instance of our address and local node id for faster equality
node = localNode;
}
@ -615,7 +537,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements

@Override
public DiscoveryNodes readFrom(StreamInput in) throws IOException {
return readFrom(in, localNode());
return readFrom(in, getLocalNode());
}

public static Builder builder() {
@ -637,13 +559,13 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}

public Builder(DiscoveryNodes nodes) {
this.masterNodeId = nodes.masterNodeId();
this.localNodeId = nodes.localNodeId();
this.nodes = ImmutableOpenMap.builder(nodes.nodes());
this.masterNodeId = nodes.getMasterNodeId();
this.localNodeId = nodes.getLocalNodeId();
this.nodes = ImmutableOpenMap.builder(nodes.getNodes());
}

public Builder put(DiscoveryNode node) {
nodes.put(node.id(), node);
nodes.put(node.getId(), node);
return this;
}

@ -669,18 +591,18 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
Version minNodeVersion = Version.CURRENT;
Version minNonClientNodeVersion = Version.CURRENT;
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
if (nodeEntry.value.dataNode()) {
if (nodeEntry.value.isDataNode()) {
dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.version());
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.getVersion());
}
if (nodeEntry.value.masterNode()) {
if (nodeEntry.value.isMasterNode()) {
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.version());
minNonClientNodeVersion = Version.smallest(minNonClientNodeVersion, nodeEntry.value.getVersion());
}
if (nodeEntry.value.isIngestNode()) {
ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value);
}
minNodeVersion = Version.smallest(minNodeVersion, nodeEntry.value.version());
minNodeVersion = Version.smallest(minNodeVersion, nodeEntry.value.getVersion());
}

return new DiscoveryNodes(

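Putting the renamed DiscoveryNodes accessors together, a hedged sketch: `builder()`, `put`, `localNodeId` and `masterNodeId` are visible in the diff above, while the final `build()` call is an assumption:

```java
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

public class DiscoveryNodesSketch {
    static DiscoveryNodes cluster(DiscoveryNode local, DiscoveryNode master) {
        // Builder.put now keys nodes by node.getId() rather than node.id().
        return DiscoveryNodes.builder()
                .put(local)
                .put(master)
                .localNodeId(local.getId())
                .masterNodeId(master.getId())
                .build(); // assumed finisher
    }

    static void describe(DiscoveryNodes nodes) {
        // Only the get-prefixed variants survive this change.
        System.out.println("local:   " + nodes.getLocalNodeId());
        System.out.println("master:  " + nodes.getMasterNodeId());
        System.out.println("data:    " + nodes.getDataNodes().size());
        System.out.println("elected: " + nodes.isLocalNodeElectedMaster());
    }
}
```
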
Some files were not shown because too many files have changed in this diff.