mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-03-09 14:34:43 +00:00
Merge branch 'master' into ccr
* master: Remove RestGetAllAliasesAction (#31308) Temporary fix for broken build Reenable Checkstyle's unused import rule (#31270) Remove remaining unused imports before merging #31270 Fix non-REST doc snippet [DOC] Extend SQL docs Immediately flush channel after writing to buffer (#31301) [DOCS] Shortens ML API intros Use quotes in the call invocation (#31249) move security ingest processors to a sub ingest directory (#31306) Add 5.6.11 version constant. Fix version detection. SQL: Whitelist SQL utility class for better scripting (#30681) [Docs] All Rollup docs experimental, agg limitations, clarify DeleteJob (#31299) CCS: don't proxy requests for already connected node (#31273) Mute ScriptedMetricAggregatorTests testSelfReferencingAggStateAfterMap [test] opensuse packaging turn up debug logging Add unreleased version 6.3.1 Removes experimental tag from scripted_metric aggregation (#31298) [Rollup] Metric config parser must use builder so validation runs (#31159) [ML] Check licence when datafeeds use cross cluster search (#31247) Add notion of internal index settings (#31286) Test: Remove broken yml test feature (#31255) REST hl client: cluster health to default to cluster level (#31268) [ML] Update test thresholds to account for changes to memory control (#31289) Log warnings when cluster state publication failed to some nodes (#31233) Fix AntFixture waiting condition (#31272) Ignore numeric shard count if waiting for ALL (#31265) [ML] Implement new rules design (#31110) index_prefixes back-compat should test 6.3 (#30951) Core: Remove plain execute method on TransportAction (#30998) Update checkstyle to 8.10.1 (#31269) Set analyzer version in PreBuiltAnalyzerProviderFactory (#31202) Modify pipelining handlers to require full requests (#31280) Revert upgrade to Netty 4.1.25.Final (#31282) Use armored input stream for reading public key (#31229) Fix Netty 4 Server Transport tests. Again. 
REST hl client: adjust wait_for_active_shards param in cluster health (#31266) REST high-level Client: remove deprecated API methods (#31200) [DOCS] Mark SQL feature as experimental [DOCS] Updates machine learning custom URL screenshots (#31222) Fix naming conventions check for XPackTestCase Fix security Netty 4 transport tests Fix race in clear scroll (#31259) [DOCS] Clarify audit index settings when remote indexing (#30923) Delete typos in SAML docs (#31199) REST high-level client: add Cluster Health API (#29331) [ML][TEST] Mute tests using rules (#31204) Support RequestedAuthnContext (#31238) SyncedFlushResponse to implement ToXContentObject (#31155) Add Get Aliases API to the high-level REST client (#28799) Remove some line length supressions (#31209) Validate xContentType in PutWatchRequest. (#31088) [INGEST] Interrupt the current thread if evaluation grok expressions take too long (#31024) Suppress extras FS on caching directory tests Revert "[DOCS] Added 6.3 info & updated the upgrade table. (#30940)" Revert "Fix snippets in upgrade docs" Fix snippets in upgrade docs [DOCS] Added 6.3 info & updated the upgrade table. 
(#30940) LLClient: Support host selection (#30523) Upgrade to Netty 4.1.25.Final (#31232) Enable custom credentials for core REST tests (#31235) Move ESIndexLevelReplicationTestCase to test framework (#31243) Encapsulate Translog in Engine (#31220) HLRest: Add get index templates API (#31161) Remove all unused imports and fix CRLF (#31207) [Tests] Fix self-referencing tests [TEST] Fix testRecoveryAfterPrimaryPromotion [Docs] Remove mention pattern files in Grok processor (#31170) Use stronger write-once semantics for Azure repository (#30437) Don't swallow exceptions on replication (#31179) Limit the number of concurrent requests per node (#31206) Call ensureNoSelfReferences() on _agg state variable after scripted metric agg script executions (#31044) Move java version checker back to its own jar (#30708) [test] add fix for rare virtualbox error (#31212)
This commit is contained in:
commit
2cffd85f7f
3
Vagrantfile
vendored
3
Vagrantfile
vendored
@ -31,6 +31,9 @@ Vagrant.configure(2) do |config|
|
||||
# Give the box more memory and cpu because our tests are beasts!
|
||||
vbox.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192)
|
||||
vbox.cpus = Integer(ENV['VAGRANT_CPUS'] || 4)
|
||||
|
||||
# see https://github.com/hashicorp/vagrant/issues/9524
|
||||
vbox.customize ["modifyvm", :id, "--audio", "none"]
|
||||
end
|
||||
|
||||
# Switch the default share for the project root from /vagrant to
|
||||
|
@ -131,7 +131,7 @@ task verifyVersions {
|
||||
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
|
||||
xml = new XmlParser().parse(s)
|
||||
}
|
||||
Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })
|
||||
Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }.collect { Version.fromString(it) })
|
||||
|
||||
// Limit the known versions to those that should be index compatible, and are not future versions
|
||||
knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
|
||||
|
@ -140,7 +140,7 @@ class PrecommitTasks {
|
||||
configProperties = [
|
||||
suppressions: checkstyleSuppressions
|
||||
]
|
||||
toolVersion = 7.5
|
||||
toolVersion = '8.10.1'
|
||||
}
|
||||
|
||||
project.tasks.withType(Checkstyle) { task ->
|
||||
|
@ -149,11 +149,11 @@ public class AntFixture extends AntTask implements Fixture {
|
||||
}
|
||||
|
||||
// the process is started (has a pid) and is bound to a network interface
|
||||
// so now wait undil the waitCondition has been met
|
||||
// so now evaluates if the waitCondition is successful
|
||||
// TODO: change this to a loop?
|
||||
boolean success
|
||||
try {
|
||||
success = waitCondition(this, ant) == false
|
||||
success = waitCondition(this, ant)
|
||||
} catch (Exception e) {
|
||||
String msg = "Wait condition caught exception for ${name}"
|
||||
logger.error(msg, e)
|
||||
|
@ -26,13 +26,9 @@
|
||||
</module>
|
||||
|
||||
<module name="AvoidStarImport" />
|
||||
<!-- Doesn't pass but we could make it pass pretty quick.
|
||||
<module name="UnusedImports">
|
||||
The next property is optional. If we remove it then imports that are
|
||||
only referenced by Javadoc cause the check to fail.
|
||||
<property name="processJavadoc" value="true" />
|
||||
</module>
|
||||
-->
|
||||
|
||||
<!-- Unused imports are forbidden -->
|
||||
<module name="UnusedImports" />
|
||||
|
||||
<!-- Non-inner classes must be in files that match their names. -->
|
||||
<module name="OuterTypeFilename" />
|
||||
|
@ -20,7 +20,6 @@
|
||||
files start to pass. -->
|
||||
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]HeapBufferedAsyncResponseConsumerTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]vectorhighlight[/\\]CustomFieldQuery.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]Action.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
|
||||
@ -34,9 +33,7 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]TransportVerifyRepositoryAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]VerifyRepositoryRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]TransportClusterRerouteAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]TransportClusterSearchShardsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]create[/\\]CreateSnapshotRequestBuilder.java" checks="LineLength" />
|
||||
@ -53,7 +50,6 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]stats[/\\]ClusterStatsNodeResponse.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]stats[/\\]ClusterStatsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]stats[/\\]TransportClusterStatsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]PendingClusterTasksAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]PendingClusterTasksRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]TransportPendingClusterTasksAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]alias[/\\]TransportIndicesAliasesAction.java" checks="LineLength" />
|
||||
@ -75,7 +71,6 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]flush[/\\]TransportSyncedFlushAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]forcemerge[/\\]ForceMergeRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]forcemerge[/\\]TransportForceMergeAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]mapping[/\\]get[/\\]GetFieldMappingsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]mapping[/\\]get[/\\]GetMappingsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]mapping[/\\]get[/\\]TransportGetFieldMappingsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]mapping[/\\]get[/\\]TransportGetFieldMappingsIndexAction.java" checks="LineLength" />
|
||||
@ -91,13 +86,11 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]put[/\\]TransportUpdateSettingsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]put[/\\]UpdateSettingsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]shards[/\\]IndicesShardStoreRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]shards[/\\]IndicesShardStoresAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]shards[/\\]IndicesShardStoresResponse.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]shards[/\\]TransportIndicesShardStoresAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]stats[/\\]IndexStats.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]stats[/\\]IndicesStatsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]stats[/\\]TransportIndicesStatsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]template[/\\]delete[/\\]DeleteIndexTemplateAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]template[/\\]delete[/\\]DeleteIndexTemplateRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]template[/\\]delete[/\\]TransportDeleteIndexTemplateAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]template[/\\]get[/\\]GetIndexTemplatesRequestBuilder.java" checks="LineLength" />
|
||||
@ -124,12 +117,10 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]get[/\\]TransportGetAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]get[/\\]TransportShardMultiGetAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]index[/\\]IndexRequest.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]DeletePipelineRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]DeletePipelineTransportAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]GetPipelineRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]GetPipelineTransportAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]PutPipelineTransportAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineTransportAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchPhaseController.java" checks="LineLength" />
|
||||
@ -154,7 +145,6 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]single[/\\]shard[/\\]SingleShardOperationRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]single[/\\]shard[/\\]TransportSingleShardAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]MultiTermVectorsRequest.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]MultiTermVectorsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]TermVectorsRequest.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]TermVectorsResponse.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]termvectors[/\\]TermVectorsWriter.java" checks="LineLength" />
|
||||
@ -166,7 +156,6 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]update[/\\]UpdateRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANatives.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JarHell.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
|
||||
@ -340,7 +329,6 @@
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]VerifyNodeRepositoryAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]blobstore[/\\]BlobStoreRepository.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]blobstore[/\\]ChecksumBlobStoreFormat.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]fs[/\\]FsRepository.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestIndicesAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
|
||||
@ -577,10 +565,8 @@
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MoreLikeThisQueryBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MultiMatchQueryBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanMultiTermQueryBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanNotQueryBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]replication[/\\]ESIndexLevelReplicationTestCase.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]geo[/\\]GeoUtilsTests.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]AbstractNumberNestedSortingTestCase.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]DoubleNestedSortingTests.java" checks="LineLength" />
|
||||
|
@ -28,8 +28,6 @@ import java.io.Closeable;
|
||||
import java.lang.management.GarbageCollectorMXBean;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
@ -19,14 +19,17 @@
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.singleton;
|
||||
|
||||
/**
|
||||
* A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Cluster API.
|
||||
@ -55,20 +58,6 @@ public final class ClusterClient {
|
||||
options, ClusterUpdateSettingsResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates cluster wide specific settings using the Cluster Update Settings API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html"> Cluster Update Settings
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #putSettings(ClusterUpdateSettingsRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers)
|
||||
throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
|
||||
ClusterUpdateSettingsResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates cluster wide specific settings using the Cluster Update Settings API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html"> Cluster Update Settings
|
||||
@ -82,17 +71,34 @@ public final class ClusterClient {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
|
||||
options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates cluster wide specific settings using the Cluster Update Settings API.
|
||||
* Get cluster health using the Cluster Health API.
|
||||
* See
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html"> Cluster Health API on elastic.co</a>
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html"> Cluster Update Settings
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #putSettingsAsync(ClusterUpdateSettingsRequest, RequestOptions, ActionListener)}
|
||||
* If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT
|
||||
* @param healthRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
@Deprecated
|
||||
public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest,
|
||||
ActionListener<ClusterUpdateSettingsResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
|
||||
ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
|
||||
public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(healthRequest, RequestConverters::clusterHealth, options,
|
||||
ClusterHealthResponse::fromXContent, singleton(RestStatus.REQUEST_TIMEOUT.getStatus()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously get cluster health using the Cluster Health API.
|
||||
* See
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html"> Cluster Health API on elastic.co</a>
|
||||
* If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT
|
||||
* @param healthRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void healthAsync(ClusterHealthRequest healthRequest, RequestOptions options, ActionListener<ClusterHealthResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(healthRequest, RequestConverters::clusterHealth, options,
|
||||
ClusterHealthResponse::fromXContent, listener, singleton(RestStatus.REQUEST_TIMEOUT.getStatus()));
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,199 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
import org.elasticsearch.common.xcontent.StatusToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
|
||||
/**
|
||||
* Response obtained from the get aliases API.
|
||||
* The format is pretty horrible as it holds aliases, but at the same time errors can come back through the status and error fields.
|
||||
* Such errors are mostly 404 - NOT FOUND for aliases that were specified but not found. In such case the client won't throw exception
|
||||
* so it allows to retrieve the returned aliases, while at the same time checking if errors were returned.
|
||||
* There's also the case where an exception is returned, like for instance an {@link org.elasticsearch.index.IndexNotFoundException}.
|
||||
* We would usually throw such exception, but we configure the client to not throw for 404 to support the case above, hence we also not
|
||||
* throw in case an index is not found, although it is a hard error that doesn't come back with aliases.
|
||||
*/
|
||||
public class GetAliasesResponse extends ActionResponse implements StatusToXContentObject {

    // HTTP status of the get-aliases call; may be non-OK (e.g. 404) without an exception having been thrown
    private final RestStatus status;
    // plain-string error message returned by the server, or null if none
    private final String error;
    // structured exception returned by the server, or null if none
    private final ElasticsearchException exception;

    // aliases grouped by index name; empty when the response carried an exception
    private final Map<String, Set<AliasMetaData>> aliases;

    /**
     * Builds a response holding the returned aliases plus an optional plain error message.
     */
    GetAliasesResponse(RestStatus status, String error, Map<String, Set<AliasMetaData>> aliases) {
        this.status = status;
        this.error = error;
        this.aliases = aliases;
        this.exception = null;
    }

    /**
     * Builds a response for the hard-error case: an exception and no aliases.
     */
    private GetAliasesResponse(RestStatus status, ElasticsearchException exception) {
        this.status = status;
        this.error = null;
        this.aliases = Collections.emptyMap();
        this.exception = exception;
    }

    @Override
    public RestStatus status() {
        return status;
    }

    /**
     * Return the possibly returned error, null otherwise
     */
    public String getError() {
        return error;
    }

    /**
     * Return the exception that may have been returned
     */
    public ElasticsearchException getException() {
        return exception;
    }

    /**
     * Return the requested aliases
     */
    public Map<String, Set<AliasMetaData>> getAliases() {
        return aliases;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        {
            // non-OK responses carry the error message and numeric status alongside any aliases
            if (status != RestStatus.OK) {
                builder.field("error", error);
                builder.field("status", status.getStatus());
            }

            // one object per index, each wrapping an "aliases" object with one entry per alias
            for (Map.Entry<String, Set<AliasMetaData>> entry : aliases.entrySet()) {
                builder.startObject(entry.getKey());
                {
                    builder.startObject("aliases");
                    {
                        for (final AliasMetaData alias : entry.getValue()) {
                            AliasMetaData.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
                        }
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
        }
        builder.endObject();
        return builder;
    }

    /**
     * Parse the get aliases response
     */
    public static GetAliasesResponse fromXContent(XContentParser parser) throws IOException {
        if (parser.currentToken() == null) {
            parser.nextToken();
        }
        ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
        Map<String, Set<AliasMetaData>> aliases = new HashMap<>();

        String currentFieldName;
        Token token;
        String error = null;
        ElasticsearchException exception = null;
        RestStatus status = RestStatus.OK;

        while (parser.nextToken() != Token.END_OBJECT) {
            if (parser.currentToken() == Token.FIELD_NAME) {
                currentFieldName = parser.currentName();

                if ("status".equals(currentFieldName)) {
                    // NOTE(review): for well-formed input the next token is the numeric value, so this
                    // FIELD_NAME guard only skips a malformed "status" with no value — confirm intent
                    if ((token = parser.nextToken()) != Token.FIELD_NAME) {
                        ensureExpectedToken(Token.VALUE_NUMBER, token, parser::getTokenLocation);
                        status = RestStatus.fromCode(parser.intValue());
                    }
                } else if ("error".equals(currentFieldName)) {
                    token = parser.nextToken();
                    if (token == Token.VALUE_STRING) {
                        // plain string error message
                        error = parser.text();
                    } else if (token == Token.START_OBJECT) {
                        // structured error: parse into an ElasticsearchException
                        parser.nextToken();
                        exception = ElasticsearchException.innerFromXContent(parser, true);
                    } else if (token == Token.START_ARRAY) {
                        // unexpected array form: ignore its contents
                        parser.skipChildren();
                    }
                } else {
                    // any other top-level field is an index name whose object holds that index's aliases
                    String indexName = parser.currentName();
                    if (parser.nextToken() == Token.START_OBJECT) {
                        Set<AliasMetaData> parseInside = parseAliases(parser);
                        aliases.put(indexName, parseInside);
                    }
                }
            }
        }
        // a structured exception is a hard error: it never coexists with a message or aliases
        if (exception != null) {
            assert error == null;
            assert aliases.isEmpty();
            return new GetAliasesResponse(status, exception);
        }
        return new GetAliasesResponse(status, error, aliases);
    }

    /**
     * Parses the per-index object, collecting the alias entries under its "aliases" field
     * and skipping any other sub-objects or arrays.
     */
    private static Set<AliasMetaData> parseAliases(XContentParser parser) throws IOException {
        Set<AliasMetaData> aliases = new HashSet<>();
        Token token;
        String currentFieldName = null;
        while ((token = parser.nextToken()) != Token.END_OBJECT) {
            if (token == Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == Token.START_OBJECT) {
                if ("aliases".equals(currentFieldName)) {
                    // each entry inside "aliases" is one alias definition
                    while (parser.nextToken() != Token.END_OBJECT) {
                        AliasMetaData fromXContent = AliasMetaData.Builder.fromXContent(parser);
                        aliases.add(fromXContent);
                    }
                } else {
                    parser.skipChildren();
                }
            } else if (token == Token.START_ARRAY) {
                parser.skipChildren();
            }
        }
        return aliases;
    }
}
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
@ -54,13 +53,17 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.singleton;
|
||||
|
||||
/**
|
||||
* A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API.
|
||||
@ -88,19 +91,6 @@ public final class IndicesClient {
|
||||
DeleteIndexResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Deletes an index using the Delete Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
 * Delete Index API on elastic.co</a>
 *
 * @param deleteIndexRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #delete(DeleteIndexRequest, RequestOptions)}
 */
@Deprecated
public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
        DeleteIndexResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously deletes an index using the Delete Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
|
||||
@ -114,19 +104,6 @@ public final class IndicesClient {
|
||||
DeleteIndexResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously deletes an index using the Delete Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
 * Delete Index API on elastic.co</a>
 *
 * @param deleteIndexRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #deleteAsync(DeleteIndexRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
        DeleteIndexResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Creates an index using the Create Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
|
||||
@ -141,19 +118,6 @@ public final class IndicesClient {
|
||||
CreateIndexResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Creates an index using the Create Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
 * Create Index API on elastic.co</a>
 *
 * @param createIndexRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #create(CreateIndexRequest, RequestOptions)}
 */
@Deprecated
public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex,
        CreateIndexResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously creates an index using the Create Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
|
||||
@ -167,19 +131,6 @@ public final class IndicesClient {
|
||||
CreateIndexResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously creates an index using the Create Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
 * Create Index API on elastic.co</a>
 *
 * @param createIndexRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #createAsync(CreateIndexRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void createAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex,
        CreateIndexResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Updates the mappings on an index using the Put Mapping API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html">
|
||||
@ -194,19 +145,6 @@ public final class IndicesClient {
|
||||
PutMappingResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Updates the mappings on an index using the Put Mapping API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html">
 * Put Mapping API on elastic.co</a>
 *
 * @param putMappingRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #putMapping(PutMappingRequest, RequestOptions)}
 */
@Deprecated
public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping,
        PutMappingResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates the mappings on an index using the Put Mapping API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html">
|
||||
@ -220,20 +158,6 @@ public final class IndicesClient {
|
||||
PutMappingResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously updates the mappings on an index using the Put Mapping API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html">
 * Put Mapping API on elastic.co</a>
 *
 * @param putMappingRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #putMappingAsync(PutMappingRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener<PutMappingResponse> listener,
                            Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping,
        PutMappingResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Retrieves the mappings on an index or indices using the Get Mapping API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html">
|
||||
@ -276,20 +200,6 @@ public final class IndicesClient {
|
||||
IndicesAliasesResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Updates aliases using the Index Aliases API.
 * <p>
 * See <a href=
 * "https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
 * Index Aliases API on elastic.co</a>
 *
 * @param indicesAliasesRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #updateAliases(IndicesAliasesRequest, RequestOptions)}
 */
@Deprecated
public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
        IndicesAliasesResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates aliases using the Index Aliases API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
@ -304,21 +214,6 @@ public final class IndicesClient {
|
||||
IndicesAliasesResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously updates aliases using the Index Aliases API.
 * <p>
 * See <a href=
 * "https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
 * Index Aliases API on elastic.co</a>
 *
 * @param indicesAliasesRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #updateAliasesAsync(IndicesAliasesRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener<IndicesAliasesResponse> listener,
                               Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
        IndicesAliasesResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Opens an index using the Open Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
|
||||
@ -333,19 +228,6 @@ public final class IndicesClient {
|
||||
OpenIndexResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Opens an index using the Open Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
 * Open Index API on elastic.co</a>
 *
 * @param openIndexRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #open(OpenIndexRequest, RequestOptions)}
 */
@Deprecated
public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex,
        OpenIndexResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously opens an index using the Open Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
|
||||
@ -359,19 +241,6 @@ public final class IndicesClient {
|
||||
OpenIndexResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously opens an index using the Open Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
 * Open Index API on elastic.co</a>
 *
 * @param openIndexRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #openAsync(OpenIndexRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void openAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex,
        OpenIndexResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Closes an index using the Close Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
|
||||
@ -386,19 +255,6 @@ public final class IndicesClient {
|
||||
CloseIndexResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Closes an index using the Close Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
 * Close Index API on elastic.co</a>
 *
 * @param closeIndexRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #close(CloseIndexRequest, RequestOptions)}
 */
@Deprecated
public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
        CloseIndexResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously closes an index using the Close Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
|
||||
@ -413,19 +269,6 @@ public final class IndicesClient {
|
||||
}
|
||||
|
||||
|
||||
/**
 * Asynchronously closes an index using the Close Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
 * Close Index API on elastic.co</a>
 *
 * @param closeIndexRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #closeAsync(CloseIndexRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener<CloseIndexResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
        CloseIndexResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Checks if one or more aliases exist using the Aliases Exist API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
@ -440,19 +283,6 @@ public final class IndicesClient {
|
||||
RestHighLevelClient::convertExistsResponse, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Checks if one or more aliases exist using the Aliases Exist API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
 * Indices Aliases API on elastic.co</a>
 *
 * @param getAliasesRequest the request
 * @param headers optional request headers
 * @return whether the alias(es) exist
 * @throws IOException in case there is a problem sending the request
 * @deprecated Prefer {@link #existsAlias(GetAliasesRequest, RequestOptions)}
 */
@Deprecated
public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias,
        RestHighLevelClient::convertExistsResponse, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks if one or more aliases exist using the Aliases Exist API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
|
||||
@ -466,19 +296,6 @@ public final class IndicesClient {
|
||||
RestHighLevelClient::convertExistsResponse, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously checks if one or more aliases exist using the Aliases Exist API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html">
 * Indices Aliases API on elastic.co</a>
 *
 * @param getAliasesRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #existsAliasAsync(GetAliasesRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener<Boolean> listener, Header... headers) {
    restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias,
        RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Refresh one or more indices using the Refresh API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
|
||||
@ -492,18 +309,6 @@ public final class IndicesClient {
|
||||
RefreshResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Refresh one or more indices using the Refresh API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
 *
 * @param refreshRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #refresh(RefreshRequest, RequestOptions)}
 */
@Deprecated
public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
        emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously refresh one or more indices using the Refresh API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
|
||||
@ -516,18 +321,6 @@ public final class IndicesClient {
|
||||
RefreshResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously refresh one or more indices using the Refresh API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
 *
 * @param refreshRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #refreshAsync(RefreshRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void refreshAsync(RefreshRequest refreshRequest, ActionListener<RefreshResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
        listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Flush one or more indices using the Flush API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
|
||||
@ -541,18 +334,6 @@ public final class IndicesClient {
|
||||
FlushResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Flush one or more indices using the Flush API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
 *
 * @param flushRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #flush(FlushRequest, RequestOptions)}
 */
@Deprecated
public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
        emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously flush one or more indices using the Flush API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
|
||||
@ -565,18 +346,6 @@ public final class IndicesClient {
|
||||
FlushResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously flush one or more indices using the Flush API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
 *
 * @param flushRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void flushAsync(FlushRequest flushRequest, ActionListener<FlushResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
        listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Initiate a synced flush manually using the synced flush API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html">
|
||||
@ -647,19 +416,6 @@ public final class IndicesClient {
|
||||
ForceMergeResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Force merge one or more indices using the Force Merge API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html">
 * Force Merge API on elastic.co</a>
 *
 * @param forceMergeRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #forceMerge(ForceMergeRequest, RequestOptions)}
 */
@Deprecated
public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
        ForceMergeResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously force merge one or more indices using the Force Merge API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html">
|
||||
@ -673,19 +429,6 @@ public final class IndicesClient {
|
||||
ForceMergeResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously force merge one or more indices using the Force Merge API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html">
 * Force Merge API on elastic.co</a>
 *
 * @param forceMergeRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #forceMergeAsync(ForceMergeRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener<ForceMergeResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
        ForceMergeResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Clears the cache of one or more indices using the Clear Cache API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html">
|
||||
@ -701,19 +444,6 @@ public final class IndicesClient {
|
||||
ClearIndicesCacheResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Clears the cache of one or more indices using the Clear Cache API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html">
 * Clear Cache API on elastic.co</a>
 *
 * @param clearIndicesCacheRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #clearCache(ClearIndicesCacheRequest, RequestOptions)}
 */
@Deprecated
public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
        ClearIndicesCacheResponse::fromXContent, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously clears the cache of one or more indices using the Clear Cache API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html">
|
||||
@ -728,20 +458,6 @@ public final class IndicesClient {
|
||||
ClearIndicesCacheResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously clears the cache of one or more indices using the Clear Cache API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html">
 * Clear Cache API on elastic.co</a>
 *
 * @param clearIndicesCacheRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #clearCacheAsync(ClearIndicesCacheRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener<ClearIndicesCacheResponse> listener,
                            Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
        ClearIndicesCacheResponse::fromXContent, listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Checks if the index (indices) exists or not.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
|
||||
@ -761,24 +477,6 @@ public final class IndicesClient {
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Checks if the index (indices) exists or not.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
 * Indices Exists API on elastic.co</a>
 *
 * @param request the request
 * @param headers optional request headers
 * @return whether the index (indices) exist
 * @throws IOException in case there is a problem sending the request
 * @deprecated Prefer {@link #exists(GetIndexRequest, RequestOptions)}
 */
@Deprecated
public boolean exists(GetIndexRequest request, Header... headers) throws IOException {
    return restHighLevelClient.performRequest(
        request,
        RequestConverters::indicesExist,
        RestHighLevelClient::convertExistsResponse,
        Collections.emptySet(),
        headers
    );
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks if the index (indices) exists or not.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
|
||||
@ -798,25 +496,6 @@ public final class IndicesClient {
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Asynchronously checks if the index (indices) exists or not.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html">
 * Indices Exists API on elastic.co</a>
 *
 * @param request the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #existsAsync(GetIndexRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void existsAsync(GetIndexRequest request, ActionListener<Boolean> listener, Header... headers) {
    restHighLevelClient.performRequestAsync(
        request,
        RequestConverters::indicesExist,
        RestHighLevelClient::convertExistsResponse,
        listener,
        Collections.emptySet(),
        headers
    );
}
|
||||
|
||||
/**
|
||||
* Shrinks an index using the Shrink Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
|
||||
@ -831,19 +510,6 @@ public final class IndicesClient {
|
||||
ResizeResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Shrinks an index using the Shrink Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
 * Shrink Index API on elastic.co</a>
 *
 * @param resizeRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #shrink(ResizeRequest, RequestOptions)}
 */
@Deprecated
public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
        emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously shrinks an index using the Shrink index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
|
||||
@ -857,19 +523,6 @@ public final class IndicesClient {
|
||||
ResizeResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Asynchronously shrinks an index using the Shrink index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html">
 * Shrink Index API on elastic.co</a>
 *
 * @param resizeRequest the request
 * @param listener the listener to be notified upon request completion
 * @param headers optional request headers
 * @deprecated Prefer {@link #shrinkAsync(ResizeRequest, RequestOptions, ActionListener)}
 */
@Deprecated
public void shrinkAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
    restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
        listener, emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Splits an index using the Split Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
|
||||
@ -884,19 +537,6 @@ public final class IndicesClient {
|
||||
ResizeResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
 * Splits an index using the Split Index API.
 * <p>
 * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
 * Split Index API on elastic.co</a>
 *
 * @param resizeRequest the request
 * @param headers optional request headers
 * @return the response
 * @throws IOException in case there is a problem sending the request or parsing back the response
 * @deprecated Prefer {@link #split(ResizeRequest, RequestOptions)}
 */
@Deprecated
public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException {
    return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
        emptySet(), headers);
}
|
||||
|
||||
/**
|
||||
* Asynchronously splits an index using the Split Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
|
||||
@ -910,19 +550,6 @@ public final class IndicesClient {
|
||||
ResizeResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously splits an index using the Split Index API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html">
|
||||
* Split Index API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #splitAsync(ResizeRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public void splitAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Rolls over an index using the Rollover Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html">
|
||||
@ -937,19 +564,6 @@ public final class IndicesClient {
|
||||
RolloverResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Rolls over an index using the Rollover Index API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html">
|
||||
* Rollover Index API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #rollover(RolloverRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover,
|
||||
RolloverResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously rolls over an index using the Rollover Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html">
|
||||
@ -964,16 +578,30 @@ public final class IndicesClient {
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously rolls over an index using the Rollover Index API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html">
|
||||
* Rollover Index API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #rolloverAsync(RolloverRequest, RequestOptions, ActionListener)}
|
||||
* Gets one or more aliases using the Get Index Aliases API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html"> Indices Aliases API on
|
||||
* elastic.co</a>
|
||||
* @param getAliasesRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
@Deprecated
|
||||
public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener<RolloverResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
public GetAliasesResponse getAlias(GetAliasesRequest getAliasesRequest, RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(getAliasesRequest, RequestConverters::getAlias, options,
|
||||
GetAliasesResponse::fromXContent, singleton(RestStatus.NOT_FOUND.getStatus()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously gets one or more aliases using the Get Index Aliases API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html"> Indices Aliases API on
|
||||
* elastic.co</a>
|
||||
* @param getAliasesRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void getAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener<GetAliasesResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(getAliasesRequest, RequestConverters::getAlias, options,
|
||||
GetAliasesResponse::fromXContent, listener, singleton(RestStatus.NOT_FOUND.getStatus()));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -990,19 +618,6 @@ public final class IndicesClient {
|
||||
UpdateSettingsResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates specific index level settings using the Update Indices Settings API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html"> Update Indices Settings
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #putSettings(UpdateSettingsRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
|
||||
UpdateSettingsResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates specific index level settings using the Update Indices Settings API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html"> Update Indices Settings
|
||||
@ -1017,20 +632,6 @@ public final class IndicesClient {
|
||||
UpdateSettingsResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates specific index level settings using the Update Indices Settings API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html"> Update Indices Settings
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #putSettingsAsync(UpdateSettingsRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener<UpdateSettingsResponse> listener,
|
||||
Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
|
||||
UpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Puts an index template using the Index Templates API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html"> Index Templates API
|
||||
@ -1059,4 +660,33 @@ public final class IndicesClient {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options,
|
||||
PutIndexTemplateResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets index templates using the Index Templates API
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html"> Index Templates API
|
||||
* on elastic.co</a>
|
||||
* @param getIndexTemplatesRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
public GetIndexTemplatesResponse getTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest,
|
||||
RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, RequestConverters::getTemplates,
|
||||
options, GetIndexTemplatesResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously gets index templates using the Index Templates API
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html"> Index Templates API
|
||||
* on elastic.co</a>
|
||||
* @param getIndexTemplatesRequest the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options,
|
||||
ActionListener<GetIndexTemplatesResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, RequestConverters::getTemplates,
|
||||
options, GetIndexTemplatesResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
|
||||
@ -55,6 +56,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
@ -73,7 +75,9 @@ import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
@ -705,6 +709,28 @@ final class RequestConverters {
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request clusterHealth(ClusterHealthRequest healthRequest) {
|
||||
String[] indices = healthRequest.indices() == null ? Strings.EMPTY_ARRAY : healthRequest.indices();
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_cluster/health")
|
||||
.addCommaSeparatedPathParts(indices)
|
||||
.build();
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
|
||||
new Params(request)
|
||||
.withWaitForStatus(healthRequest.waitForStatus())
|
||||
.withWaitForNoRelocatingShards(healthRequest.waitForNoRelocatingShards())
|
||||
.withWaitForNoInitializingShards(healthRequest.waitForNoInitializingShards())
|
||||
.withWaitForActiveShards(healthRequest.waitForActiveShards(), ActiveShardCount.NONE)
|
||||
.withWaitForNodes(healthRequest.waitForNodes())
|
||||
.withWaitForEvents(healthRequest.waitForEvents())
|
||||
.withTimeout(healthRequest.timeout())
|
||||
.withMasterTimeout(healthRequest.masterNodeTimeout())
|
||||
.withLocal(healthRequest.local())
|
||||
.withLevel(healthRequest.level());
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request rollover(RolloverRequest rolloverRequest) throws IOException {
|
||||
String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
|
||||
.addPathPart(rolloverRequest.getNewIndexName()).build();
|
||||
@ -830,6 +856,27 @@ final class RequestConverters {
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request getAlias(GetAliasesRequest getAliasesRequest) {
|
||||
String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
|
||||
String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
|
||||
String endpoint = endpoint(indices, "_alias", aliases);
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
Params params = new Params(request);
|
||||
params.withIndicesOptions(getAliasesRequest.indicesOptions());
|
||||
params.withLocal(getAliasesRequest.local());
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) throws IOException {
|
||||
String[] names = getIndexTemplatesRequest.names();
|
||||
String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addCommaSeparatedPathParts(names).build();
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
Params params = new Params(request);
|
||||
params.withLocal(getIndexTemplatesRequest.local());
|
||||
params.withMasterTimeout(getIndexTemplatesRequest.masterNodeTimeout());
|
||||
return request;
|
||||
}
|
||||
|
||||
private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
|
||||
BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
|
||||
return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
|
||||
@ -1000,7 +1047,11 @@ final class RequestConverters {
|
||||
}
|
||||
|
||||
Params withWaitForActiveShards(ActiveShardCount activeShardCount) {
|
||||
if (activeShardCount != null && activeShardCount != ActiveShardCount.DEFAULT) {
|
||||
return withWaitForActiveShards(activeShardCount, ActiveShardCount.DEFAULT);
|
||||
}
|
||||
|
||||
Params withWaitForActiveShards(ActiveShardCount activeShardCount, ActiveShardCount defaultActiveShardCount) {
|
||||
if (activeShardCount != null && activeShardCount != defaultActiveShardCount) {
|
||||
return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
return this;
|
||||
@ -1102,6 +1153,42 @@ final class RequestConverters {
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withWaitForStatus(ClusterHealthStatus status) {
|
||||
if (status != null) {
|
||||
return putParam("wait_for_status", status.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withWaitForNoRelocatingShards(boolean waitNoRelocatingShards) {
|
||||
if (waitNoRelocatingShards) {
|
||||
return putParam("wait_for_no_relocating_shards", Boolean.TRUE.toString());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withWaitForNoInitializingShards(boolean waitNoInitShards) {
|
||||
if (waitNoInitShards) {
|
||||
return putParam("wait_for_no_initializing_shards", Boolean.TRUE.toString());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
Params withWaitForNodes(String waitForNodes) {
|
||||
return putParam("wait_for_nodes", waitForNodes);
|
||||
}
|
||||
|
||||
Params withLevel(ClusterHealthRequest.Level level) {
|
||||
return putParam("level", level.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
|
||||
Params withWaitForEvents(Priority waitForEvents) {
|
||||
if (waitForEvents != null) {
|
||||
return putParam("wait_for_events", waitForEvents.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -296,17 +296,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a bulk request using the Bulk API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #bulk(BulkRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a bulk request using the Bulk API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
@ -318,17 +307,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a bulk request using the Bulk API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #bulkAsync(BulkRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
@ -340,16 +318,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
|
||||
* @deprecated Prefer {@link #ping(RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final boolean ping(Header... headers) throws IOException {
|
||||
return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cluster info otherwise provided when sending an HTTP request to '/'
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
@ -361,16 +329,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
MainResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the cluster info otherwise provided when sending an HTTP request to port 9200
|
||||
* @deprecated Prefer {@link #info(RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final MainResponse info(Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(),
|
||||
MainResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a document by id using the Get API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
@ -383,17 +341,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a document by id using the Get API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #get(GetRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously retrieves a document by id using the Get API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
@ -406,18 +353,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously retrieves a document by id using the Get API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #getAsync(GetRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener,
|
||||
singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves multiple documents by id using the Multi Get API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
|
||||
@ -431,18 +366,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves multiple documents by id using the Multi Get API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #multiGet(MultiGetRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent,
|
||||
singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously retrieves multiple documents by id using the Multi Get API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
|
||||
@ -455,18 +378,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously retrieves multiple documents by id using the Multi Get API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #multiGetAsync(MultiGetRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener<MultiGetResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener,
|
||||
singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks for the existence of a document. Returns true if it exists, false otherwise.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
@ -479,17 +390,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks for the existence of a document. Returns true if it exists, false otherwise.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #exists(GetRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final boolean exists(GetRequest getRequest, Header... headers) throws IOException {
|
||||
return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
@ -502,18 +402,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #existsAsync(GetRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void existsAsync(GetRequest getRequest, ActionListener<Boolean> listener, Header... headers) {
|
||||
performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Index a document using the Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
|
||||
@ -526,17 +414,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Index a document using the Index API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #index(IndexRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously index a document using the Index API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
|
||||
@ -549,18 +426,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously index a document using the Index API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #indexAsync(IndexRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void indexAsync(IndexRequest indexRequest, ActionListener<IndexResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates a document using the Update API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
|
||||
@ -573,17 +438,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates a document using the Update API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #update(UpdateRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates a document using the Update API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
|
||||
@ -596,18 +450,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously updates a document using the Update API.
|
||||
* <p>
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #updateAsync(UpdateRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a document by id using the Delete API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
|
||||
@ -621,18 +463,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a document by id using the Delete API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #delete(DeleteRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent,
|
||||
singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously deletes a document by id using the Delete API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
|
||||
@ -645,18 +475,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
Collections.singleton(404));
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously deletes a document by id using the Delete API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #deleteAsync(DeleteRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void deleteAsync(DeleteRequest deleteRequest, ActionListener<DeleteResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener,
|
||||
Collections.singleton(404), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a search request using the Search API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
|
||||
@ -669,17 +487,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
return performRequestAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a search using the Search API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #search(SearchRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a search using the Search API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
|
||||
@ -692,18 +499,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a search using the Search API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void searchAsync(SearchRequest searchRequest, ActionListener<SearchResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a multi search using the msearch API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
|
||||
@ -718,19 +513,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a multi search using the msearch API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
|
||||
* elastic.co</a>
|
||||
* @deprecated Prefer {@link #multiSearch(MultiSearchRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a multi search using the msearch API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
|
||||
@ -745,19 +527,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a multi search using the msearch API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">Multi search API on
|
||||
* elastic.co</a>
|
||||
* @deprecated Prefer {@link #multiSearchAsync(MultiSearchRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a search using the Search Scroll API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
|
||||
@ -772,19 +541,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a search using the Search Scroll API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #searchScroll(SearchScrollRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a search using the Search Scroll API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
|
||||
@ -799,20 +555,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a search using the Search Scroll API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html">Search Scroll
|
||||
* API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #searchScrollAsync(SearchScrollRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void searchScrollAsync(SearchScrollRequest searchScrollRequest,
|
||||
ActionListener<SearchResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears one or more scroll ids using the Clear Scroll API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
|
||||
@ -827,19 +569,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears one or more scroll ids using the Clear Scroll API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
|
||||
* Clear Scroll API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #clearScroll(ClearScrollRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously clears one or more scroll ids using the Clear Scroll API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
|
||||
@ -854,20 +583,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously clears one or more scroll ids using the Clear Scroll API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
|
||||
* Clear Scroll API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #clearScrollAsync(ClearScrollRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void clearScrollAsync(ClearScrollRequest clearScrollRequest,
|
||||
ActionListener<ClearScrollResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
|
||||
listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a request using the Search Template API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html">Search Template API
|
||||
@ -909,19 +624,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a request using the Ranking Evaluation API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html">Ranking Evaluation API
|
||||
* on elastic.co</a>
|
||||
* @deprecated Prefer {@link #rankEval(RankEvalRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a request using the Ranking Evaluation API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html">Ranking Evaluation API
|
||||
@ -935,19 +637,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a request using the Ranking Evaluation API.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html">Ranking Evaluation API
|
||||
* on elastic.co</a>
|
||||
* @deprecated Prefer {@link #rankEvalAsync(RankEvalRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener<RankEvalResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener,
|
||||
emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a request using the Field Capabilities API.
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html">Field Capabilities API
|
||||
@ -977,14 +666,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
FieldCapabilitiesResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
Set<Integer> ignores, Header... headers) throws IOException {
|
||||
return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
@ -994,14 +675,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
response -> parseEntity(response.getEntity(), entityParser), ignores);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
Set<Integer> ignores, Header... headers) throws IOException {
|
||||
return performRequest(request, requestConverter, optionsForHeaders(headers), responseConverter, ignores);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
@ -1021,10 +694,10 @@ public class RestHighLevelClient implements Closeable {
|
||||
try {
|
||||
return responseConverter.apply(e.getResponse());
|
||||
} catch (Exception innerException) {
|
||||
//the exception is ignored as we now try to parse the response as an error.
|
||||
//this covers cases like get where 404 can either be a valid document not found response,
|
||||
//or an error for which parsing is completely different. We try to consider the 404 response as a valid one
|
||||
//first. If parsing of the response breaks, we fall back to parsing it as an error.
|
||||
// the exception is ignored as we now try to parse the response as an error.
|
||||
// this covers cases like get where 404 can either be a valid document not found response,
|
||||
// or an error for which parsing is completely different. We try to consider the 404 response as a valid one
|
||||
// first. If parsing of the response breaks, we fall back to parsing it as an error.
|
||||
throw parseResponseException(e);
|
||||
}
|
||||
}
|
||||
@ -1038,15 +711,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
|
||||
performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser),
|
||||
listener, ignores, headers);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
@ -1056,14 +720,6 @@ public class RestHighLevelClient implements Closeable {
|
||||
response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
|
||||
performRequestAsync(request, requestConverter, optionsForHeaders(headers), responseConverter, listener, ignores);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
@ -1109,10 +765,10 @@ public class RestHighLevelClient implements Closeable {
|
||||
try {
|
||||
actionListener.onResponse(responseConverter.apply(response));
|
||||
} catch (Exception innerException) {
|
||||
//the exception is ignored as we now try to parse the response as an error.
|
||||
//this covers cases like get where 404 can either be a valid document not found response,
|
||||
//or an error for which parsing is completely different. We try to consider the 404 response as a valid one
|
||||
//first. If parsing of the response breaks, we fall back to parsing it as an error.
|
||||
// the exception is ignored as we now try to parse the response as an error.
|
||||
// this covers cases like get where 404 can either be a valid document not found response,
|
||||
// or an error for which parsing is completely different. We try to consider the 404 response as a valid one
|
||||
// first. If parsing of the response breaks, we fall back to parsing it as an error.
|
||||
actionListener.onFailure(parseResponseException(responseException));
|
||||
}
|
||||
} else {
|
||||
|
@ -21,25 +21,26 @@ package org.elasticsearch.client;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentLocation;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentLocation;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
|
||||
public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment {
|
||||
public class SyncedFlushResponse extends ActionResponse implements ToXContentObject {
|
||||
|
||||
public static final String SHARDS_FIELD = "_shards";
|
||||
|
||||
@ -86,6 +87,7 @@ public class SyncedFlushResponse extends ActionResponse implements ToXContentFra
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.startObject(SHARDS_FIELD);
|
||||
totalCounts.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
@ -96,6 +98,7 @@ public class SyncedFlushResponse extends ActionResponse implements ToXContentFra
|
||||
indexResult.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
@ -84,7 +84,7 @@ public final class TasksClient {
|
||||
cancelTasksRequest,
|
||||
RequestConverters::cancelTasks,
|
||||
options,
|
||||
parser -> CancelTasksResponse.fromXContent(parser),
|
||||
CancelTasksResponse::fromXContent,
|
||||
emptySet()
|
||||
);
|
||||
}
|
||||
@ -103,7 +103,7 @@ public final class TasksClient {
|
||||
cancelTasksRequest,
|
||||
RequestConverters::cancelTasks,
|
||||
options,
|
||||
parser -> CancelTasksResponse.fromXContent(parser),
|
||||
CancelTasksResponse::fromXContent,
|
||||
listener,
|
||||
emptySet()
|
||||
);
|
||||
|
@ -56,7 +56,8 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) {
|
||||
return BulkProcessor.builder(highLevelClient()::bulkAsync, listener);
|
||||
return BulkProcessor.builder(
|
||||
(request, bulkListener) -> highLevelClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener);
|
||||
}
|
||||
|
||||
public void testThatBulkProcessorCountIsCorrect() throws Exception {
|
||||
|
@ -48,7 +48,8 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase {
|
||||
private static final String TYPE_NAME = "type";
|
||||
|
||||
private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) {
|
||||
return BulkProcessor.builder(highLevelClient()::bulkAsync, listener);
|
||||
return BulkProcessor.builder(
|
||||
(request, bulkListener) -> highLevelClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener);
|
||||
}
|
||||
|
||||
public void testBulkRejectionLoadWithoutBackoff() throws Exception {
|
||||
|
@ -20,8 +20,13 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.health.ClusterIndexHealth;
|
||||
import org.elasticsearch.cluster.health.ClusterShardHealth;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
@ -34,6 +39,7 @@ import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
@ -57,7 +63,6 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
|
||||
setRequest.persistentSettings(map);
|
||||
|
||||
ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings,
|
||||
highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings,
|
||||
highLevelClient().cluster()::putSettingsAsync);
|
||||
|
||||
assertAcked(setResponse);
|
||||
@ -80,7 +85,6 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
|
||||
resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON);
|
||||
|
||||
ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings,
|
||||
highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings,
|
||||
highLevelClient().cluster()::putSettingsAsync);
|
||||
|
||||
assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null));
|
||||
@ -102,10 +106,141 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
|
||||
clusterUpdateSettingsRequest.transientSettings(Settings.builder().put(setting, value).build());
|
||||
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(clusterUpdateSettingsRequest,
|
||||
highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync,
|
||||
highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
|
||||
assertThat(exception.getMessage(), equalTo(
|
||||
"Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]"));
|
||||
}
|
||||
|
||||
public void testClusterHealthGreen() throws IOException {
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
request.timeout("5s");
|
||||
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
|
||||
|
||||
assertThat(response, notNullValue());
|
||||
assertThat(response.isTimedOut(), equalTo(false));
|
||||
assertThat(response.status(), equalTo(RestStatus.OK));
|
||||
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
|
||||
assertNoIndices(response);
|
||||
}
|
||||
|
||||
public void testClusterHealthYellowClusterLevel() throws IOException {
|
||||
createIndex("index", Settings.EMPTY);
|
||||
createIndex("index2", Settings.EMPTY);
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
request.timeout("5s");
|
||||
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
|
||||
|
||||
assertYellowShards(response);
|
||||
assertThat(response.getIndices().size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testClusterHealthYellowIndicesLevel() throws IOException {
|
||||
createIndex("index", Settings.EMPTY);
|
||||
createIndex("index2", Settings.EMPTY);
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
request.timeout("5s");
|
||||
request.level(ClusterHealthRequest.Level.INDICES);
|
||||
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
|
||||
|
||||
assertYellowShards(response);
|
||||
assertThat(response.getIndices().size(), equalTo(2));
|
||||
for (Map.Entry<String, ClusterIndexHealth> entry : response.getIndices().entrySet()) {
|
||||
assertYellowIndex(entry.getKey(), entry.getValue(), true);
|
||||
}
|
||||
}
|
||||
|
||||
private static void assertYellowShards(ClusterHealthResponse response) {
|
||||
assertThat(response, notNullValue());
|
||||
assertThat(response.isTimedOut(), equalTo(false));
|
||||
assertThat(response.status(), equalTo(RestStatus.OK));
|
||||
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
|
||||
assertThat(response.getActivePrimaryShards(), equalTo(2));
|
||||
assertThat(response.getNumberOfDataNodes(), equalTo(1));
|
||||
assertThat(response.getNumberOfNodes(), equalTo(1));
|
||||
assertThat(response.getActiveShards(), equalTo(2));
|
||||
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
|
||||
assertThat(response.getInitializingShards(), equalTo(0));
|
||||
assertThat(response.getUnassignedShards(), equalTo(2));
|
||||
assertThat(response.getActiveShardsPercent(), equalTo(50d));
|
||||
}
|
||||
|
||||
public void testClusterHealthYellowSpecificIndex() throws IOException {
|
||||
createIndex("index", Settings.EMPTY);
|
||||
createIndex("index2", Settings.EMPTY);
|
||||
ClusterHealthRequest request = new ClusterHealthRequest("index");
|
||||
request.level(ClusterHealthRequest.Level.SHARDS);
|
||||
request.timeout("5s");
|
||||
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
|
||||
|
||||
assertThat(response, notNullValue());
|
||||
assertThat(response.isTimedOut(), equalTo(false));
|
||||
assertThat(response.status(), equalTo(RestStatus.OK));
|
||||
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
|
||||
assertThat(response.getActivePrimaryShards(), equalTo(1));
|
||||
assertThat(response.getNumberOfDataNodes(), equalTo(1));
|
||||
assertThat(response.getNumberOfNodes(), equalTo(1));
|
||||
assertThat(response.getActiveShards(), equalTo(1));
|
||||
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
|
||||
assertThat(response.getInitializingShards(), equalTo(0));
|
||||
assertThat(response.getUnassignedShards(), equalTo(1));
|
||||
assertThat(response.getActiveShardsPercent(), equalTo(50d));
|
||||
assertThat(response.getIndices().size(), equalTo(1));
|
||||
Map.Entry<String, ClusterIndexHealth> index = response.getIndices().entrySet().iterator().next();
|
||||
assertYellowIndex(index.getKey(), index.getValue(), false);
|
||||
}
|
||||
|
||||
private static void assertYellowIndex(String indexName, ClusterIndexHealth indexHealth, boolean emptyShards) {
|
||||
assertThat(indexHealth, notNullValue());
|
||||
assertThat(indexHealth.getIndex(),equalTo(indexName));
|
||||
assertThat(indexHealth.getActivePrimaryShards(),equalTo(1));
|
||||
assertThat(indexHealth.getActiveShards(),equalTo(1));
|
||||
assertThat(indexHealth.getNumberOfReplicas(),equalTo(1));
|
||||
assertThat(indexHealth.getInitializingShards(),equalTo(0));
|
||||
assertThat(indexHealth.getUnassignedShards(),equalTo(1));
|
||||
assertThat(indexHealth.getRelocatingShards(),equalTo(0));
|
||||
assertThat(indexHealth.getStatus(),equalTo(ClusterHealthStatus.YELLOW));
|
||||
if (emptyShards) {
|
||||
assertThat(indexHealth.getShards().size(), equalTo(0));
|
||||
} else {
|
||||
assertThat(indexHealth.getShards().size(), equalTo(1));
|
||||
for (Map.Entry<Integer, ClusterShardHealth> entry : indexHealth.getShards().entrySet()) {
|
||||
assertYellowShard(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void assertYellowShard(int shardId, ClusterShardHealth shardHealth) {
|
||||
assertThat(shardHealth, notNullValue());
|
||||
assertThat(shardHealth.getShardId(), equalTo(shardId));
|
||||
assertThat(shardHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
|
||||
assertThat(shardHealth.getActiveShards(), equalTo(1));
|
||||
assertThat(shardHealth.getInitializingShards(), equalTo(0));
|
||||
assertThat(shardHealth.getUnassignedShards(), equalTo(1));
|
||||
assertThat(shardHealth.getRelocatingShards(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testClusterHealthNotFoundIndex() throws IOException {
|
||||
ClusterHealthRequest request = new ClusterHealthRequest("notexisted-index");
|
||||
request.timeout("5s");
|
||||
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);
|
||||
|
||||
assertThat(response, notNullValue());
|
||||
assertThat(response.isTimedOut(), equalTo(true));
|
||||
assertThat(response.status(), equalTo(RestStatus.REQUEST_TIMEOUT));
|
||||
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED));
|
||||
assertNoIndices(response);
|
||||
}
|
||||
|
||||
private static void assertNoIndices(ClusterHealthResponse response) {
|
||||
assertThat(response.getIndices(), equalTo(emptyMap()));
|
||||
assertThat(response.getActivePrimaryShards(), equalTo(0));
|
||||
assertThat(response.getNumberOfDataNodes(), equalTo(1));
|
||||
assertThat(response.getNumberOfNodes(), equalTo(1));
|
||||
assertThat(response.getActiveShards(), equalTo(0));
|
||||
assertThat(response.getDelayedUnassignedShards(), equalTo(0));
|
||||
assertThat(response.getInitializingShards(), equalTo(0));
|
||||
assertThat(response.getUnassignedShards(), equalTo(0));
|
||||
assertThat(response.getActiveShardsPercent(), equalTo(100d));
|
||||
}
|
||||
}
|
||||
|
@ -74,8 +74,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
if (randomBoolean()) {
|
||||
deleteRequest.version(1L);
|
||||
}
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
assertEquals("index", deleteResponse.getIndex());
|
||||
assertEquals("type", deleteResponse.getType());
|
||||
assertEquals(docId, deleteResponse.getId());
|
||||
@ -85,8 +84,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
// Testing non existing document
|
||||
String docId = "does_not_exist";
|
||||
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
assertEquals("index", deleteResponse.getIndex());
|
||||
assertEquals("type", deleteResponse.getType());
|
||||
assertEquals(docId, deleteResponse.getId());
|
||||
@ -99,8 +97,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT);
|
||||
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).version(2);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync));
|
||||
() -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync));
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + docId + "]: " +
|
||||
"version conflict, current version [1] is different than the one provided [2]]", exception.getMessage());
|
||||
@ -113,8 +110,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))
|
||||
.versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT);
|
||||
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(13);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
assertEquals("index", deleteResponse.getIndex());
|
||||
assertEquals("type", deleteResponse.getType());
|
||||
assertEquals(docId, deleteResponse.getId());
|
||||
@ -128,8 +124,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
.versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT);
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
|
||||
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(10);
|
||||
execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
});
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" +
|
||||
@ -142,8 +137,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo"),
|
||||
RequestOptions.DEFAULT);
|
||||
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).routing("foo");
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync,
|
||||
highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
|
||||
assertEquals("index", deleteResponse.getIndex());
|
||||
assertEquals("type", deleteResponse.getType());
|
||||
assertEquals(docId, deleteResponse.getId());
|
||||
@ -154,8 +148,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
public void testExists() throws IOException {
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id");
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync,
|
||||
highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
}
|
||||
IndexRequest index = new IndexRequest("index", "type", "id");
|
||||
index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON);
|
||||
@ -163,18 +156,15 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
highLevelClient().index(index, RequestOptions.DEFAULT);
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id");
|
||||
assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync,
|
||||
highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
}
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "does_not_exist");
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync,
|
||||
highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
}
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1);
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync,
|
||||
highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
|
||||
}
|
||||
}
|
||||
|
||||
@ -182,8 +172,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id");
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync));
|
||||
() -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage());
|
||||
assertEquals("index", exception.getMetadata("es.index").get(0));
|
||||
@ -196,8 +185,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id").version(2);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync));
|
||||
() -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync));
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[type][id]: " +
|
||||
"version conflict, current version [1] is different than the one provided [2]]", exception.getMessage());
|
||||
@ -208,8 +196,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
if (randomBoolean()) {
|
||||
getRequest.version(1L);
|
||||
}
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync);
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
|
||||
assertEquals("index", getResponse.getIndex());
|
||||
assertEquals("type", getResponse.getType());
|
||||
assertEquals("id", getResponse.getId());
|
||||
@ -220,8 +207,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "does_not_exist");
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync);
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
|
||||
assertEquals("index", getResponse.getIndex());
|
||||
assertEquals("type", getResponse.getType());
|
||||
assertEquals("does_not_exist", getResponse.getId());
|
||||
@ -233,8 +219,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
GetRequest getRequest = new GetRequest("index", "type", "id");
|
||||
getRequest.fetchSourceContext(new FetchSourceContext(false, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY));
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync);
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
|
||||
assertEquals("index", getResponse.getIndex());
|
||||
assertEquals("type", getResponse.getType());
|
||||
assertEquals("id", getResponse.getId());
|
||||
@ -250,8 +235,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
} else {
|
||||
getRequest.fetchSourceContext(new FetchSourceContext(true, Strings.EMPTY_ARRAY, new String[]{"field2"}));
|
||||
}
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync,
|
||||
highLevelClient()::get, highLevelClient()::getAsync);
|
||||
GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
|
||||
assertEquals("index", getResponse.getIndex());
|
||||
assertEquals("type", getResponse.getType());
|
||||
assertEquals("id", getResponse.getId());
|
||||
@ -269,8 +253,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
MultiGetRequest multiGetRequest = new MultiGetRequest();
|
||||
multiGetRequest.add("index", "type", "id1");
|
||||
multiGetRequest.add("index", "type", "id2");
|
||||
MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync,
|
||||
highLevelClient()::multiGet, highLevelClient()::multiGetAsync);
|
||||
MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync);
|
||||
assertEquals(2, response.getResponses().length);
|
||||
|
||||
assertTrue(response.getResponses()[0].isFailed());
|
||||
@ -302,8 +285,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
MultiGetRequest multiGetRequest = new MultiGetRequest();
|
||||
multiGetRequest.add("index", "type", "id1");
|
||||
multiGetRequest.add("index", "type", "id2");
|
||||
MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync,
|
||||
highLevelClient()::multiGet, highLevelClient()::multiGetAsync);
|
||||
MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync);
|
||||
assertEquals(2, response.getResponses().length);
|
||||
|
||||
assertFalse(response.getResponses()[0].isFailed());
|
||||
@ -328,8 +310,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
IndexRequest indexRequest = new IndexRequest("index", "type");
|
||||
indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("test", "test").endObject());
|
||||
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
assertEquals(RestStatus.CREATED, indexResponse.status());
|
||||
assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
|
||||
assertEquals("index", indexResponse.getIndex());
|
||||
@ -350,8 +331,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
IndexRequest indexRequest = new IndexRequest("index", "type", "id");
|
||||
indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 1).endObject());
|
||||
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
assertEquals(RestStatus.CREATED, indexResponse.status());
|
||||
assertEquals("index", indexResponse.getIndex());
|
||||
assertEquals("type", indexResponse.getType());
|
||||
@ -361,8 +341,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
indexRequest = new IndexRequest("index", "type", "id");
|
||||
indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 2).endObject());
|
||||
|
||||
indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
assertEquals(RestStatus.OK, indexResponse.status());
|
||||
assertEquals("index", indexResponse.getIndex());
|
||||
assertEquals("type", indexResponse.getType());
|
||||
@ -374,8 +353,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
wrongRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
|
||||
wrongRequest.version(5L);
|
||||
|
||||
execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
});
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: " +
|
||||
@ -388,8 +366,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
|
||||
indexRequest.setPipeline("missing");
|
||||
|
||||
execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
});
|
||||
|
||||
assertEquals(RestStatus.BAD_REQUEST, exception.status());
|
||||
@ -402,8 +379,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
indexRequest.version(12L);
|
||||
indexRequest.versionType(VersionType.EXTERNAL);
|
||||
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
assertEquals(RestStatus.CREATED, indexResponse.status());
|
||||
assertEquals("index", indexResponse.getIndex());
|
||||
assertEquals("type", indexResponse.getType());
|
||||
@ -415,16 +391,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
|
||||
indexRequest.opType(DocWriteRequest.OpType.CREATE);
|
||||
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
assertEquals(RestStatus.CREATED, indexResponse.status());
|
||||
assertEquals("index", indexResponse.getIndex());
|
||||
assertEquals("type", indexResponse.getType());
|
||||
assertEquals("with_create_op_type", indexResponse.getId());
|
||||
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
|
||||
execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync,
|
||||
highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
|
||||
});
|
||||
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
@ -439,8 +413,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
|
||||
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
|
||||
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync));
|
||||
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]",
|
||||
exception.getMessage());
|
||||
@ -463,8 +436,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequestConflict.version(indexResponse.getVersion());
|
||||
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
|
||||
execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync));
|
||||
execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync));
|
||||
assertEquals(RestStatus.CONFLICT, exception.status());
|
||||
assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " +
|
||||
"current version [2] is different than the one provided [1]]", exception.getMessage());
|
||||
@ -480,8 +452,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.script(script);
|
||||
updateRequest.fetchSource(true);
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.OK, updateResponse.status());
|
||||
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
|
||||
assertEquals(2L, updateResponse.getVersion());
|
||||
@ -501,8 +472,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values()));
|
||||
updateRequest.fetchSource("field_*", "field_3");
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.OK, updateResponse.status());
|
||||
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
|
||||
assertEquals(13L, updateResponse.getVersion());
|
||||
@ -523,8 +493,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop");
|
||||
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.OK, updateResponse.status());
|
||||
assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult());
|
||||
assertEquals(1L, updateResponse.getVersion());
|
||||
@ -542,8 +511,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.doc(singletonMap("doc_status", "updated"));
|
||||
updateRequest.fetchSource(true);
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.CREATED, updateResponse.status());
|
||||
assertEquals("index", updateResponse.getIndex());
|
||||
assertEquals("type", updateResponse.getType());
|
||||
@ -558,8 +526,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.fetchSource(true);
|
||||
updateRequest.docAsUpsert(true);
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.CREATED, updateResponse.status());
|
||||
assertEquals("index", updateResponse.getIndex());
|
||||
assertEquals("type", updateResponse.getType());
|
||||
@ -575,8 +542,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
updateRequest.scriptedUpsert(true);
|
||||
updateRequest.upsert(singletonMap("level", "A"));
|
||||
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
assertEquals(RestStatus.CREATED, updateResponse.status());
|
||||
assertEquals("index", updateResponse.getIndex());
|
||||
assertEquals("type", updateResponse.getType());
|
||||
@ -591,8 +557,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
|
||||
updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON));
|
||||
updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML));
|
||||
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync,
|
||||
highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
|
||||
});
|
||||
assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
|
||||
exception.getMessage());
|
||||
@ -651,8 +616,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync,
|
||||
highLevelClient()::bulk, highLevelClient()::bulkAsync);
|
||||
BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync);
|
||||
assertEquals(RestStatus.OK, bulkResponse.status());
|
||||
assertTrue(bulkResponse.getTook().getMillis() > 0);
|
||||
assertEquals(nbItems, bulkResponse.getItems().length);
|
||||
@ -688,10 +652,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
};
|
||||
|
||||
// Pull the client to a variable to work around https://bugs.eclipse.org/bugs/show_bug.cgi?id=514884
|
||||
RestHighLevelClient hlClient = highLevelClient();
|
||||
|
||||
try (BulkProcessor processor = BulkProcessor.builder(hlClient::bulkAsync, listener)
|
||||
try (BulkProcessor processor = BulkProcessor.builder(
|
||||
(request, bulkListener) -> highLevelClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener)
|
||||
.setConcurrentRequests(0)
|
||||
.setBulkSize(new ByteSizeValue(5, ByteSizeUnit.GB))
|
||||
.setBulkActions(nbItems + 1)
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
@ -80,43 +79,6 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
|
||||
void execute(Request request, RequestOptions options, ActionListener<Response> listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the provided request using either the sync method or its async variant, both provided as functions
|
||||
*/
|
||||
@Deprecated
|
||||
protected static <Req, Resp> Resp execute(Req request, SyncMethod<Req, Resp> syncMethod, AsyncMethod<Req, Resp> asyncMethod,
|
||||
SyncMethodWithHeaders<Req, Resp> syncMethodWithHeaders,
|
||||
AsyncMethodWithHeaders<Req, Resp> asyncMethodWithHeaders) throws IOException {
|
||||
switch(randomIntBetween(0, 3)) {
|
||||
case 0:
|
||||
return syncMethod.execute(request, RequestOptions.DEFAULT);
|
||||
case 1:
|
||||
PlainActionFuture<Resp> future = PlainActionFuture.newFuture();
|
||||
asyncMethod.execute(request, RequestOptions.DEFAULT, future);
|
||||
return future.actionGet();
|
||||
case 2:
|
||||
return syncMethodWithHeaders.execute(request);
|
||||
case 3:
|
||||
PlainActionFuture<Resp> futureWithHeaders = PlainActionFuture.newFuture();
|
||||
asyncMethodWithHeaders.execute(request, futureWithHeaders);
|
||||
return futureWithHeaders.actionGet();
|
||||
default:
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@FunctionalInterface
|
||||
protected interface SyncMethodWithHeaders<Request, Response> {
|
||||
Response execute(Request request, Header... headers) throws IOException;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@FunctionalInterface
|
||||
protected interface AsyncMethodWithHeaders<Request, Response> {
|
||||
void execute(Request request, ActionListener<Response> listener, Header... headers);
|
||||
}
|
||||
|
||||
private static class HighLevelClient extends RestHighLevelClient {
|
||||
private HighLevelClient(RestClient restClient) {
|
||||
super(restClient, (client) -> {}, Collections.emptyList());
|
||||
|
@ -0,0 +1,175 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
public class GetAliasesResponseTests extends AbstractXContentTestCase<GetAliasesResponse> {
|
||||
|
||||
@Override
|
||||
protected GetAliasesResponse createTestInstance() {
|
||||
RestStatus status = randomFrom(RestStatus.OK, RestStatus.NOT_FOUND);
|
||||
String errorMessage = RestStatus.OK == status ? null : randomAlphaOfLengthBetween(5, 10);
|
||||
return new GetAliasesResponse(status, errorMessage, createIndicesAliasesMap(0, 5));
|
||||
}
|
||||
|
||||
private static Map<String, Set<AliasMetaData>> createIndicesAliasesMap(int min, int max) {
|
||||
Map<String, Set<AliasMetaData>> map = new HashMap<>();
|
||||
int indicesNum = randomIntBetween(min, max);
|
||||
for (int i = 0; i < indicesNum; i++) {
|
||||
String index = randomAlphaOfLength(5);
|
||||
Set<AliasMetaData> aliasMetaData = new HashSet<>();
|
||||
int aliasesNum = randomIntBetween(0, 3);
|
||||
for (int alias = 0; alias < aliasesNum; alias++) {
|
||||
aliasMetaData.add(createAliasMetaData());
|
||||
}
|
||||
map.put(index, aliasMetaData);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
private static AliasMetaData createAliasMetaData() {
|
||||
AliasMetaData.Builder builder = AliasMetaData.builder(randomAlphaOfLengthBetween(3, 10));
|
||||
if (randomBoolean()) {
|
||||
builder.routing(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.searchRouting(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.indexRouting(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.filter("{\"term\":{\"year\":2016}}");
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected GetAliasesResponse doParseInstance(XContentParser parser) throws IOException {
|
||||
return GetAliasesResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Predicate<String> getRandomFieldsExcludeFilter() {
|
||||
return p -> p.equals("") // do not add elements at the top-level as any element at this level is parsed as a new index
|
||||
|| p.endsWith(".aliases") // do not add new alias
|
||||
|| p.contains(".filter"); // do not insert random data into AliasMetaData#filter
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void assertEqualInstances(GetAliasesResponse expectedInstance, GetAliasesResponse newInstance) {
|
||||
assertEquals(expectedInstance.getAliases(), newInstance.getAliases());
|
||||
assertEquals(expectedInstance.status(), newInstance.status());
|
||||
assertEquals(expectedInstance.getError(), newInstance.getError());
|
||||
assertNull(expectedInstance.getException());
|
||||
assertNull(newInstance.getException());
|
||||
}
|
||||
|
||||
public void testFromXContentWithElasticsearchException() throws IOException {
|
||||
String xContent =
|
||||
"{" +
|
||||
" \"error\": {" +
|
||||
" \"root_cause\": [" +
|
||||
" {" +
|
||||
" \"type\": \"index_not_found_exception\"," +
|
||||
" \"reason\": \"no such index\"," +
|
||||
" \"resource.type\": \"index_or_alias\"," +
|
||||
" \"resource.id\": \"index\"," +
|
||||
" \"index_uuid\": \"_na_\"," +
|
||||
" \"index\": \"index\"" +
|
||||
" }" +
|
||||
" ]," +
|
||||
" \"type\": \"index_not_found_exception\"," +
|
||||
" \"reason\": \"no such index\"," +
|
||||
" \"resource.type\": \"index_or_alias\"," +
|
||||
" \"resource.id\": \"index\"," +
|
||||
" \"index_uuid\": \"_na_\"," +
|
||||
" \"index\": \"index\"" +
|
||||
" }," +
|
||||
" \"status\": 404" +
|
||||
"}";
|
||||
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
|
||||
GetAliasesResponse getAliasesResponse = GetAliasesResponse.fromXContent(parser);
|
||||
assertThat(getAliasesResponse.getError(), nullValue());
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getException().getMessage(),
|
||||
equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testFromXContentWithNoAliasFound() throws IOException {
|
||||
String xContent =
|
||||
"{" +
|
||||
" \"error\": \"alias [aa] missing\"," +
|
||||
" \"status\": 404" +
|
||||
"}";
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
|
||||
GetAliasesResponse getAliasesResponse = GetAliasesResponse.fromXContent(parser);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getError(), equalTo("alias [aa] missing"));
|
||||
assertThat(getAliasesResponse.getException(), nullValue());
|
||||
}
|
||||
}
|
||||
|
||||
public void testFromXContentWithMissingAndFoundAlias() throws IOException {
|
||||
String xContent =
|
||||
"{" +
|
||||
" \"error\": \"alias [something] missing\"," +
|
||||
" \"status\": 404," +
|
||||
" \"index\": {" +
|
||||
" \"aliases\": {" +
|
||||
" \"alias\": {}" +
|
||||
" }" +
|
||||
" }" +
|
||||
"}";
|
||||
final String index = "index";
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) {
|
||||
GetAliasesResponse response = GetAliasesResponse.fromXContent(parser);
|
||||
assertThat(response.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(response.getError(), equalTo("alias [something] missing"));
|
||||
assertThat(response.getAliases().size(), equalTo(1));
|
||||
assertThat(response.getAliases().get(index).size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData = response.getAliases().get(index).iterator().next();
|
||||
assertThat(aliasMetaData.alias(), equalTo("alias"));
|
||||
assertThat(response.getException(), nullValue());
|
||||
}
|
||||
}
|
||||
}
|
@ -52,20 +52,24 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
|
||||
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
|
||||
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
|
||||
import org.elasticsearch.common.ValidationException;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
@ -88,12 +92,15 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractRawValues;
|
||||
import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue;
|
||||
import static org.hamcrest.CoreMatchers.hasItem;
|
||||
import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasEntry;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
@ -110,8 +117,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertTrue(response);
|
||||
@ -127,8 +132,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertFalse(response);
|
||||
@ -147,8 +150,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
boolean response = execute(
|
||||
request,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync,
|
||||
highLevelClient().indices()::exists,
|
||||
highLevelClient().indices()::existsAsync
|
||||
);
|
||||
assertFalse(response);
|
||||
@ -166,8 +167,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
|
||||
|
||||
CreateIndexResponse createIndexResponse =
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync,
|
||||
highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
|
||||
assertTrue(indexExists(indexName));
|
||||
@ -195,8 +195,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
createIndexRequest.mapping("type_name", mappingBuilder);
|
||||
|
||||
CreateIndexResponse createIndexResponse =
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync,
|
||||
highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
|
||||
Map<String, Object> getIndexResponse = getAsMap(indexName);
|
||||
@ -316,28 +315,26 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertEquals(0, getSettingsResponse.getIndexToSettings().get("get_settings_index").size());
|
||||
assertEquals(1, getSettingsResponse.getIndexToDefaultSettings().get("get_settings_index").size());
|
||||
}
|
||||
|
||||
public void testPutMapping() throws IOException {
|
||||
{
|
||||
// Add mappings to index
|
||||
String indexName = "mapping_index";
|
||||
createIndex(indexName, Settings.EMPTY);
|
||||
// Add mappings to index
|
||||
String indexName = "mapping_index";
|
||||
createIndex(indexName, Settings.EMPTY);
|
||||
|
||||
PutMappingRequest putMappingRequest = new PutMappingRequest(indexName);
|
||||
putMappingRequest.type("type_name");
|
||||
XContentBuilder mappingBuilder = JsonXContent.contentBuilder();
|
||||
mappingBuilder.startObject().startObject("properties").startObject("field");
|
||||
mappingBuilder.field("type", "text");
|
||||
mappingBuilder.endObject().endObject().endObject();
|
||||
putMappingRequest.source(mappingBuilder);
|
||||
PutMappingRequest putMappingRequest = new PutMappingRequest(indexName);
|
||||
putMappingRequest.type("type_name");
|
||||
XContentBuilder mappingBuilder = JsonXContent.contentBuilder();
|
||||
mappingBuilder.startObject().startObject("properties").startObject("field");
|
||||
mappingBuilder.field("type", "text");
|
||||
mappingBuilder.endObject().endObject().endObject();
|
||||
putMappingRequest.source(mappingBuilder);
|
||||
|
||||
PutMappingResponse putMappingResponse =
|
||||
execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync,
|
||||
highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
PutMappingResponse putMappingResponse =
|
||||
execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
|
||||
Map<String, Object> getIndexResponse = getAsMap(indexName);
|
||||
assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.type_name.properties.field.type", getIndexResponse));
|
||||
}
|
||||
Map<String, Object> getIndexResponse = getAsMap(indexName);
|
||||
assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.type_name.properties.field.type", getIndexResponse));
|
||||
}
|
||||
|
||||
public void testGetMapping() throws IOException {
|
||||
@ -384,8 +381,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);
|
||||
DeleteIndexResponse deleteIndexResponse =
|
||||
execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync,
|
||||
highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
|
||||
execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
|
||||
assertTrue(deleteIndexResponse.isAcknowledged());
|
||||
|
||||
assertFalse(indexExists(indexName));
|
||||
@ -398,8 +394,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex);
|
||||
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync,
|
||||
highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync));
|
||||
() -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
@ -418,7 +413,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}");
|
||||
aliasesAddRequest.addAliasAction(addAction);
|
||||
IndicesAliasesResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesAddResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(true));
|
||||
@ -437,7 +431,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index(index).alias(alias);
|
||||
aliasesAddRemoveRequest.addAliasAction(removeAction);
|
||||
IndicesAliasesResponse aliasesAddRemoveResponse = execute(aliasesAddRemoveRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesAddRemoveResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
@ -449,7 +442,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index(index);
|
||||
aliasesRemoveIndexRequest.addAliasAction(removeIndexAction);
|
||||
IndicesAliasesResponse aliasesRemoveIndexResponse = execute(aliasesRemoveIndexRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync);
|
||||
assertTrue(aliasesRemoveIndexResponse.isAcknowledged());
|
||||
assertThat(aliasExists(alias), equalTo(false));
|
||||
@ -467,9 +459,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
IndicesAliasesRequest nonExistentIndexRequest = new IndicesAliasesRequest();
|
||||
nonExistentIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias));
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest,
|
||||
highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync,
|
||||
highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync));
|
||||
highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex));
|
||||
@ -479,8 +469,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).indices(index).aliases(alias));
|
||||
mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).indices(nonExistentIndex).alias(alias));
|
||||
exception = expectThrows(ElasticsearchStatusException.class,
|
||||
() -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync,
|
||||
highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync));
|
||||
() -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex));
|
||||
@ -492,7 +481,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias));
|
||||
removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE_INDEX).indices(nonExistentIndex));
|
||||
exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases,
|
||||
highLevelClient().indices()::updateAliasesAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
@ -513,7 +501,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
OpenIndexRequest openIndexRequest = new OpenIndexRequest(index);
|
||||
OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open,
|
||||
highLevelClient().indices()::openAsync, highLevelClient().indices()::open,
|
||||
highLevelClient().indices()::openAsync);
|
||||
assertTrue(openIndexResponse.isAcknowledged());
|
||||
|
||||
@ -527,22 +514,19 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync,
|
||||
highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
|
||||
() -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
|
||||
OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex);
|
||||
lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
|
||||
OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open,
|
||||
highLevelClient().indices()::openAsync, highLevelClient().indices()::open,
|
||||
highLevelClient().indices()::openAsync);
|
||||
assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex);
|
||||
strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen());
|
||||
ElasticsearchException strictException = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync,
|
||||
highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
|
||||
() -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, strictException.status());
|
||||
}
|
||||
|
||||
@ -554,7 +538,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index);
|
||||
CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close,
|
||||
highLevelClient().indices()::closeAsync, highLevelClient().indices()::close,
|
||||
highLevelClient().indices()::closeAsync);
|
||||
assertTrue(closeIndexResponse.isAcknowledged());
|
||||
|
||||
@ -570,8 +553,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync,
|
||||
highLevelClient().indices()::close, highLevelClient().indices()::closeAsync));
|
||||
() -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
|
||||
@ -585,8 +567,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
createIndex(index, settings);
|
||||
RefreshRequest refreshRequest = new RefreshRequest(index);
|
||||
RefreshResponse refreshResponse =
|
||||
execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync,
|
||||
highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync);
|
||||
execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync);
|
||||
assertThat(refreshResponse.getTotalShards(), equalTo(1));
|
||||
assertThat(refreshResponse.getSuccessfulShards(), equalTo(1));
|
||||
assertThat(refreshResponse.getFailedShards(), equalTo(0));
|
||||
@ -597,8 +578,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertFalse(indexExists(nonExistentIndex));
|
||||
RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync,
|
||||
highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync));
|
||||
() -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
@ -613,8 +593,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
createIndex(index, settings);
|
||||
FlushRequest flushRequest = new FlushRequest(index);
|
||||
FlushResponse flushResponse =
|
||||
execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync,
|
||||
highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync);
|
||||
execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync);
|
||||
assertThat(flushResponse.getTotalShards(), equalTo(1));
|
||||
assertThat(flushResponse.getSuccessfulShards(), equalTo(1));
|
||||
assertThat(flushResponse.getFailedShards(), equalTo(0));
|
||||
@ -625,8 +604,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertFalse(indexExists(nonExistentIndex));
|
||||
FlushRequest flushRequest = new FlushRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync,
|
||||
highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync));
|
||||
() -> execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
@ -674,8 +652,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
createIndex(index, settings);
|
||||
ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(index);
|
||||
ClearIndicesCacheResponse clearCacheResponse =
|
||||
execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync,
|
||||
highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync);
|
||||
execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync);
|
||||
assertThat(clearCacheResponse.getTotalShards(), equalTo(1));
|
||||
assertThat(clearCacheResponse.getSuccessfulShards(), equalTo(1));
|
||||
assertThat(clearCacheResponse.getFailedShards(), equalTo(0));
|
||||
@ -686,8 +663,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertFalse(indexExists(nonExistentIndex));
|
||||
ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync,
|
||||
highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync));
|
||||
() -> execute(clearCacheRequest, highLevelClient().indices()::clearCache,
|
||||
highLevelClient().indices()::clearCacheAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
@ -702,8 +679,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
createIndex(index, settings);
|
||||
ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index);
|
||||
ForceMergeResponse forceMergeResponse =
|
||||
execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync,
|
||||
highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync);
|
||||
execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync);
|
||||
assertThat(forceMergeResponse.getTotalShards(), equalTo(1));
|
||||
assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1));
|
||||
assertThat(forceMergeResponse.getFailedShards(), equalTo(0));
|
||||
@ -714,30 +690,25 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
assertFalse(indexExists(nonExistentIndex));
|
||||
ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex);
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
|
||||
() -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync,
|
||||
highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync));
|
||||
() -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
}
|
||||
}
|
||||
|
||||
public void testExistsAlias() throws IOException {
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias");
|
||||
assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync,
|
||||
highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
|
||||
createIndex("index", Settings.EMPTY);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index/_alias/alias");
|
||||
assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync,
|
||||
highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
|
||||
GetAliasesRequest getAliasesRequest2 = new GetAliasesRequest();
|
||||
getAliasesRequest2.aliases("alias");
|
||||
getAliasesRequest2.indices("index");
|
||||
assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync,
|
||||
highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
getAliasesRequest2.indices("does_not_exist");
|
||||
assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync,
|
||||
highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@ -758,7 +729,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
.build();
|
||||
resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias")));
|
||||
ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::shrink,
|
||||
highLevelClient().indices()::shrinkAsync, highLevelClient().indices()::shrink, highLevelClient().indices()::shrinkAsync);
|
||||
highLevelClient().indices()::shrinkAsync);
|
||||
assertTrue(resizeResponse.isAcknowledged());
|
||||
assertTrue(resizeResponse.isShardsAcknowledged());
|
||||
Map<String, Object> getIndexResponse = getAsMap("target");
|
||||
@ -780,8 +751,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
resizeRequest.setResizeType(ResizeType.SPLIT);
|
||||
Settings targetSettings = Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build();
|
||||
resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias")));
|
||||
ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::split, highLevelClient().indices()::splitAsync,
|
||||
highLevelClient().indices()::split, highLevelClient().indices()::splitAsync);
|
||||
ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::split, highLevelClient().indices()::splitAsync);
|
||||
assertTrue(resizeResponse.isAcknowledged());
|
||||
assertTrue(resizeResponse.isShardsAcknowledged());
|
||||
Map<String, Object> getIndexResponse = getAsMap("target");
|
||||
@ -800,7 +770,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
{
|
||||
RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync);
|
||||
assertFalse(rolloverResponse.isRolledOver());
|
||||
assertFalse(rolloverResponse.isDryRun());
|
||||
@ -820,7 +789,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
rolloverRequest.addMaxIndexAgeCondition(new TimeValue(1));
|
||||
rolloverRequest.dryRun(true);
|
||||
RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync);
|
||||
assertFalse(rolloverResponse.isRolledOver());
|
||||
assertTrue(rolloverResponse.isDryRun());
|
||||
@ -835,7 +803,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
rolloverRequest.dryRun(false);
|
||||
rolloverRequest.addMaxIndexSizeCondition(new ByteSizeValue(1, ByteSizeUnit.MB));
|
||||
RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover,
|
||||
highLevelClient().indices()::rolloverAsync);
|
||||
assertTrue(rolloverResponse.isRolledOver());
|
||||
assertFalse(rolloverResponse.isDryRun());
|
||||
@ -849,6 +816,197 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetAlias() throws IOException {
|
||||
{
|
||||
createIndex("index1", Settings.EMPTY);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index1/_alias/alias1");
|
||||
|
||||
createIndex("index2", Settings.EMPTY);
|
||||
client().performRequest(HttpPut.METHOD_NAME, "/index2/_alias/alias2");
|
||||
|
||||
createIndex("index3", Settings.EMPTY);
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases("alias1");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData, notNullValue());
|
||||
assertThat(aliasMetaData.alias(), equalTo("alias1"));
|
||||
assertThat(aliasMetaData.getFilter(), nullValue());
|
||||
assertThat(aliasMetaData.getIndexRouting(), nullValue());
|
||||
assertThat(aliasMetaData.getSearchRouting(), nullValue());
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases("alias*");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(2));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases("_all");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(2));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases("*");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(2));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices("_all");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(3));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index3").size(), equalTo(0));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices("ind*");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(3));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index3").size(), equalTo(0));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(3));
|
||||
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
|
||||
assertThat(aliasMetaData1, notNullValue());
|
||||
assertThat(aliasMetaData1.alias(), equalTo("alias1"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index2").size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData2 = getAliasesResponse.getAliases().get("index2").iterator().next();
|
||||
assertThat(aliasMetaData2, notNullValue());
|
||||
assertThat(aliasMetaData2.alias(), equalTo("alias2"));
|
||||
assertThat(getAliasesResponse.getAliases().get("index3").size(), equalTo(0));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetAliasesNonExistentIndexOrAlias() throws IOException {
|
||||
/*
|
||||
* This test is quite extensive as this is the only way we can check that we haven't slid out of sync with the server
|
||||
* because the server renders the xcontent in a spot that is difficult for us to access in a unit test.
|
||||
*/
|
||||
String alias = "alias";
|
||||
String index = "index";
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index);
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getException().getMessage(),
|
||||
equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest(alias);
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getError(), equalTo("alias [" + alias + "] missing"));
|
||||
}
|
||||
createIndex(index, Settings.EMPTY);
|
||||
client().performRequest(HttpPut.METHOD_NAME, index + "/_alias/" + alias);
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index, "non_existent_index");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getException().getMessage(),
|
||||
equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index, "non_existent_index").aliases(alias);
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
assertThat(getAliasesResponse.getException().getMessage(),
|
||||
equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices("non_existent_index*");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(0));
|
||||
}
|
||||
{
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index).aliases(alias, "non_existent_alias");
|
||||
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
|
||||
highLevelClient().indices()::getAliasAsync);
|
||||
assertThat(getAliasesResponse.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
|
||||
assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
|
||||
assertThat(getAliasesResponse.getAliases().get(index).size(), equalTo(1));
|
||||
AliasMetaData aliasMetaData = getAliasesResponse.getAliases().get(index).iterator().next();
|
||||
assertThat(aliasMetaData, notNullValue());
|
||||
assertThat(aliasMetaData.alias(), equalTo(alias));
|
||||
/*
|
||||
This is the above response in json format:
|
||||
{
|
||||
"error": "alias [something] missing",
|
||||
"status": 404,
|
||||
"index": {
|
||||
"aliases": {
|
||||
"alias": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
public void testIndexPutSettings() throws IOException {
|
||||
|
||||
final Setting<Integer> dynamicSetting = IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING;
|
||||
@ -870,7 +1028,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest();
|
||||
dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build());
|
||||
UpdateSettingsResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings,
|
||||
highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings,
|
||||
highLevelClient().indices()::putSettingsAsync);
|
||||
|
||||
assertTrue(response.isAcknowledged());
|
||||
@ -881,7 +1038,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest();
|
||||
staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build());
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync));
|
||||
assertThat(exception.getMessage(),
|
||||
startsWith("Elasticsearch exception [type=illegal_argument_exception, "
|
||||
@ -892,7 +1048,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
closeIndex(index);
|
||||
response = execute(staticSettingRequest, highLevelClient().indices()::putSettings,
|
||||
highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings,
|
||||
highLevelClient().indices()::putSettingsAsync);
|
||||
assertTrue(response.isAcknowledged());
|
||||
openIndex(index);
|
||||
@ -903,7 +1058,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest();
|
||||
unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build());
|
||||
exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync));
|
||||
assertThat(exception.getMessage(), startsWith(
|
||||
"Elasticsearch exception [type=illegal_argument_exception, "
|
||||
@ -931,14 +1085,12 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build());
|
||||
|
||||
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]"));
|
||||
|
||||
createIndex(index, Settings.EMPTY);
|
||||
exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync,
|
||||
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync));
|
||||
assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
|
||||
assertThat(exception.getMessage(), equalTo(
|
||||
@ -1002,4 +1154,51 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
||||
() -> execute(unknownSettingTemplate, client.indices()::putTemplate, client.indices()::putTemplateAsync));
|
||||
assertThat(unknownSettingError.getDetailedMessage(), containsString("unknown setting [index.this-setting-does-not-exist]"));
|
||||
}
|
||||
|
||||
public void testGetIndexTemplate() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
PutIndexTemplateRequest putTemplate1 = new PutIndexTemplateRequest().name("template-1")
|
||||
.patterns(Arrays.asList("pattern-1", "name-1")).alias(new Alias("alias-1"));
|
||||
assertThat(execute(putTemplate1, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged(),
|
||||
equalTo(true));
|
||||
PutIndexTemplateRequest putTemplate2 = new PutIndexTemplateRequest().name("template-2")
|
||||
.patterns(Arrays.asList("pattern-2", "name-2"))
|
||||
.settings(Settings.builder().put("number_of_shards", "2").put("number_of_replicas", "0"));
|
||||
assertThat(execute(putTemplate2, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged(),
|
||||
equalTo(true));
|
||||
|
||||
GetIndexTemplatesResponse getTemplate1 = execute(new GetIndexTemplatesRequest().names("template-1"),
|
||||
client.indices()::getTemplate, client.indices()::getTemplateAsync);
|
||||
assertThat(getTemplate1.getIndexTemplates(), hasSize(1));
|
||||
IndexTemplateMetaData template1 = getTemplate1.getIndexTemplates().get(0);
|
||||
assertThat(template1.name(), equalTo("template-1"));
|
||||
assertThat(template1.patterns(), contains("pattern-1", "name-1"));
|
||||
assertTrue(template1.aliases().containsKey("alias-1"));
|
||||
|
||||
GetIndexTemplatesResponse getTemplate2 = execute(new GetIndexTemplatesRequest().names("template-2"),
|
||||
client.indices()::getTemplate, client.indices()::getTemplateAsync);
|
||||
assertThat(getTemplate2.getIndexTemplates(), hasSize(1));
|
||||
IndexTemplateMetaData template2 = getTemplate2.getIndexTemplates().get(0);
|
||||
assertThat(template2.name(), equalTo("template-2"));
|
||||
assertThat(template2.patterns(), contains("pattern-2", "name-2"));
|
||||
assertTrue(template2.aliases().isEmpty());
|
||||
assertThat(template2.settings().get("index.number_of_shards"), equalTo("2"));
|
||||
assertThat(template2.settings().get("index.number_of_replicas"), equalTo("0"));
|
||||
|
||||
GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest();
|
||||
if (randomBoolean()) {
|
||||
getBothRequest.names("*-1", "template-2");
|
||||
} else {
|
||||
getBothRequest.names("template-*");
|
||||
}
|
||||
GetIndexTemplatesResponse getBoth = execute(getBothRequest, client.indices()::getTemplate, client.indices()::getTemplateAsync);
|
||||
assertThat(getBoth.getIndexTemplates(), hasSize(2));
|
||||
assertThat(getBoth.getIndexTemplates().stream().map(IndexTemplateMetaData::getName).toArray(),
|
||||
arrayContainingInAnyOrder("template-1", "template-2"));
|
||||
|
||||
ElasticsearchException notFound = expectThrows(ElasticsearchException.class, () -> execute(
|
||||
new GetIndexTemplatesRequest().names("the-template-*"), client.indices()::getTemplate, client.indices()::getTemplateAsync));
|
||||
assertThat(notFound.status(), equalTo(RestStatus.NOT_FOUND));
|
||||
}
|
||||
}
|
||||
|
@ -82,8 +82,7 @@ public class RankEvalIT extends ESRestHighLevelClientTestCase {
|
||||
RankEvalSpec spec = new RankEvalSpec(specifications, metric);
|
||||
|
||||
RankEvalRequest rankEvalRequest = new RankEvalRequest(spec, new String[] { "index", "index2" });
|
||||
RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync,
|
||||
highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
|
||||
RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
|
||||
// the expected Prec@ for the first query is 5/7 and the expected Prec@ for the second is 1/7, divided by 2 to get the average
|
||||
double expectedPrecision = (1.0 / 7.0 + 5.0 / 7.0) / 2.0;
|
||||
assertEquals(expectedPrecision, response.getEvaluationResult(), Double.MIN_VALUE);
|
||||
@ -117,8 +116,7 @@ public class RankEvalIT extends ESRestHighLevelClientTestCase {
|
||||
// now try this when test2 is closed
|
||||
client().performRequest("POST", "index2/_close", Collections.emptyMap());
|
||||
rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS));
|
||||
response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync,
|
||||
highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
|
||||
response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
|
||||
}
|
||||
|
||||
private static List<RatedDocument> createRelevant(String indexName, String... docs) {
|
||||
|
@ -29,8 +29,8 @@ import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
|
||||
@ -58,6 +58,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkShardRequest;
|
||||
@ -83,8 +84,10 @@ import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
import org.elasticsearch.action.support.replication.ReplicationRequest;
|
||||
import org.elasticsearch.action.update.UpdateRequest;
|
||||
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.common.CheckedBiConsumer;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
@ -94,12 +97,12 @@ import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.index.RandomCreateIndexGenerator;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
@ -125,6 +128,7 @@ import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.RandomObjects;
|
||||
import org.hamcrest.CoreMatchers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
@ -142,6 +146,7 @@ import java.util.StringJoiner;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE;
|
||||
@ -1525,6 +1530,85 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
assertEquals(expectedParams, expectedRequest.getParameters());
|
||||
}
|
||||
|
||||
public void testClusterHealth() {
|
||||
ClusterHealthRequest healthRequest = new ClusterHealthRequest();
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomLocal(healthRequest, expectedParams);
|
||||
String timeoutType = randomFrom("timeout", "masterTimeout", "both", "none");
|
||||
String timeout = randomTimeValue();
|
||||
String masterTimeout = randomTimeValue();
|
||||
switch (timeoutType) {
|
||||
case "timeout":
|
||||
healthRequest.timeout(timeout);
|
||||
expectedParams.put("timeout", timeout);
|
||||
// If Master Timeout wasn't set it uses the same value as Timeout
|
||||
expectedParams.put("master_timeout", timeout);
|
||||
break;
|
||||
case "masterTimeout":
|
||||
expectedParams.put("timeout", "30s");
|
||||
healthRequest.masterNodeTimeout(masterTimeout);
|
||||
expectedParams.put("master_timeout", masterTimeout);
|
||||
break;
|
||||
case "both":
|
||||
healthRequest.timeout(timeout);
|
||||
expectedParams.put("timeout", timeout);
|
||||
healthRequest.masterNodeTimeout(timeout);
|
||||
expectedParams.put("master_timeout", timeout);
|
||||
break;
|
||||
case "none":
|
||||
expectedParams.put("timeout", "30s");
|
||||
expectedParams.put("master_timeout", "30s");
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
setRandomWaitForActiveShards(healthRequest::waitForActiveShards, ActiveShardCount.NONE, expectedParams);
|
||||
if (randomBoolean()) {
|
||||
ClusterHealthRequest.Level level = randomFrom(ClusterHealthRequest.Level.values());
|
||||
healthRequest.level(level);
|
||||
expectedParams.put("level", level.name().toLowerCase(Locale.ROOT));
|
||||
} else {
|
||||
expectedParams.put("level", "cluster");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
Priority priority = randomFrom(Priority.values());
|
||||
healthRequest.waitForEvents(priority);
|
||||
expectedParams.put("wait_for_events", priority.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
ClusterHealthStatus status = randomFrom(ClusterHealthStatus.values());
|
||||
healthRequest.waitForStatus(status);
|
||||
expectedParams.put("wait_for_status", status.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
boolean waitForNoInitializingShards = randomBoolean();
|
||||
healthRequest.waitForNoInitializingShards(waitForNoInitializingShards);
|
||||
if (waitForNoInitializingShards) {
|
||||
expectedParams.put("wait_for_no_initializing_shards", Boolean.TRUE.toString());
|
||||
}
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
boolean waitForNoRelocatingShards = randomBoolean();
|
||||
healthRequest.waitForNoRelocatingShards(waitForNoRelocatingShards);
|
||||
if (waitForNoRelocatingShards) {
|
||||
expectedParams.put("wait_for_no_relocating_shards", Boolean.TRUE.toString());
|
||||
}
|
||||
}
|
||||
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
|
||||
healthRequest.indices(indices);
|
||||
|
||||
Request request = RequestConverters.clusterHealth(healthRequest);
|
||||
assertThat(request, CoreMatchers.notNullValue());
|
||||
assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
|
||||
assertThat(request.getEntity(), nullValue());
|
||||
if (indices != null && indices.length > 0) {
|
||||
assertThat(request.getEndpoint(), equalTo("/_cluster/health/" + String.join(",", indices)));
|
||||
} else {
|
||||
assertThat(request.getEndpoint(), equalTo("/_cluster/health"));
|
||||
}
|
||||
assertThat(request.getParameters(), equalTo(expectedParams));
|
||||
}
|
||||
|
||||
public void testRollover() throws IOException {
|
||||
RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10),
|
||||
randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10));
|
||||
@ -1563,6 +1647,36 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
}
|
||||
|
||||
public void testGetAlias() {
|
||||
GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
|
||||
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomLocal(getAliasesRequest, expectedParams);
|
||||
setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams);
|
||||
|
||||
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2);
|
||||
String[] aliases = randomBoolean() ? null : randomIndicesNames(0, 2);
|
||||
getAliasesRequest.indices(indices);
|
||||
getAliasesRequest.aliases(aliases);
|
||||
|
||||
Request request = RequestConverters.getAlias(getAliasesRequest);
|
||||
StringJoiner expectedEndpoint = new StringJoiner("/", "/", "");
|
||||
|
||||
if (false == CollectionUtils.isEmpty(indices)) {
|
||||
expectedEndpoint.add(String.join(",", indices));
|
||||
}
|
||||
expectedEndpoint.add("_alias");
|
||||
|
||||
if (false == CollectionUtils.isEmpty(aliases)) {
|
||||
expectedEndpoint.add(String.join(",", aliases));
|
||||
}
|
||||
|
||||
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
|
||||
assertEquals(expectedEndpoint.toString(), request.getEndpoint());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertNull(request.getEntity());
|
||||
}
|
||||
|
||||
public void testIndexPutSettings() throws IOException {
|
||||
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2);
|
||||
UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices);
|
||||
@ -1781,6 +1895,24 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
assertToXContentBody(putTemplateRequest, request.getEntity());
|
||||
}
|
||||
|
||||
public void testGetTemplateRequest() throws Exception {
|
||||
Map<String, String> encodes = new HashMap<>();
|
||||
encodes.put("log", "log");
|
||||
encodes.put("1", "1");
|
||||
encodes.put("template#1", "template%231");
|
||||
encodes.put("template-*", "template-*");
|
||||
encodes.put("foo^bar", "foo%5Ebar");
|
||||
List<String> names = randomSubsetOf(1, encodes.keySet());
|
||||
GetIndexTemplatesRequest getTemplatesRequest = new GetIndexTemplatesRequest().names(names.toArray(new String[0]));
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
setRandomMasterTimeout(getTemplatesRequest, expectedParams);
|
||||
setRandomLocal(getTemplatesRequest, expectedParams);
|
||||
Request request = RequestConverters.getTemplates(getTemplatesRequest);
|
||||
assertThat(request.getEndpoint(), equalTo("/_template/" + names.stream().map(encodes::get).collect(Collectors.joining(","))));
|
||||
assertThat(request.getParameters(), equalTo(expectedParams));
|
||||
assertThat(request.getEntity(), nullValue());
|
||||
}
|
||||
|
||||
private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
|
||||
BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
|
||||
assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
|
||||
@ -2055,16 +2187,24 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
}
|
||||
|
||||
private static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, Map<String, String> expectedParams) {
|
||||
setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams);
|
||||
}
|
||||
|
||||
private static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, ActiveShardCount defaultActiveShardCount,
|
||||
Map<String, String> expectedParams) {
|
||||
if (randomBoolean()) {
|
||||
int waitForActiveShardsInt = randomIntBetween(-1, 5);
|
||||
String waitForActiveShardsString;
|
||||
int waitForActiveShards = randomIntBetween(-1, 5);
|
||||
if (waitForActiveShards == -1) {
|
||||
if (waitForActiveShardsInt == -1) {
|
||||
waitForActiveShardsString = "all";
|
||||
} else {
|
||||
waitForActiveShardsString = String.valueOf(waitForActiveShards);
|
||||
waitForActiveShardsString = String.valueOf(waitForActiveShardsInt);
|
||||
}
|
||||
ActiveShardCount activeShardCount = ActiveShardCount.parseString(waitForActiveShardsString);
|
||||
setter.accept(activeShardCount);
|
||||
if (defaultActiveShardCount.equals(activeShardCount) == false) {
|
||||
expectedParams.put("wait_for_active_shards", waitForActiveShardsString);
|
||||
}
|
||||
setter.accept(ActiveShardCount.parseString(waitForActiveShardsString));
|
||||
expectedParams.put("wait_for_active_shards", waitForActiveShardsString);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -188,12 +188,12 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
|
||||
{
|
||||
ActionRequestValidationException actualException = expectThrows(ActionRequestValidationException.class,
|
||||
() -> restHighLevelClient.performRequest(request, null, null, null));
|
||||
() -> restHighLevelClient.performRequest(request, null, RequestOptions.DEFAULT, null, null));
|
||||
assertSame(validationException, actualException);
|
||||
}
|
||||
{
|
||||
TrackingActionListener trackingActionListener = new TrackingActionListener();
|
||||
restHighLevelClient.performRequestAsync(request, null, null, trackingActionListener, null);
|
||||
restHighLevelClient.performRequestAsync(request, null, RequestOptions.DEFAULT, null, trackingActionListener, null);
|
||||
assertSame(validationException, trackingActionListener.exception.get());
|
||||
}
|
||||
}
|
||||
@ -307,13 +307,13 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse);
|
||||
{
|
||||
Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.emptySet());
|
||||
assertEquals(restStatus.getStatus(), result.intValue());
|
||||
}
|
||||
{
|
||||
IOException ioe = expectThrows(IOException.class, () -> restHighLevelClient.performRequest(mainRequest,
|
||||
requestConverter, response -> {throw new IllegalStateException();}, Collections.emptySet()));
|
||||
requestConverter, RequestOptions.DEFAULT, response -> {throw new IllegalStateException();}, Collections.emptySet()));
|
||||
assertEquals("Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " +
|
||||
"response=http/1.1 " + restStatus.getStatus() + " " + restStatus.name() + "}", ioe.getMessage());
|
||||
}
|
||||
@ -328,7 +328,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
|
||||
assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
|
||||
assertEquals(restStatus, elasticsearchException.status());
|
||||
@ -346,7 +346,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
|
||||
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
|
||||
assertEquals(restStatus, elasticsearchException.status());
|
||||
@ -363,7 +363,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
|
||||
assertEquals("Unable to parse response body", elasticsearchException.getMessage());
|
||||
assertEquals(restStatus, elasticsearchException.status());
|
||||
@ -381,7 +381,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
|
||||
assertEquals("Unable to parse response body", elasticsearchException.getMessage());
|
||||
assertEquals(restStatus, elasticsearchException.status());
|
||||
@ -397,7 +397,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
//although we got an exception, we turn it into a successful response because the status code was provided among ignores
|
||||
assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> response.getStatusLine().getStatusCode(), Collections.singleton(404)));
|
||||
}
|
||||
|
||||
@ -409,7 +409,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> {throw new IllegalStateException();}, Collections.singleton(404)));
|
||||
assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
|
||||
assertSame(responseException, elasticsearchException.getCause());
|
||||
@ -426,7 +426,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
ResponseException responseException = new ResponseException(mockResponse);
|
||||
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
|
||||
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
|
||||
() -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT,
|
||||
response -> {throw new IllegalStateException();}, Collections.singleton(404)));
|
||||
assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
|
||||
assertSame(responseException, elasticsearchException.getSuppressed()[0]);
|
||||
|
@ -164,8 +164,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
public void testSearchMatchQuery() throws IOException {
|
||||
SearchRequest searchRequest = new SearchRequest("index");
|
||||
searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getAggregations());
|
||||
assertNull(searchResponse.getSuggest());
|
||||
@ -191,8 +190,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword"));
|
||||
searchSourceBuilder.size(0);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -218,8 +216,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
|
||||
() -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync));
|
||||
() -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync));
|
||||
assertEquals(RestStatus.BAD_REQUEST, exception.status());
|
||||
}
|
||||
|
||||
@ -229,8 +226,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
.addRange("first", 0, 30).addRange("second", 31, 200));
|
||||
searchSourceBuilder.size(0);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -261,8 +257,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
searchSourceBuilder.aggregation(agg);
|
||||
searchSourceBuilder.size(0);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -313,8 +308,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2")));
|
||||
searchSourceBuilder.size(0);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -403,8 +397,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
SearchRequest searchRequest = new SearchRequest(indexName);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getSuggest());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -444,8 +437,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
searchSourceBuilder.size(0);
|
||||
searchRequest.source(searchSourceBuilder);
|
||||
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
assertSearchHeader(searchResponse);
|
||||
assertNull(searchResponse.getAggregations());
|
||||
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||
@ -477,8 +469,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("null")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
@ -488,8 +479,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("new HashMap()")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
@ -501,8 +491,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
{
|
||||
SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource()
|
||||
.scriptField("result", new Script("new String[]{}")));
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchHit searchHit = searchResponse.getHits().getAt(0);
|
||||
List<Object> values = searchHit.getFields().get("result").getValues();
|
||||
assertNotNull(values);
|
||||
@ -524,8 +513,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC);
|
||||
SearchRequest searchRequest = new SearchRequest("test").scroll(TimeValue.timeValueMinutes(2)).source(searchSourceBuilder);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync,
|
||||
highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||
|
||||
try {
|
||||
long counter = 0;
|
||||
@ -537,7 +525,6 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
|
||||
searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)),
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync,
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync);
|
||||
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
|
||||
@ -547,7 +534,6 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
|
||||
searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)),
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync,
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync);
|
||||
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L));
|
||||
@ -559,14 +545,12 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
|
||||
clearScrollRequest.addScrollId(searchResponse.getScrollId());
|
||||
ClearScrollResponse clearScrollResponse = execute(clearScrollRequest,
|
||||
highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync,
|
||||
highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync);
|
||||
assertThat(clearScrollResponse.getNumFreed(), greaterThan(0));
|
||||
assertTrue(clearScrollResponse.isSucceeded());
|
||||
|
||||
SearchScrollRequest scrollRequest = new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2));
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> execute(scrollRequest,
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync,
|
||||
highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync));
|
||||
assertEquals(RestStatus.NOT_FOUND, exception.status());
|
||||
assertThat(exception.getRootCause(), instanceOf(ElasticsearchException.class));
|
||||
@ -588,8 +572,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
multiSearchRequest.add(searchRequest3);
|
||||
|
||||
MultiSearchResponse multiSearchResponse =
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync,
|
||||
highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
|
||||
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
|
||||
|
||||
@ -631,8 +614,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
multiSearchRequest.add(searchRequest3);
|
||||
|
||||
MultiSearchResponse multiSearchResponse =
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync,
|
||||
highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
|
||||
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
|
||||
|
||||
@ -680,8 +662,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
multiSearchRequest.add(searchRequest3);
|
||||
|
||||
MultiSearchResponse multiSearchResponse =
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync,
|
||||
highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
|
||||
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3));
|
||||
|
||||
@ -744,8 +725,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||
multiSearchRequest.add(searchRequest2);
|
||||
|
||||
MultiSearchResponse multiSearchResponse =
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync,
|
||||
highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync);
|
||||
assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L));
|
||||
assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2));
|
||||
|
||||
|
@ -19,8 +19,6 @@
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
|
||||
@ -35,7 +33,6 @@ import org.elasticsearch.repositories.fs.FsRepository;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
|
@ -18,14 +18,6 @@
|
||||
*/
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Set;
|
||||
import java.util.HashSet;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectIntHashMap;
|
||||
import com.carrotsearch.hppc.ObjectIntMap;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
@ -42,6 +34,14 @@ import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
|
||||
import org.elasticsearch.indices.flush.SyncedFlushService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class SyncedFlushResponseTests extends ESTestCase {
|
||||
|
||||
public void testXContentSerialization() throws IOException {
|
||||
@ -55,9 +55,7 @@ public class SyncedFlushResponseTests extends ESTestCase {
|
||||
serverResponsebuilder.endObject();
|
||||
XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent());
|
||||
assertNotNull(plan.result);
|
||||
clientResponsebuilder.startObject();
|
||||
plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS);
|
||||
clientResponsebuilder.endObject();
|
||||
Map<String, Object> serverContentMap = convertFailureListToSet(
|
||||
serverResponsebuilder
|
||||
.generator()
|
||||
|
@ -746,7 +746,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::bulk-execute-async
|
||||
client.bulkAsync(request, listener); // <1>
|
||||
client.bulkAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::bulk-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
@ -995,8 +995,9 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
};
|
||||
|
||||
BulkProcessor bulkProcessor =
|
||||
BulkProcessor.builder(client::bulkAsync, listener).build(); // <5>
|
||||
BulkProcessor bulkProcessor = BulkProcessor.builder(
|
||||
(request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
|
||||
listener).build(); // <5>
|
||||
// end::bulk-processor-init
|
||||
assertNotNull(bulkProcessor);
|
||||
|
||||
@ -1054,7 +1055,8 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
// end::bulk-processor-listener
|
||||
|
||||
// tag::bulk-processor-options
|
||||
BulkProcessor.Builder builder = BulkProcessor.builder(client::bulkAsync, listener);
|
||||
BulkProcessor.Builder builder = BulkProcessor.builder(
|
||||
(request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener);
|
||||
builder.setBulkActions(500); // <1>
|
||||
builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)); // <2>
|
||||
builder.setConcurrentRequests(0); // <3>
|
||||
@ -1175,7 +1177,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::multi-get-execute-async
|
||||
client.multiGetAsync(request, listener); // <1>
|
||||
client.multiGetAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::multi-get-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
@ -21,17 +21,26 @@ package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.health.ClusterIndexHealth;
|
||||
import org.elasticsearch.cluster.health.ClusterShardHealth;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
@ -40,6 +49,7 @@ import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Cluster API documentation.
|
||||
@ -179,4 +189,174 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
}
|
||||
}
|
||||
|
||||
public void testClusterHealth() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT);
|
||||
{
|
||||
// tag::health-request
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
// end::health-request
|
||||
}
|
||||
{
|
||||
// tag::health-request-indices-ctr
|
||||
ClusterHealthRequest request = new ClusterHealthRequest("index1", "index2");
|
||||
// end::health-request-indices-ctr
|
||||
}
|
||||
{
|
||||
// tag::health-request-indices-setter
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
request.indices("index1", "index2");
|
||||
// end::health-request-indices-setter
|
||||
}
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
|
||||
// tag::health-request-timeout
|
||||
request.timeout(TimeValue.timeValueSeconds(50)); // <1>
|
||||
request.timeout("50s"); // <2>
|
||||
// end::health-request-timeout
|
||||
|
||||
// tag::health-request-master-timeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueSeconds(20)); // <1>
|
||||
request.masterNodeTimeout("20s"); // <2>
|
||||
// end::health-request-master-timeout
|
||||
|
||||
// tag::health-request-wait-status
|
||||
request.waitForStatus(ClusterHealthStatus.YELLOW); // <1>
|
||||
request.waitForYellowStatus(); // <2>
|
||||
// end::health-request-wait-status
|
||||
|
||||
// tag::health-request-wait-events
|
||||
request.waitForEvents(Priority.NORMAL); // <1>
|
||||
// end::health-request-wait-events
|
||||
|
||||
// tag::health-request-level
|
||||
request.level(ClusterHealthRequest.Level.SHARDS); // <1>
|
||||
// end::health-request-level
|
||||
|
||||
// tag::health-request-wait-relocation
|
||||
request.waitForNoRelocatingShards(true); // <1>
|
||||
// end::health-request-wait-relocation
|
||||
|
||||
// tag::health-request-wait-initializing
|
||||
request.waitForNoInitializingShards(true); // <1>
|
||||
// end::health-request-wait-initializing
|
||||
|
||||
// tag::health-request-wait-nodes
|
||||
request.waitForNodes("2"); // <1>
|
||||
request.waitForNodes(">=2"); // <2>
|
||||
request.waitForNodes("le(2)"); // <3>
|
||||
// end::health-request-wait-nodes
|
||||
|
||||
// tag::health-request-wait-active
|
||||
request.waitForActiveShards(ActiveShardCount.ALL); // <1>
|
||||
request.waitForActiveShards(1); // <2>
|
||||
// end::health-request-wait-active
|
||||
|
||||
// tag::health-request-local
|
||||
request.local(true); // <1>
|
||||
// end::health-request-local
|
||||
|
||||
// tag::health-execute
|
||||
ClusterHealthResponse response = client.cluster().health(request, RequestOptions.DEFAULT);
|
||||
// end::health-execute
|
||||
|
||||
assertThat(response.isTimedOut(), equalTo(false));
|
||||
assertThat(response.status(), equalTo(RestStatus.OK));
|
||||
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
|
||||
assertThat(response, notNullValue());
|
||||
// tag::health-response-general
|
||||
String clusterName = response.getClusterName(); // <1>
|
||||
ClusterHealthStatus status = response.getStatus(); // <2>
|
||||
// end::health-response-general
|
||||
|
||||
// tag::health-response-request-status
|
||||
boolean timedOut = response.isTimedOut(); // <1>
|
||||
RestStatus restStatus = response.status(); // <2>
|
||||
// end::health-response-request-status
|
||||
|
||||
// tag::health-response-nodes
|
||||
int numberOfNodes = response.getNumberOfNodes(); // <1>
|
||||
int numberOfDataNodes = response.getNumberOfDataNodes(); // <2>
|
||||
// end::health-response-nodes
|
||||
|
||||
{
|
||||
// tag::health-response-shards
|
||||
int activeShards = response.getActiveShards(); // <1>
|
||||
int activePrimaryShards = response.getActivePrimaryShards(); // <2>
|
||||
int relocatingShards = response.getRelocatingShards(); // <3>
|
||||
int initializingShards = response.getInitializingShards(); // <4>
|
||||
int unassignedShards = response.getUnassignedShards(); // <5>
|
||||
int delayedUnassignedShards = response.getDelayedUnassignedShards(); // <6>
|
||||
double activeShardsPercent = response.getActiveShardsPercent(); // <7>
|
||||
// end::health-response-shards
|
||||
}
|
||||
|
||||
// tag::health-response-task
|
||||
TimeValue taskMaxWaitingTime = response.getTaskMaxWaitingTime(); // <1>
|
||||
int numberOfPendingTasks = response.getNumberOfPendingTasks(); // <2>
|
||||
int numberOfInFlightFetch = response.getNumberOfInFlightFetch(); // <3>
|
||||
// end::health-response-task
|
||||
|
||||
// tag::health-response-indices
|
||||
Map<String, ClusterIndexHealth> indices = response.getIndices(); // <1>
|
||||
// end::health-response-indices
|
||||
|
||||
{
|
||||
// tag::health-response-index
|
||||
ClusterIndexHealth index = indices.get("index"); // <1>
|
||||
ClusterHealthStatus indexStatus = index.getStatus();
|
||||
int numberOfShards = index.getNumberOfShards();
|
||||
int numberOfReplicas = index.getNumberOfReplicas();
|
||||
int activeShards = index.getActiveShards();
|
||||
int activePrimaryShards = index.getActivePrimaryShards();
|
||||
int initializingShards = index.getInitializingShards();
|
||||
int relocatingShards = index.getRelocatingShards();
|
||||
int unassignedShards = index.getUnassignedShards();
|
||||
// end::health-response-index
|
||||
|
||||
// tag::health-response-shard-details
|
||||
Map<Integer, ClusterShardHealth> shards = index.getShards(); // <1>
|
||||
ClusterShardHealth shardHealth = shards.get(0);
|
||||
int shardId = shardHealth.getShardId();
|
||||
ClusterHealthStatus shardStatus = shardHealth.getStatus();
|
||||
int active = shardHealth.getActiveShards();
|
||||
int initializing = shardHealth.getInitializingShards();
|
||||
int unassigned = shardHealth.getUnassignedShards();
|
||||
int relocating = shardHealth.getRelocatingShards();
|
||||
boolean primaryActive = shardHealth.isPrimaryActive();
|
||||
// end::health-response-shard-details
|
||||
}
|
||||
}
|
||||
|
||||
public void testClusterHealthAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
ClusterHealthRequest request = new ClusterHealthRequest();
|
||||
|
||||
// tag::health-execute-listener
|
||||
ActionListener<ClusterHealthResponse> listener =
|
||||
new ActionListener<ClusterHealthResponse>() {
|
||||
@Override
|
||||
public void onResponse(ClusterHealthResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::health-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::health-execute-async
|
||||
client.cluster().healthAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::health-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -51,22 +51,27 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
|
||||
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
|
||||
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.GetAliasesResponse;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.SyncedFlushResponse;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
@ -82,11 +87,14 @@ import org.elasticsearch.rest.RestStatus;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Indices API documentation.
|
||||
@ -568,7 +576,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"));
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
PutMappingRequest request = new PutMappingRequest("twitter");
|
||||
request.type("tweet");
|
||||
@ -581,7 +589,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
" }\n" +
|
||||
"}", // <1>
|
||||
XContentType.JSON);
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request);
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
@ -625,7 +633,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
final RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"));
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
PutMappingRequest request = new PutMappingRequest("twitter");
|
||||
request.type("tweet");
|
||||
@ -638,7 +646,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
" }\n" +
|
||||
"}", // <1>
|
||||
XContentType.JSON);
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request);
|
||||
PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT);
|
||||
assertTrue(putMappingResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
@ -1721,6 +1729,76 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testGetAlias() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index").alias(new Alias("alias")),
|
||||
RequestOptions.DEFAULT);
|
||||
assertTrue(createIndexResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
{
|
||||
// tag::get-alias-request
|
||||
GetAliasesRequest request = new GetAliasesRequest();
|
||||
GetAliasesRequest requestWithAlias = new GetAliasesRequest("alias1");
|
||||
GetAliasesRequest requestWithAliases =
|
||||
new GetAliasesRequest(new String[]{"alias1", "alias2"});
|
||||
// end::get-alias-request
|
||||
|
||||
// tag::get-alias-request-alias
|
||||
request.aliases("alias"); // <1>
|
||||
// end::get-alias-request-alias
|
||||
// tag::get-alias-request-indices
|
||||
request.indices("index"); // <1>
|
||||
// end::get-alias-request-indices
|
||||
|
||||
// tag::get-alias-request-indicesOptions
|
||||
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
|
||||
// end::get-alias-request-indicesOptions
|
||||
|
||||
// tag::get-alias-request-local
|
||||
request.local(true); // <1>
|
||||
// end::get-alias-request-local
|
||||
|
||||
// tag::get-alias-execute
|
||||
GetAliasesResponse response = client.indices().getAlias(request, RequestOptions.DEFAULT);
|
||||
// end::get-alias-execute
|
||||
|
||||
// tag::get-alias-response
|
||||
Map<String, Set<AliasMetaData>> aliases = response.getAliases(); // <1>
|
||||
// end::get-alias-response
|
||||
|
||||
assertThat(response.getAliases().get("index").size(), equalTo(1));
|
||||
assertThat(response.getAliases().get("index").iterator().next().alias(), equalTo("alias"));
|
||||
|
||||
// tag::get-alias-listener
|
||||
ActionListener<GetAliasesResponse> listener =
|
||||
new ActionListener<GetAliasesResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetAliasesResponse getAliasesResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-alias-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-alias-execute-async
|
||||
client.indices().getAliasAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-alias-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testIndexPutSettings() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
@ -1983,4 +2061,71 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testGetTemplates() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("my-template");
|
||||
putRequest.patterns(Arrays.asList("pattern-1", "log-*"));
|
||||
putRequest.settings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 1));
|
||||
putRequest.mapping("tweet",
|
||||
"{\n" +
|
||||
" \"tweet\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"message\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}", XContentType.JSON);
|
||||
assertTrue(client.indices().putTemplate(putRequest, RequestOptions.DEFAULT).isAcknowledged());
|
||||
}
|
||||
|
||||
// tag::get-templates-request
|
||||
GetIndexTemplatesRequest request = new GetIndexTemplatesRequest("my-template"); // <1>
|
||||
request.names("template-1", "template-2"); // <2>
|
||||
request.names("my-*"); // <3>
|
||||
// end::get-templates-request
|
||||
|
||||
// tag::get-templates-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::get-templates-request-masterTimeout
|
||||
|
||||
// tag::get-templates-execute
|
||||
GetIndexTemplatesResponse getTemplatesResponse = client.indices().getTemplate(request, RequestOptions.DEFAULT);
|
||||
// end::get-templates-execute
|
||||
|
||||
// tag::get-templates-response
|
||||
List<IndexTemplateMetaData> templates = getTemplatesResponse.getIndexTemplates(); // <1>
|
||||
// end::get-templates-response
|
||||
|
||||
assertThat(templates, hasSize(1));
|
||||
assertThat(templates.get(0).name(), equalTo("my-template"));
|
||||
|
||||
// tag::get-templates-execute-listener
|
||||
ActionListener<GetIndexTemplatesResponse> listener =
|
||||
new ActionListener<GetIndexTemplatesResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetIndexTemplatesResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-templates-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-templates-execute-async
|
||||
client.indices().getTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-templates-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
@ -628,7 +628,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
scrollListener = new LatchedActionListener<>(scrollListener, latch);
|
||||
|
||||
// tag::search-scroll-execute-async
|
||||
client.searchScrollAsync(scrollRequest, scrollListener); // <1>
|
||||
client.searchScrollAsync(scrollRequest, RequestOptions.DEFAULT, scrollListener); // <1>
|
||||
// end::search-scroll-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
@ -29,7 +29,7 @@ import java.util.concurrent.TimeUnit;
|
||||
final class DeadHostState implements Comparable<DeadHostState> {
|
||||
|
||||
private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
|
||||
private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
|
||||
static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
|
||||
|
||||
private final int failedAttempts;
|
||||
private final long deadUntilNanos;
|
||||
@ -55,12 +55,12 @@ final class DeadHostState implements Comparable<DeadHostState> {
|
||||
*
|
||||
* @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt
|
||||
*/
|
||||
DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) {
|
||||
DeadHostState(DeadHostState previousDeadHostState) {
|
||||
long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
|
||||
MAX_CONNECTION_TIMEOUT_NANOS);
|
||||
this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos;
|
||||
this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos;
|
||||
this.failedAttempts = previousDeadHostState.failedAttempts + 1;
|
||||
this.timeSupplier = timeSupplier;
|
||||
this.timeSupplier = previousDeadHostState.timeSupplier;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -86,6 +86,10 @@ final class DeadHostState implements Comparable<DeadHostState> {
|
||||
|
||||
@Override
|
||||
public int compareTo(DeadHostState other) {
|
||||
if (timeSupplier != other.timeSupplier) {
|
||||
throw new IllegalArgumentException("can't compare DeadHostStates with different clocks ["
|
||||
+ timeSupplier + " != " + other.timeSupplier + "]");
|
||||
}
|
||||
return Long.compare(deadUntilNanos, other.deadUntilNanos);
|
||||
}
|
||||
|
||||
@ -94,6 +98,7 @@ final class DeadHostState implements Comparable<DeadHostState> {
|
||||
return "DeadHostState{" +
|
||||
"failedAttempts=" + failedAttempts +
|
||||
", deadUntilNanos=" + deadUntilNanos +
|
||||
", timeSupplier=" + timeSupplier +
|
||||
'}';
|
||||
}
|
||||
|
||||
@ -101,12 +106,16 @@ final class DeadHostState implements Comparable<DeadHostState> {
|
||||
* Time supplier that makes timing aspects pluggable to ease testing
|
||||
*/
|
||||
interface TimeSupplier {
|
||||
|
||||
TimeSupplier DEFAULT = new TimeSupplier() {
|
||||
@Override
|
||||
public long nanoTime() {
|
||||
return System.nanoTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "nanoTime";
|
||||
}
|
||||
};
|
||||
|
||||
long nanoTime();
|
||||
|
213
client/rest/src/main/java/org/elasticsearch/client/Node.java
Normal file
213
client/rest/src/main/java/org/elasticsearch/client/Node.java
Normal file
@ -0,0 +1,213 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
|
||||
/**
|
||||
* Metadata about an {@link HttpHost} running Elasticsearch.
|
||||
*/
|
||||
public class Node {
|
||||
/**
|
||||
* Address that this host claims is its primary contact point.
|
||||
*/
|
||||
private final HttpHost host;
|
||||
/**
|
||||
* Addresses on which the host is listening. These are useful to have
|
||||
* around because they allow you to find a host based on any address it
|
||||
* is listening on.
|
||||
*/
|
||||
private final Set<HttpHost> boundHosts;
|
||||
/**
|
||||
* Name of the node as configured by the {@code node.name} attribute.
|
||||
*/
|
||||
private final String name;
|
||||
/**
|
||||
* Version of Elasticsearch that the node is running or {@code null}
|
||||
* if we don't know the version.
|
||||
*/
|
||||
private final String version;
|
||||
/**
|
||||
* Roles that the Elasticsearch process on the host has or {@code null}
|
||||
* if we don't know what roles the node has.
|
||||
*/
|
||||
private final Roles roles;
|
||||
|
||||
/**
|
||||
* Create a {@linkplain Node} with metadata. All parameters except
|
||||
* {@code host} are nullable and implementations of {@link NodeSelector}
|
||||
* need to decide what to do in their absence.
|
||||
*/
|
||||
public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
|
||||
if (host == null) {
|
||||
throw new IllegalArgumentException("host cannot be null");
|
||||
}
|
||||
this.host = host;
|
||||
this.boundHosts = boundHosts;
|
||||
this.name = name;
|
||||
this.version = version;
|
||||
this.roles = roles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {@linkplain Node} without any metadata.
|
||||
*/
|
||||
public Node(HttpHost host) {
|
||||
this(host, null, null, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Contact information for the host.
|
||||
*/
|
||||
public HttpHost getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
/**
|
||||
* Addresses on which the host is listening. These are useful to have
|
||||
* around because they allow you to find a host based on any address it
|
||||
* is listening on.
|
||||
*/
|
||||
public Set<HttpHost> getBoundHosts() {
|
||||
return boundHosts;
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@code node.name} of the node.
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Version of Elasticsearch that the node is running or {@code null}
|
||||
* if we don't know the version.
|
||||
*/
|
||||
public String getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Roles that the Elasticsearch process on the host has or {@code null}
|
||||
* if we don't know what roles the node has.
|
||||
*/
|
||||
public Roles getRoles() {
|
||||
return roles;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder b = new StringBuilder();
|
||||
b.append("[host=").append(host);
|
||||
if (boundHosts != null) {
|
||||
b.append(", bound=").append(boundHosts);
|
||||
}
|
||||
if (name != null) {
|
||||
b.append(", name=").append(name);
|
||||
}
|
||||
if (version != null) {
|
||||
b.append(", version=").append(version);
|
||||
}
|
||||
if (roles != null) {
|
||||
b.append(", roles=").append(roles);
|
||||
}
|
||||
return b.append(']').toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
Node other = (Node) obj;
|
||||
return host.equals(other.host)
|
||||
&& Objects.equals(boundHosts, other.boundHosts)
|
||||
&& Objects.equals(name, other.name)
|
||||
&& Objects.equals(version, other.version)
|
||||
&& Objects.equals(roles, other.roles);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(host, boundHosts, name, version, roles);
|
||||
}
|
||||
|
||||
/**
|
||||
* Role information about an Elasticsearch process.
|
||||
*/
|
||||
public static final class Roles {
|
||||
private final boolean masterEligible;
|
||||
private final boolean data;
|
||||
private final boolean ingest;
|
||||
|
||||
public Roles(boolean masterEligible, boolean data, boolean ingest) {
|
||||
this.masterEligible = masterEligible;
|
||||
this.data = data;
|
||||
this.ingest = ingest;
|
||||
}
|
||||
|
||||
/**
|
||||
* Teturns whether or not the node <strong>could</strong> be elected master.
|
||||
*/
|
||||
public boolean isMasterEligible() {
|
||||
return masterEligible;
|
||||
}
|
||||
/**
|
||||
* Teturns whether or not the node stores data.
|
||||
*/
|
||||
public boolean isData() {
|
||||
return data;
|
||||
}
|
||||
/**
|
||||
* Teturns whether or not the node runs ingest pipelines.
|
||||
*/
|
||||
public boolean isIngest() {
|
||||
return ingest;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder result = new StringBuilder(3);
|
||||
if (masterEligible) result.append('m');
|
||||
if (data) result.append('d');
|
||||
if (ingest) result.append('i');
|
||||
return result.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || obj.getClass() != getClass()) {
|
||||
return false;
|
||||
}
|
||||
Roles other = (Roles) obj;
|
||||
return masterEligible == other.masterEligible
|
||||
&& data == other.data
|
||||
&& ingest == other.ingest;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(masterEligible, data, ingest);
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import java.util.Iterator;
|
||||
|
||||
/**
|
||||
* Selects nodes that can receive requests. Used to keep requests away
|
||||
* from master nodes or to send them to nodes with a particular attribute.
|
||||
* Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}.
|
||||
*/
|
||||
public interface NodeSelector {
|
||||
/**
|
||||
* Select the {@link Node}s to which to send requests. This is called with
|
||||
* a mutable {@link Iterable} of {@linkplain Node}s in the order that the
|
||||
* rest client would prefer to use them and implementers should remove
|
||||
* nodes from the that should not receive the request. Implementers may
|
||||
* iterate the nodes as many times as they need.
|
||||
* <p>
|
||||
* This may be called twice per request: first for "living" nodes that
|
||||
* have not been blacklisted by previous errors. If the selector removes
|
||||
* all nodes from the list or if there aren't any living nodes then the
|
||||
* {@link RestClient} will call this method with a list of "dead" nodes.
|
||||
* <p>
|
||||
* Implementers should not rely on the ordering of the nodes.
|
||||
*/
|
||||
void select(Iterable<Node> nodes);
|
||||
/*
|
||||
* We were fairly careful with our choice of Iterable here. The caller has
|
||||
* a List but reordering the list is likely to break round robin. Luckily
|
||||
* Iterable doesn't allow any reordering.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Selector that matches any node.
|
||||
*/
|
||||
NodeSelector ANY = new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> nodes) {
|
||||
// Intentionally does nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ANY";
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Selector that matches any node that has metadata and doesn't
|
||||
* have the {@code master} role OR it has the data {@code data}
|
||||
* role.
|
||||
*/
|
||||
NodeSelector NOT_MASTER_ONLY = new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> nodes) {
|
||||
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
|
||||
Node node = itr.next();
|
||||
if (node.getRoles() == null) continue;
|
||||
if (node.getRoles().isMasterEligible()
|
||||
&& false == node.getRoles().isData()
|
||||
&& false == node.getRoles().isIngest()) {
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NOT_MASTER_ONLY";
|
||||
}
|
||||
};
|
||||
}
|
@ -87,14 +87,14 @@ final class RequestLogger {
|
||||
/**
|
||||
* Logs a request that failed
|
||||
*/
|
||||
static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
|
||||
static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
|
||||
logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e);
|
||||
}
|
||||
if (tracer.isTraceEnabled()) {
|
||||
String traceRequest;
|
||||
try {
|
||||
traceRequest = buildTraceRequest(request, host);
|
||||
traceRequest = buildTraceRequest(request, node.getHost());
|
||||
} catch (IOException e1) {
|
||||
tracer.trace("error while reading request for trace purposes", e);
|
||||
traceRequest = "";
|
||||
|
@ -37,18 +37,21 @@ import java.util.ArrayList;
|
||||
*/
|
||||
public final class RequestOptions {
|
||||
public static final RequestOptions DEFAULT = new Builder(
|
||||
Collections.<Header>emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build();
|
||||
Collections.<Header>emptyList(), NodeSelector.ANY,
|
||||
HeapBufferedResponseConsumerFactory.DEFAULT).build();
|
||||
|
||||
private final List<Header> headers;
|
||||
private final NodeSelector nodeSelector;
|
||||
private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;
|
||||
|
||||
private RequestOptions(Builder builder) {
|
||||
this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers));
|
||||
this.nodeSelector = builder.nodeSelector;
|
||||
this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory;
|
||||
}
|
||||
|
||||
public Builder toBuilder() {
|
||||
Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory);
|
||||
Builder builder = new Builder(headers, nodeSelector, httpAsyncResponseConsumerFactory);
|
||||
return builder;
|
||||
}
|
||||
|
||||
@ -59,6 +62,14 @@ public final class RequestOptions {
|
||||
return headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* The selector that chooses which nodes are valid destinations for
|
||||
* {@link Request}s with these options.
|
||||
*/
|
||||
public NodeSelector getNodeSelector() {
|
||||
return nodeSelector;
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
@ -82,6 +93,9 @@ public final class RequestOptions {
|
||||
b.append(headers.get(h).toString());
|
||||
}
|
||||
}
|
||||
if (nodeSelector != NodeSelector.ANY) {
|
||||
b.append(", nodeSelector=").append(nodeSelector);
|
||||
}
|
||||
if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
|
||||
b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
@ -99,20 +113,24 @@ public final class RequestOptions {
|
||||
|
||||
RequestOptions other = (RequestOptions) obj;
|
||||
return headers.equals(other.headers)
|
||||
&& nodeSelector.equals(other.nodeSelector)
|
||||
&& httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(headers, httpAsyncResponseConsumerFactory);
|
||||
return Objects.hash(headers, nodeSelector, httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private final List<Header> headers;
|
||||
private NodeSelector nodeSelector;
|
||||
private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;
|
||||
|
||||
private Builder(List<Header> headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
|
||||
private Builder(List<Header> headers, NodeSelector nodeSelector,
|
||||
HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
|
||||
this.headers = new ArrayList<>(headers);
|
||||
this.nodeSelector = nodeSelector;
|
||||
this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory;
|
||||
}
|
||||
|
||||
@ -133,7 +151,15 @@ public final class RequestOptions {
|
||||
}
|
||||
|
||||
/**
|
||||
* set the {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* Configure the selector that chooses which nodes are valid
|
||||
* destinations for {@link Request}s with these options
|
||||
*/
|
||||
public void setNodeSelector(NodeSelector nodeSelector) {
|
||||
this.nodeSelector = Objects.requireNonNull(nodeSelector, "nodeSelector cannot be null");
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
* response body gets streamed from a non-blocking HTTP connection on the
|
||||
* client side.
|
||||
|
@ -40,7 +40,7 @@ public class Response {
|
||||
|
||||
Response(RequestLine requestLine, HttpHost host, HttpResponse response) {
|
||||
Objects.requireNonNull(requestLine, "requestLine cannot be null");
|
||||
Objects.requireNonNull(host, "node cannot be null");
|
||||
Objects.requireNonNull(host, "host cannot be null");
|
||||
Objects.requireNonNull(response, "response cannot be null");
|
||||
this.requestLine = requestLine;
|
||||
this.host = host;
|
||||
|
@ -46,10 +46,11 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
|
||||
import org.apache.http.nio.client.methods.HttpAsyncMethods;
|
||||
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
|
||||
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
|
||||
import org.elasticsearch.client.DeadHostState.TimeSupplier;
|
||||
|
||||
import javax.net.ssl.SSLHandshakeException;
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
@ -57,11 +58,10 @@ import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
@ -74,13 +74,16 @@ import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import javax.net.ssl.SSLHandshakeException;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
|
||||
/**
|
||||
* Client that connects to an Elasticsearch cluster through HTTP.
|
||||
* <p>
|
||||
* Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults.
|
||||
* The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
|
||||
* by calling {@link #setHosts(HttpHost...)}.
|
||||
* by calling {@link #setNodes(Collection)}.
|
||||
* <p>
|
||||
* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
|
||||
* sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
|
||||
@ -102,53 +105,93 @@ public class RestClient implements Closeable {
|
||||
final List<Header> defaultHeaders;
|
||||
private final long maxRetryTimeoutMillis;
|
||||
private final String pathPrefix;
|
||||
private final AtomicInteger lastHostIndex = new AtomicInteger(0);
|
||||
private volatile HostTuple<Set<HttpHost>> hostTuple;
|
||||
private final AtomicInteger lastNodeIndex = new AtomicInteger(0);
|
||||
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
|
||||
private final FailureListener failureListener;
|
||||
private volatile NodeTuple<List<Node>> nodeTuple;
|
||||
|
||||
RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
|
||||
HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
|
||||
List<Node> nodes, String pathPrefix, FailureListener failureListener) {
|
||||
this.client = client;
|
||||
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
|
||||
this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders));
|
||||
this.failureListener = failureListener;
|
||||
this.pathPrefix = pathPrefix;
|
||||
setHosts(hosts);
|
||||
setNodes(nodes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
|
||||
* Creates a new builder instance and sets the hosts that the client will send requests to.
|
||||
* <p>
|
||||
* Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes.
|
||||
* If you don't either one is fine.
|
||||
*/
|
||||
public static RestClientBuilder builder(HttpHost... hosts) {
|
||||
return new RestClientBuilder(hosts);
|
||||
public static RestClientBuilder builder(Node... nodes) {
|
||||
return new RestClientBuilder(nodes == null ? null : Arrays.asList(nodes));
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces the hosts that the client communicates with.
|
||||
* @see HttpHost
|
||||
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
|
||||
* Creates a new builder instance and sets the nodes that the client will send requests to.
|
||||
* <p>
|
||||
* You can use this if you do not have metadata up front about the nodes. If you do, prefer
|
||||
* {@link #builder(Node...)}.
|
||||
* @see Node#Node(HttpHost)
|
||||
*/
|
||||
public synchronized void setHosts(HttpHost... hosts) {
|
||||
if (hosts == null || hosts.length == 0) {
|
||||
throw new IllegalArgumentException("hosts must not be null nor empty");
|
||||
public static RestClientBuilder builder(HttpHost... hosts) {
|
||||
return new RestClientBuilder(hostsToNodes(hosts));
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces the hosts with which the client communicates.
|
||||
*
|
||||
* @deprecated prefer {@link setNodes} because it allows you
|
||||
* to set metadata for use with {@link NodeSelector}s
|
||||
*/
|
||||
@Deprecated
|
||||
public void setHosts(HttpHost... hosts) {
|
||||
setNodes(hostsToNodes(hosts));
|
||||
}
|
||||
|
||||
/**
|
||||
* Replaces the nodes with which the client communicates.
|
||||
*/
|
||||
public synchronized void setNodes(Collection<Node> nodes) {
|
||||
if (nodes == null || nodes.isEmpty()) {
|
||||
throw new IllegalArgumentException("nodes must not be null or empty");
|
||||
}
|
||||
Set<HttpHost> httpHosts = new LinkedHashSet<>();
|
||||
AuthCache authCache = new BasicAuthCache();
|
||||
for (HttpHost host : hosts) {
|
||||
Objects.requireNonNull(host, "host cannot be null");
|
||||
httpHosts.add(host);
|
||||
authCache.put(host, new BasicScheme());
|
||||
|
||||
Map<HttpHost, Node> nodesByHost = new LinkedHashMap<>();
|
||||
for (Node node : nodes) {
|
||||
Objects.requireNonNull(node, "node cannot be null");
|
||||
// TODO should we throw an IAE if we have two nodes with the same host?
|
||||
nodesByHost.put(node.getHost(), node);
|
||||
authCache.put(node.getHost(), new BasicScheme());
|
||||
}
|
||||
this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache);
|
||||
this.nodeTuple = new NodeTuple<>(
|
||||
Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache);
|
||||
this.blacklist.clear();
|
||||
}
|
||||
|
||||
private static List<Node> hostsToNodes(HttpHost[] hosts) {
|
||||
if (hosts == null || hosts.length == 0) {
|
||||
throw new IllegalArgumentException("hosts must not be null nor empty");
|
||||
}
|
||||
List<Node> nodes = new ArrayList<>(hosts.length);
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
nodes.add(new Node(hosts[i]));
|
||||
}
|
||||
return nodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the configured hosts
|
||||
* Get the list of nodes that the client knows about. The list is
|
||||
* unmodifiable.
|
||||
*/
|
||||
public List<HttpHost> getHosts() {
|
||||
return new ArrayList<>(hostTuple.hosts);
|
||||
public List<Node> getNodes() {
|
||||
return nodeTuple.nodes;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -434,7 +477,7 @@ public class RestClient implements Closeable {
|
||||
performRequestAsync(request, responseListener);
|
||||
}
|
||||
|
||||
void performRequestAsyncNoCatch(Request request, ResponseListener listener) {
|
||||
void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException {
|
||||
Map<String, String> requestParams = new HashMap<>(request.getParameters());
|
||||
//ignore is a special parameter supported by the clients, shouldn't be sent to es
|
||||
String ignoreString = requestParams.remove("ignore");
|
||||
@ -466,40 +509,40 @@ public class RestClient implements Closeable {
|
||||
setHeaders(httpRequest, request.getOptions().getHeaders());
|
||||
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener);
|
||||
long startTime = System.nanoTime();
|
||||
performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes,
|
||||
performRequestAsync(startTime, nextNode(request.getOptions().getNodeSelector()), httpRequest, ignoreErrorCodes,
|
||||
request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
|
||||
}
|
||||
|
||||
private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,
|
||||
private void performRequestAsync(final long startTime, final NodeTuple<Iterator<Node>> nodeTuple, final HttpRequestBase request,
|
||||
final Set<Integer> ignoreErrorCodes,
|
||||
final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
|
||||
final FailureTrackingResponseListener listener) {
|
||||
final HttpHost host = hostTuple.hosts.next();
|
||||
final Node node = nodeTuple.nodes.next();
|
||||
//we stream the request body if the entity allows for it
|
||||
final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
|
||||
final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(node.getHost(), request);
|
||||
final HttpAsyncResponseConsumer<HttpResponse> asyncResponseConsumer =
|
||||
httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer();
|
||||
final HttpClientContext context = HttpClientContext.create();
|
||||
context.setAuthCache(hostTuple.authCache);
|
||||
context.setAuthCache(nodeTuple.authCache);
|
||||
client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback<HttpResponse>() {
|
||||
@Override
|
||||
public void completed(HttpResponse httpResponse) {
|
||||
try {
|
||||
RequestLogger.logResponse(logger, request, host, httpResponse);
|
||||
RequestLogger.logResponse(logger, request, node.getHost(), httpResponse);
|
||||
int statusCode = httpResponse.getStatusLine().getStatusCode();
|
||||
Response response = new Response(request.getRequestLine(), host, httpResponse);
|
||||
Response response = new Response(request.getRequestLine(), node.getHost(), httpResponse);
|
||||
if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) {
|
||||
onResponse(host);
|
||||
onResponse(node);
|
||||
listener.onSuccess(response);
|
||||
} else {
|
||||
ResponseException responseException = new ResponseException(response);
|
||||
if (isRetryStatus(statusCode)) {
|
||||
//mark host dead and retry against next one
|
||||
onFailure(host);
|
||||
onFailure(node);
|
||||
retryIfPossible(responseException);
|
||||
} else {
|
||||
//mark host alive and don't retry, as the error should be a request problem
|
||||
onResponse(host);
|
||||
onResponse(node);
|
||||
listener.onDefinitiveFailure(responseException);
|
||||
}
|
||||
}
|
||||
@ -511,8 +554,8 @@ public class RestClient implements Closeable {
|
||||
@Override
|
||||
public void failed(Exception failure) {
|
||||
try {
|
||||
RequestLogger.logFailedRequest(logger, request, host, failure);
|
||||
onFailure(host);
|
||||
RequestLogger.logFailedRequest(logger, request, node, failure);
|
||||
onFailure(node);
|
||||
retryIfPossible(failure);
|
||||
} catch(Exception e) {
|
||||
listener.onDefinitiveFailure(e);
|
||||
@ -520,7 +563,7 @@ public class RestClient implements Closeable {
|
||||
}
|
||||
|
||||
private void retryIfPossible(Exception exception) {
|
||||
if (hostTuple.hosts.hasNext()) {
|
||||
if (nodeTuple.nodes.hasNext()) {
|
||||
//in case we are retrying, check whether maxRetryTimeout has been reached
|
||||
long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
|
||||
long timeout = maxRetryTimeoutMillis - timeElapsedMillis;
|
||||
@ -531,7 +574,7 @@ public class RestClient implements Closeable {
|
||||
} else {
|
||||
listener.trackFailure(exception);
|
||||
request.reset();
|
||||
performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener);
|
||||
performRequestAsync(startTime, nodeTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener);
|
||||
}
|
||||
} else {
|
||||
listener.onDefinitiveFailure(exception);
|
||||
@ -560,54 +603,103 @@ public class RestClient implements Closeable {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an {@link Iterable} of hosts to be used for a request call.
|
||||
* Ideally, the first host is retrieved from the iterable and used successfully for the request.
|
||||
* Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until
|
||||
* there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable.
|
||||
* The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be be retried,
|
||||
* one dead host gets returned so that it can be retried.
|
||||
* Returns a non-empty {@link Iterator} of nodes to be used for a request
|
||||
* that match the {@link NodeSelector}.
|
||||
* <p>
|
||||
* If there are no living nodes that match the {@link NodeSelector}
|
||||
* this will return the dead node that matches the {@link NodeSelector}
|
||||
* that is closest to being revived.
|
||||
* @throws IOException if no nodes are available
|
||||
*/
|
||||
private HostTuple<Iterator<HttpHost>> nextHost() {
|
||||
final HostTuple<Set<HttpHost>> hostTuple = this.hostTuple;
|
||||
Collection<HttpHost> nextHosts = Collections.emptySet();
|
||||
do {
|
||||
Set<HttpHost> filteredHosts = new HashSet<>(hostTuple.hosts);
|
||||
for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
|
||||
if (entry.getValue().shallBeRetried() == false) {
|
||||
filteredHosts.remove(entry.getKey());
|
||||
}
|
||||
private NodeTuple<Iterator<Node>> nextNode(NodeSelector nodeSelector) throws IOException {
|
||||
NodeTuple<List<Node>> nodeTuple = this.nodeTuple;
|
||||
List<Node> hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector);
|
||||
return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache);
|
||||
}
|
||||
|
||||
/**
|
||||
* Select hosts to try. Package private for testing.
|
||||
*/
|
||||
static List<Node> selectHosts(NodeTuple<List<Node>> nodeTuple,
|
||||
Map<HttpHost, DeadHostState> blacklist, AtomicInteger lastNodeIndex,
|
||||
NodeSelector nodeSelector) throws IOException {
|
||||
/*
|
||||
* Sort the nodes into living and dead lists.
|
||||
*/
|
||||
List<Node> livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size());
|
||||
List<DeadNode> deadNodes = new ArrayList<>(blacklist.size());
|
||||
for (Node node : nodeTuple.nodes) {
|
||||
DeadHostState deadness = blacklist.get(node.getHost());
|
||||
if (deadness == null) {
|
||||
livingNodes.add(node);
|
||||
continue;
|
||||
}
|
||||
if (filteredHosts.isEmpty()) {
|
||||
//last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
|
||||
List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
|
||||
if (sortedHosts.size() > 0) {
|
||||
Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
|
||||
@Override
|
||||
public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
|
||||
return o1.getValue().compareTo(o2.getValue());
|
||||
}
|
||||
});
|
||||
HttpHost deadHost = sortedHosts.get(0).getKey();
|
||||
logger.trace("resurrecting host [" + deadHost + "]");
|
||||
nextHosts = Collections.singleton(deadHost);
|
||||
}
|
||||
} else {
|
||||
List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
|
||||
Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
|
||||
nextHosts = rotatedHosts;
|
||||
if (deadness.shallBeRetried()) {
|
||||
livingNodes.add(node);
|
||||
continue;
|
||||
}
|
||||
} while(nextHosts.isEmpty());
|
||||
return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache);
|
||||
deadNodes.add(new DeadNode(node, deadness));
|
||||
}
|
||||
|
||||
if (false == livingNodes.isEmpty()) {
|
||||
/*
|
||||
* Normal state: there is at least one living node. If the
|
||||
* selector is ok with any over the living nodes then use them
|
||||
* for the request.
|
||||
*/
|
||||
List<Node> selectedLivingNodes = new ArrayList<>(livingNodes);
|
||||
nodeSelector.select(selectedLivingNodes);
|
||||
if (false == selectedLivingNodes.isEmpty()) {
|
||||
/*
|
||||
* Rotate the list so subsequent requests will prefer the
|
||||
* nodes in a different order.
|
||||
*/
|
||||
Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement());
|
||||
return selectedLivingNodes;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Last resort: If there are no good nodes to use, either because
|
||||
* the selector rejected all the living nodes or because there aren't
|
||||
* any living ones. Either way, we want to revive a single dead node
|
||||
* that the NodeSelectors are OK with. We do this by sorting the dead
|
||||
* nodes by their revival time and passing them through the
|
||||
* NodeSelector so it can have its say in which nodes are ok and their
|
||||
* ordering. If the selector is ok with any of the nodes then use just
|
||||
* the first one in the list because we only want to revive a single
|
||||
* node.
|
||||
*/
|
||||
if (false == deadNodes.isEmpty()) {
|
||||
final List<DeadNode> selectedDeadNodes = new ArrayList<>(deadNodes);
|
||||
/*
|
||||
* We'd like NodeSelectors to remove items directly from deadNodes
|
||||
* so we can find the minimum after it is filtered without having
|
||||
* to compare many things. This saves us a sort on the unfiltered
|
||||
* list.
|
||||
*/
|
||||
nodeSelector.select(new Iterable<Node>() {
|
||||
@Override
|
||||
public Iterator<Node> iterator() {
|
||||
return new DeadNodeIteratorAdapter(selectedDeadNodes.iterator());
|
||||
}
|
||||
});
|
||||
if (false == selectedDeadNodes.isEmpty()) {
|
||||
return singletonList(Collections.min(selectedDeadNodes).node);
|
||||
}
|
||||
}
|
||||
throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, "
|
||||
+ "living " + livingNodes + " and dead " + deadNodes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called after each successful request call.
|
||||
* Receives as an argument the host that was used for the successful request.
|
||||
*/
|
||||
private void onResponse(HttpHost host) {
|
||||
DeadHostState removedHost = this.blacklist.remove(host);
|
||||
private void onResponse(Node node) {
|
||||
DeadHostState removedHost = this.blacklist.remove(node.getHost());
|
||||
if (logger.isDebugEnabled() && removedHost != null) {
|
||||
logger.debug("removed host [" + host + "] from blacklist");
|
||||
logger.debug("removed [" + node + "] from blacklist");
|
||||
}
|
||||
}
|
||||
|
||||
@ -615,20 +707,25 @@ public class RestClient implements Closeable {
|
||||
* Called after each failed attempt.
|
||||
* Receives as an argument the host that was used for the failed attempt.
|
||||
*/
|
||||
private void onFailure(HttpHost host) {
|
||||
private void onFailure(Node node) {
|
||||
while(true) {
|
||||
DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT));
|
||||
DeadHostState previousDeadHostState =
|
||||
blacklist.putIfAbsent(node.getHost(), new DeadHostState(TimeSupplier.DEFAULT));
|
||||
if (previousDeadHostState == null) {
|
||||
logger.debug("added host [" + host + "] to blacklist");
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("added [" + node + "] to blacklist");
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (blacklist.replace(host, previousDeadHostState,
|
||||
new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) {
|
||||
logger.debug("updated host [" + host + "] already in blacklist");
|
||||
if (blacklist.replace(node.getHost(), previousDeadHostState,
|
||||
new DeadHostState(previousDeadHostState))) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("updated [" + node + "] already in blacklist");
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
failureListener.onFailure(host);
|
||||
failureListener.onFailure(node);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -840,6 +937,11 @@ public class RestClient implements Closeable {
|
||||
e.initCause(exception);
|
||||
throw e;
|
||||
}
|
||||
if (exception instanceof ConnectException) {
|
||||
ConnectException e = new ConnectException(exception.getMessage());
|
||||
e.initCause(exception);
|
||||
throw e;
|
||||
}
|
||||
if (exception instanceof IOException) {
|
||||
throw new IOException(exception.getMessage(), exception);
|
||||
}
|
||||
@ -862,24 +964,73 @@ public class RestClient implements Closeable {
|
||||
*/
|
||||
public static class FailureListener {
|
||||
/**
|
||||
* Notifies that the host provided as argument has just failed
|
||||
* Notifies that the node provided as argument has just failed
|
||||
*/
|
||||
public void onFailure(HttpHost host) {
|
||||
public void onFailure(Node node) {}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link NodeTuple} enables the {@linkplain Node}s and {@linkplain AuthCache}
|
||||
* to be set together in a thread safe, volatile way.
|
||||
*/
|
||||
static class NodeTuple<T> {
|
||||
final T nodes;
|
||||
final AuthCache authCache;
|
||||
|
||||
NodeTuple(final T nodes, final AuthCache authCache) {
|
||||
this.nodes = nodes;
|
||||
this.authCache = authCache;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread
|
||||
* safe, volatile way.
|
||||
* Contains a reference to a blacklisted node and the time until it is
|
||||
* revived. We use this so we can do a single pass over the blacklist.
|
||||
*/
|
||||
private static class HostTuple<T> {
|
||||
final T hosts;
|
||||
final AuthCache authCache;
|
||||
private static class DeadNode implements Comparable<DeadNode> {
|
||||
final Node node;
|
||||
final DeadHostState deadness;
|
||||
|
||||
HostTuple(final T hosts, final AuthCache authCache) {
|
||||
this.hosts = hosts;
|
||||
this.authCache = authCache;
|
||||
DeadNode(Node node, DeadHostState deadness) {
|
||||
this.node = node;
|
||||
this.deadness = deadness;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return node.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(DeadNode rhs) {
|
||||
return deadness.compareTo(rhs.deadness);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapts an <code>Iterator<DeadNodeAndRevival></code> into an
|
||||
* <code>Iterator<Node></code>.
|
||||
*/
|
||||
private static class DeadNodeIteratorAdapter implements Iterator<Node> {
|
||||
private final Iterator<DeadNode> itr;
|
||||
|
||||
private DeadNodeIteratorAdapter(Iterator<DeadNode> itr) {
|
||||
this.itr = itr;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return itr.hasNext();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Node next() {
|
||||
return itr.next().node;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove() {
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -20,7 +20,6 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.client.config.RequestConfig;
|
||||
import org.apache.http.impl.client.CloseableHttpClient;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
@ -32,6 +31,7 @@ import javax.net.ssl.SSLContext;
|
||||
import java.security.AccessController;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
@ -48,7 +48,7 @@ public final class RestClientBuilder {
|
||||
|
||||
private static final Header[] EMPTY_HEADERS = new Header[0];
|
||||
|
||||
private final HttpHost[] hosts;
|
||||
private final List<Node> nodes;
|
||||
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
|
||||
private Header[] defaultHeaders = EMPTY_HEADERS;
|
||||
private RestClient.FailureListener failureListener;
|
||||
@ -59,18 +59,18 @@ public final class RestClientBuilder {
|
||||
/**
|
||||
* Creates a new builder instance and sets the hosts that the client will send requests to.
|
||||
*
|
||||
* @throws NullPointerException if {@code hosts} or any host is {@code null}.
|
||||
* @throws IllegalArgumentException if {@code hosts} is empty.
|
||||
* @throws IllegalArgumentException if {@code nodes} is {@code null} or empty.
|
||||
*/
|
||||
RestClientBuilder(HttpHost... hosts) {
|
||||
Objects.requireNonNull(hosts, "hosts must not be null");
|
||||
if (hosts.length == 0) {
|
||||
throw new IllegalArgumentException("no hosts provided");
|
||||
RestClientBuilder(List<Node> nodes) {
|
||||
if (nodes == null || nodes.isEmpty()) {
|
||||
throw new IllegalArgumentException("nodes must not be null or empty");
|
||||
}
|
||||
for (HttpHost host : hosts) {
|
||||
Objects.requireNonNull(host, "host cannot be null");
|
||||
for (Node node : nodes) {
|
||||
if (node == null) {
|
||||
throw new IllegalArgumentException("node cannot be null");
|
||||
}
|
||||
}
|
||||
this.hosts = hosts;
|
||||
this.nodes = nodes;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -186,7 +186,7 @@ public final class RestClientBuilder {
|
||||
return createHttpClient();
|
||||
}
|
||||
});
|
||||
RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
|
||||
RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener);
|
||||
httpClient.start();
|
||||
return restClient;
|
||||
}
|
||||
|
@ -21,11 +21,15 @@ package org.elasticsearch.client;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.elasticsearch.client.DeadHostState.TimeSupplier;
|
||||
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.lessThan;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class DeadHostStateTests extends RestClientTestCase {
|
||||
|
||||
@ -42,7 +46,7 @@ public class DeadHostStateTests extends RestClientTestCase {
|
||||
DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
|
||||
int iters = randomIntBetween(5, 30);
|
||||
for (int i = 0; i < iters; i++) {
|
||||
DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT);
|
||||
DeadHostState deadHostState = new DeadHostState(previous);
|
||||
assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos()));
|
||||
assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1));
|
||||
previous = deadHostState;
|
||||
@ -56,7 +60,7 @@ public class DeadHostStateTests extends RestClientTestCase {
|
||||
if (i == 0) {
|
||||
deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
|
||||
} else {
|
||||
deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT);
|
||||
deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]);
|
||||
}
|
||||
}
|
||||
for (int k = 1; k < deadHostStates.length; k++) {
|
||||
@ -65,6 +69,17 @@ public class DeadHostStateTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testCompareToDifferingTimeSupplier() {
|
||||
try {
|
||||
new DeadHostState(TimeSupplier.DEFAULT).compareTo(
|
||||
new DeadHostState(new ConfigurableTimeSupplier()));
|
||||
fail("expected failure");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals("can't compare DeadHostStates with different clocks [nanoTime != configured[0]]",
|
||||
e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testShallBeRetried() {
|
||||
ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier();
|
||||
DeadHostState deadHostState = null;
|
||||
@ -74,7 +89,7 @@ public class DeadHostStateTests extends RestClientTestCase {
|
||||
if (i == 0) {
|
||||
deadHostState = new DeadHostState(timeSupplier);
|
||||
} else {
|
||||
deadHostState = new DeadHostState(deadHostState, timeSupplier);
|
||||
deadHostState = new DeadHostState(deadHostState);
|
||||
}
|
||||
for (int j = 0; j < expectedTimeoutSecond; j++) {
|
||||
timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1);
|
||||
@ -94,25 +109,29 @@ public class DeadHostStateTests extends RestClientTestCase {
|
||||
DeadHostState previous = new DeadHostState(zeroTimeSupplier);
|
||||
for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) {
|
||||
assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond));
|
||||
previous = new DeadHostState(previous, zeroTimeSupplier);
|
||||
previous = new DeadHostState(previous);
|
||||
}
|
||||
//check that from here on the timeout does not increase
|
||||
int iters = randomIntBetween(5, 30);
|
||||
for (int i = 0; i < iters; i++) {
|
||||
DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier);
|
||||
DeadHostState deadHostState = new DeadHostState(previous);
|
||||
assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()),
|
||||
equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1]));
|
||||
previous = deadHostState;
|
||||
}
|
||||
}
|
||||
|
||||
private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier {
|
||||
|
||||
static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier {
|
||||
long nanoTime;
|
||||
|
||||
@Override
|
||||
public long nanoTime() {
|
||||
return nanoTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "configured[" + nanoTime + "]";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ package org.elasticsearch.client;
|
||||
import org.apache.http.HttpHost;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
@ -29,14 +30,22 @@ import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertThat;
|
||||
|
||||
/**
|
||||
* {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host.
|
||||
* {@link RestClient.FailureListener} impl that allows to track when it gets called for which host.
|
||||
*/
|
||||
class HostsTrackingFailureListener extends RestClient.FailureListener {
|
||||
private volatile Set<HttpHost> hosts = new HashSet<>();
|
||||
|
||||
@Override
|
||||
public void onFailure(HttpHost host) {
|
||||
hosts.add(host);
|
||||
public void onFailure(Node node) {
|
||||
hosts.add(node.getHost());
|
||||
}
|
||||
|
||||
void assertCalled(List<Node> nodes) {
|
||||
HttpHost[] hosts = new HttpHost[nodes.size()];
|
||||
for (int i = 0 ; i < nodes.size(); i++) {
|
||||
hosts[i] = nodes.get(i).getHost();
|
||||
}
|
||||
assertCalled(hosts);
|
||||
}
|
||||
|
||||
void assertCalled(HttpHost... hosts) {
|
||||
@ -48,4 +57,4 @@ class HostsTrackingFailureListener extends RestClient.FailureListener {
|
||||
void assertNotCalled() {
|
||||
assertEquals(0, hosts.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node.Roles;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class NodeSelectorTests extends RestClientTestCase {
|
||||
public void testAny() {
|
||||
List<Node> nodes = new ArrayList<>();
|
||||
int size = between(2, 5);
|
||||
for (int i = 0; i < size; i++) {
|
||||
nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean()));
|
||||
}
|
||||
List<Node> expected = new ArrayList<>(nodes);
|
||||
NodeSelector.ANY.select(nodes);
|
||||
assertEquals(expected, nodes);
|
||||
}
|
||||
|
||||
public void testNotMasterOnly() {
|
||||
Node masterOnly = dummyNode(true, false, false);
|
||||
Node all = dummyNode(true, true, true);
|
||||
Node masterAndData = dummyNode(true, true, false);
|
||||
Node masterAndIngest = dummyNode(true, false, true);
|
||||
Node coordinatingOnly = dummyNode(false, false, false);
|
||||
Node ingestOnly = dummyNode(false, false, true);
|
||||
Node data = dummyNode(false, true, randomBoolean());
|
||||
List<Node> nodes = new ArrayList<>();
|
||||
nodes.add(masterOnly);
|
||||
nodes.add(all);
|
||||
nodes.add(masterAndData);
|
||||
nodes.add(masterAndIngest);
|
||||
nodes.add(coordinatingOnly);
|
||||
nodes.add(ingestOnly);
|
||||
nodes.add(data);
|
||||
Collections.shuffle(nodes, getRandom());
|
||||
List<Node> expected = new ArrayList<>(nodes);
|
||||
expected.remove(masterOnly);
|
||||
NodeSelector.NOT_MASTER_ONLY.select(nodes);
|
||||
assertEquals(expected, nodes);
|
||||
}
|
||||
|
||||
private Node dummyNode(boolean master, boolean data, boolean ingest) {
|
||||
return new Node(new HttpHost("dummy"), Collections.<HttpHost>emptySet(),
|
||||
randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
|
||||
new Roles(master, data, ingest));
|
||||
}
|
||||
}
|
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node.Roles;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class NodeTests extends RestClientTestCase {
|
||||
public void testToString() {
|
||||
assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString());
|
||||
assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"),
|
||||
null, null, null, new Roles(true, true, true)).toString());
|
||||
assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"),
|
||||
null, null, "ver", null).toString());
|
||||
assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"),
|
||||
null, "nam", null, null).toString());
|
||||
assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"),
|
||||
new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString());
|
||||
assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]",
|
||||
new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))),
|
||||
"nam", "ver", new Roles(true, false, false)).toString());
|
||||
|
||||
}
|
||||
|
||||
public void testEqualsAndHashCode() {
|
||||
HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5));
|
||||
Node node = new Node(host,
|
||||
randomBoolean() ? null : singleton(host),
|
||||
randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
|
||||
randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
|
||||
randomBoolean() ? null : new Roles(true, true, true));
|
||||
assertFalse(node.equals(null));
|
||||
assertTrue(node.equals(node));
|
||||
assertEquals(node.hashCode(), node.hashCode());
|
||||
Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles());
|
||||
assertTrue(node.equals(copy));
|
||||
assertEquals(node.hashCode(), copy.hashCode());
|
||||
assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(),
|
||||
node.getName(), node.getVersion(), node.getRoles())));
|
||||
assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))),
|
||||
node.getName(), node.getVersion(), node.getRoles())));
|
||||
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles())));
|
||||
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles())));
|
||||
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false))));
|
||||
}
|
||||
}
|
@ -114,6 +114,10 @@ public class RequestOptionsTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
builder.setNodeSelector(mock(NodeSelector.class));
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
|
||||
}
|
||||
@ -127,12 +131,15 @@ public class RequestOptionsTests extends RestClientTestCase {
|
||||
|
||||
private static RequestOptions mutate(RequestOptions options) {
|
||||
RequestOptions.Builder mutant = options.toBuilder();
|
||||
int mutationType = between(0, 1);
|
||||
int mutationType = between(0, 2);
|
||||
switch (mutationType) {
|
||||
case 0:
|
||||
mutant.addHeader("extra", "m");
|
||||
return mutant.build();
|
||||
case 1:
|
||||
mutant.setNodeSelector(mock(NodeSelector.class));
|
||||
return mutant.build();
|
||||
case 2:
|
||||
mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
|
||||
return mutant.build();
|
||||
default:
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
@ -29,9 +28,7 @@ import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedRes
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
@ -39,21 +39,42 @@ public class RestClientBuilderTests extends RestClientTestCase {
|
||||
try {
|
||||
RestClient.builder((HttpHost[])null);
|
||||
fail("should have failed");
|
||||
} catch(NullPointerException e) {
|
||||
assertEquals("hosts must not be null", e.getMessage());
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("hosts must not be null nor empty", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
RestClient.builder();
|
||||
RestClient.builder(new HttpHost[] {});
|
||||
fail("should have failed");
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("no hosts provided", e.getMessage());
|
||||
assertEquals("hosts must not be null nor empty", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
RestClient.builder((Node[])null);
|
||||
fail("should have failed");
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("nodes must not be null or empty", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
RestClient.builder(new Node[] {});
|
||||
fail("should have failed");
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("nodes must not be null or empty", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
RestClient.builder(new Node(new HttpHost("localhost", 9200)), null);
|
||||
fail("should have failed");
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("node cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
RestClient.builder(new HttpHost("localhost", 9200), null);
|
||||
fail("should have failed");
|
||||
} catch(NullPointerException e) {
|
||||
} catch(IllegalArgumentException e) {
|
||||
assertEquals("host cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -29,9 +29,11 @@ import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
@ -42,6 +44,7 @@ import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStat
|
||||
import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
/**
|
||||
* Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
|
||||
@ -50,31 +53,37 @@ import static org.junit.Assert.assertTrue;
|
||||
public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
|
||||
|
||||
private static HttpServer[] httpServers;
|
||||
private static RestClient restClient;
|
||||
private static HttpHost[] httpHosts;
|
||||
private static boolean stoppedFirstHost = false;
|
||||
private static String pathPrefixWithoutLeadingSlash;
|
||||
private static String pathPrefix;
|
||||
private static RestClient restClient;
|
||||
|
||||
@BeforeClass
|
||||
public static void startHttpServer() throws Exception {
|
||||
String pathPrefixWithoutLeadingSlash;
|
||||
if (randomBoolean()) {
|
||||
pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5);
|
||||
pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5);
|
||||
pathPrefix = "/" + pathPrefixWithoutLeadingSlash;
|
||||
} else {
|
||||
pathPrefix = pathPrefixWithoutLeadingSlash = "";
|
||||
}
|
||||
int numHttpServers = randomIntBetween(2, 4);
|
||||
httpServers = new HttpServer[numHttpServers];
|
||||
HttpHost[] httpHosts = new HttpHost[numHttpServers];
|
||||
httpHosts = new HttpHost[numHttpServers];
|
||||
for (int i = 0; i < numHttpServers; i++) {
|
||||
HttpServer httpServer = createHttpServer();
|
||||
httpServers[i] = httpServer;
|
||||
httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
|
||||
}
|
||||
restClient = buildRestClient();
|
||||
}
|
||||
|
||||
private static RestClient buildRestClient() {
|
||||
RestClientBuilder restClientBuilder = RestClient.builder(httpHosts);
|
||||
if (pathPrefix.length() > 0) {
|
||||
restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash);
|
||||
}
|
||||
restClient = restClientBuilder.build();
|
||||
return restClientBuilder.build();
|
||||
}
|
||||
|
||||
private static HttpServer createHttpServer() throws Exception {
|
||||
@ -118,6 +127,9 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
|
||||
if (httpServers.length > 1 && randomBoolean()) {
|
||||
List<HttpServer> updatedHttpServers = new ArrayList<>(httpServers.length - 1);
|
||||
int nodeIndex = randomInt(httpServers.length - 1);
|
||||
if (0 == nodeIndex) {
|
||||
stoppedFirstHost = true;
|
||||
}
|
||||
for (int i = 0; i < httpServers.length; i++) {
|
||||
HttpServer httpServer = httpServers[i];
|
||||
if (i == nodeIndex) {
|
||||
@ -182,6 +194,35 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test host selector against a real server <strong>and</strong>
|
||||
* test what happens after calling
|
||||
*/
|
||||
public void testNodeSelector() throws IOException {
|
||||
Request request = new Request("GET", "/200");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setNodeSelector(firstPositionNodeSelector());
|
||||
request.setOptions(options);
|
||||
int rounds = between(1, 10);
|
||||
for (int i = 0; i < rounds; i++) {
|
||||
/*
|
||||
* Run the request more than once to verify that the
|
||||
* NodeSelector overrides the round robin behavior.
|
||||
*/
|
||||
if (stoppedFirstHost) {
|
||||
try {
|
||||
restClient.performRequest(request);
|
||||
fail("expected to fail to connect");
|
||||
} catch (ConnectException e) {
|
||||
assertEquals("Connection refused", e.getMessage());
|
||||
}
|
||||
} else {
|
||||
Response response = restClient.performRequest(request);
|
||||
assertEquals(httpHosts[0], response.getHost());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class TestResponse {
|
||||
private final String method;
|
||||
private final int statusCode;
|
||||
@ -203,4 +244,17 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
|
||||
throw new AssertionError("unexpected response " + response.getClass());
|
||||
}
|
||||
}
|
||||
|
||||
private NodeSelector firstPositionNodeSelector() {
|
||||
return new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> nodes) {
|
||||
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
|
||||
if (httpHosts[0] != itr.next().getHost()) {
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ import org.apache.http.message.BasicHttpResponse;
|
||||
import org.apache.http.message.BasicStatusLine;
|
||||
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
|
||||
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
|
||||
import org.elasticsearch.client.Node.Roles;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
@ -42,8 +43,11 @@ import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
@ -71,7 +75,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
|
||||
private ExecutorService exec = Executors.newFixedThreadPool(1);
|
||||
private RestClient restClient;
|
||||
private HttpHost[] httpHosts;
|
||||
private List<Node> nodes;
|
||||
private HostsTrackingFailureListener failureListener;
|
||||
|
||||
@Before
|
||||
@ -108,13 +112,14 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
return null;
|
||||
}
|
||||
});
|
||||
int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
|
||||
httpHosts = new HttpHost[numHosts];
|
||||
for (int i = 0; i < numHosts; i++) {
|
||||
httpHosts[i] = new HttpHost("localhost", 9200 + i);
|
||||
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
|
||||
nodes = new ArrayList<>(numNodes);
|
||||
for (int i = 0; i < numNodes; i++) {
|
||||
nodes.add(new Node(new HttpHost("localhost", 9200 + i)));
|
||||
}
|
||||
nodes = Collections.unmodifiableList(nodes);
|
||||
failureListener = new HostsTrackingFailureListener();
|
||||
restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener);
|
||||
restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -128,9 +133,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
public void testRoundRobinOkStatusCodes() throws IOException {
|
||||
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
|
||||
for (int i = 0; i < numIters; i++) {
|
||||
Set<HttpHost> hostsSet = new HashSet<>();
|
||||
Collections.addAll(hostsSet, httpHosts);
|
||||
for (int j = 0; j < httpHosts.length; j++) {
|
||||
Set<HttpHost> hostsSet = hostsSet();
|
||||
for (int j = 0; j < nodes.size(); j++) {
|
||||
int statusCode = randomOkStatusCode(getRandom());
|
||||
Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
|
||||
assertEquals(statusCode, response.getStatusLine().getStatusCode());
|
||||
@ -144,9 +148,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
public void testRoundRobinNoRetryErrors() throws IOException {
|
||||
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
|
||||
for (int i = 0; i < numIters; i++) {
|
||||
Set<HttpHost> hostsSet = new HashSet<>();
|
||||
Collections.addAll(hostsSet, httpHosts);
|
||||
for (int j = 0; j < httpHosts.length; j++) {
|
||||
Set<HttpHost> hostsSet = hostsSet();
|
||||
for (int j = 0; j < nodes.size(); j++) {
|
||||
String method = randomHttpMethod(getRandom());
|
||||
int statusCode = randomErrorNoRetryStatusCode(getRandom());
|
||||
try {
|
||||
@ -185,10 +188,9 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
* the caller. It wraps the exception that contains the failed hosts.
|
||||
*/
|
||||
e = (ResponseException) e.getCause();
|
||||
Set<HttpHost> hostsSet = new HashSet<>();
|
||||
Collections.addAll(hostsSet, httpHosts);
|
||||
Set<HttpHost> hostsSet = hostsSet();
|
||||
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
|
||||
failureListener.assertCalled(httpHosts);
|
||||
failureListener.assertCalled(nodes);
|
||||
do {
|
||||
Response response = e.getResponse();
|
||||
assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode());
|
||||
@ -210,10 +212,9 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
* the caller. It wraps the exception that contains the failed hosts.
|
||||
*/
|
||||
e = (IOException) e.getCause();
|
||||
Set<HttpHost> hostsSet = new HashSet<>();
|
||||
Collections.addAll(hostsSet, httpHosts);
|
||||
Set<HttpHost> hostsSet = hostsSet();
|
||||
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
|
||||
failureListener.assertCalled(httpHosts);
|
||||
failureListener.assertCalled(nodes);
|
||||
do {
|
||||
HttpHost httpHost = HttpHost.create(e.getMessage());
|
||||
assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
|
||||
@ -232,9 +233,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
|
||||
for (int i = 1; i <= numIters; i++) {
|
||||
//check that one different host is resurrected at each new attempt
|
||||
Set<HttpHost> hostsSet = new HashSet<>();
|
||||
Collections.addAll(hostsSet, httpHosts);
|
||||
for (int j = 0; j < httpHosts.length; j++) {
|
||||
Set<HttpHost> hostsSet = hostsSet();
|
||||
for (int j = 0; j < nodes.size(); j++) {
|
||||
retryEndpoint = randomErrorRetryEndpoint();
|
||||
try {
|
||||
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
|
||||
@ -308,6 +308,58 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testNodeSelector() throws IOException {
|
||||
NodeSelector firstPositionOnly = new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> restClientNodes) {
|
||||
boolean found = false;
|
||||
for (Iterator<Node> itr = restClientNodes.iterator(); itr.hasNext();) {
|
||||
if (nodes.get(0) == itr.next()) {
|
||||
found = true;
|
||||
} else {
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
assertTrue(found);
|
||||
}
|
||||
};
|
||||
int rounds = between(1, 10);
|
||||
for (int i = 0; i < rounds; i++) {
|
||||
/*
|
||||
* Run the request more than once to verify that the
|
||||
* NodeSelector overrides the round robin behavior.
|
||||
*/
|
||||
Request request = new Request("GET", "/200");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setNodeSelector(firstPositionOnly);
|
||||
request.setOptions(options);
|
||||
Response response = restClient.performRequest(request);
|
||||
assertEquals(nodes.get(0).getHost(), response.getHost());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSetNodes() throws IOException {
|
||||
List<Node> newNodes = new ArrayList<>(nodes.size());
|
||||
for (int i = 0; i < nodes.size(); i++) {
|
||||
Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false);
|
||||
newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles));
|
||||
}
|
||||
restClient.setNodes(newNodes);
|
||||
int rounds = between(1, 10);
|
||||
for (int i = 0; i < rounds; i++) {
|
||||
/*
|
||||
* Run the request more than once to verify that the
|
||||
* NodeSelector overrides the round robin behavior.
|
||||
*/
|
||||
Request request = new Request("GET", "/200");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
|
||||
request.setOptions(options);
|
||||
Response response = restClient.performRequest(request);
|
||||
assertEquals(newNodes.get(0).getHost(), response.getHost());
|
||||
}
|
||||
}
|
||||
|
||||
private static String randomErrorRetryEndpoint() {
|
||||
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
|
||||
case 0:
|
||||
@ -321,4 +373,16 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
|
||||
}
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a mutable {@link Set} containing all the {@link Node#getHost() hosts}
|
||||
* in use by the test.
|
||||
*/
|
||||
private Set<HttpHost> hostsSet() {
|
||||
Set<HttpHost> hosts = new HashSet<>();
|
||||
for (Node node : nodes) {
|
||||
hosts.add(node.getHost());
|
||||
}
|
||||
return hosts;
|
||||
}
|
||||
}
|
||||
|
@ -65,6 +65,7 @@ import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
|
||||
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
|
||||
import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes;
|
||||
@ -94,7 +95,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
||||
private ExecutorService exec = Executors.newFixedThreadPool(1);
|
||||
private RestClient restClient;
|
||||
private Header[] defaultHeaders;
|
||||
private HttpHost httpHost;
|
||||
private Node node;
|
||||
private CloseableHttpAsyncClient httpClient;
|
||||
private HostsTrackingFailureListener failureListener;
|
||||
|
||||
@ -108,7 +109,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
||||
public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
|
||||
HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
|
||||
HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2];
|
||||
assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class));
|
||||
assertThat(context.getAuthCache().get(node.getHost()), instanceOf(BasicScheme.class));
|
||||
final FutureCallback<HttpResponse> futureCallback =
|
||||
(FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3];
|
||||
HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
|
||||
@ -146,9 +147,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
||||
});
|
||||
|
||||
defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
|
||||
httpHost = new HttpHost("localhost", 9200);
|
||||
node = new Node(new HttpHost("localhost", 9200));
|
||||
failureListener = new HostsTrackingFailureListener();
|
||||
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener);
|
||||
restClient = new RestClient(httpClient, 10000, defaultHeaders,
|
||||
singletonList(node), null, failureListener);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -244,7 +246,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
||||
if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) {
|
||||
failureListener.assertNotCalled();
|
||||
} else {
|
||||
failureListener.assertCalled(httpHost);
|
||||
failureListener.assertCalled(singletonList(node));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -259,14 +261,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
||||
} catch(IOException e) {
|
||||
assertThat(e, instanceOf(ConnectTimeoutException.class));
|
||||
}
|
||||
failureListener.assertCalled(httpHost);
|
||||
failureListener.assertCalled(singletonList(node));
|
||||
try {
|
||||
performRequest(method, "/soe");
|
||||
fail("request should have failed");
|
||||
} catch(IOException e) {
|
||||
assertThat(e, instanceOf(SocketTimeoutException.class));
|
||||
}
|
||||
failureListener.assertCalled(httpHost);
|
||||
failureListener.assertCalled(singletonList(node));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -22,14 +22,23 @@ package org.elasticsearch.client;
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
|
||||
import org.elasticsearch.client.DeadHostStateTests.ConfigurableTimeSupplier;
|
||||
import org.elasticsearch.client.RestClient.NodeTuple;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
@ -43,9 +52,9 @@ import static org.mockito.Mockito.verify;
|
||||
public class RestClientTests extends RestClientTestCase {
|
||||
|
||||
public void testCloseIsIdempotent() throws IOException {
|
||||
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
|
||||
List<Node> nodes = singletonList(new Node(new HttpHost("localhost", 9200)));
|
||||
CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class);
|
||||
RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], hosts, null, null);
|
||||
RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null);
|
||||
restClient.close();
|
||||
verify(closeableHttpAsyncClient, times(1)).close();
|
||||
restClient.close();
|
||||
@ -225,6 +234,7 @@ public class RestClientTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public void testSetHostsWrongArguments() throws IOException {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setHosts((HttpHost[]) null);
|
||||
@ -241,45 +251,75 @@ public class RestClientTests extends RestClientTestCase {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setHosts((HttpHost) null);
|
||||
fail("setHosts should have failed");
|
||||
} catch (NullPointerException e) {
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals("host cannot be null", e.getMessage());
|
||||
}
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
|
||||
fail("setHosts should have failed");
|
||||
} catch (NullPointerException e) {
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals("host cannot be null", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSetHostsPreservesOrdering() throws Exception {
|
||||
public void testSetNodesWrongArguments() throws IOException {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
HttpHost[] hosts = randomHosts();
|
||||
restClient.setHosts(hosts);
|
||||
assertEquals(Arrays.asList(hosts), restClient.getHosts());
|
||||
restClient.setNodes(null);
|
||||
fail("setNodes should have failed");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals("nodes must not be null or empty", e.getMessage());
|
||||
}
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setNodes(Collections.<Node>emptyList());
|
||||
fail("setNodes should have failed");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals("nodes must not be null or empty", e.getMessage());
|
||||
}
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setNodes(Collections.singletonList((Node) null));
|
||||
fail("setNodes should have failed");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("node cannot be null", e.getMessage());
|
||||
}
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
restClient.setNodes(Arrays.asList(
|
||||
new Node(new HttpHost("localhost", 9200)),
|
||||
null,
|
||||
new Node(new HttpHost("localhost", 9201))));
|
||||
fail("setNodes should have failed");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("node cannot be null", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private static HttpHost[] randomHosts() {
|
||||
int numHosts = randomIntBetween(1, 10);
|
||||
HttpHost[] hosts = new HttpHost[numHosts];
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
hosts[i] = new HttpHost("host-" + i, 9200);
|
||||
public void testSetNodesPreservesOrdering() throws Exception {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
List<Node> nodes = randomNodes();
|
||||
restClient.setNodes(nodes);
|
||||
assertEquals(nodes, restClient.getNodes());
|
||||
}
|
||||
return hosts;
|
||||
}
|
||||
|
||||
public void testSetHostsDuplicatedHosts() throws Exception {
|
||||
private static List<Node> randomNodes() {
|
||||
int numNodes = randomIntBetween(1, 10);
|
||||
List<Node> nodes = new ArrayList<>(numNodes);
|
||||
for (int i = 0; i < numNodes; i++) {
|
||||
nodes.add(new Node(new HttpHost("host-" + i, 9200)));
|
||||
}
|
||||
return nodes;
|
||||
}
|
||||
|
||||
public void testSetNodesDuplicatedHosts() throws Exception {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
int numHosts = randomIntBetween(1, 10);
|
||||
HttpHost[] hosts = new HttpHost[numHosts];
|
||||
HttpHost host = new HttpHost("host", 9200);
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
hosts[i] = host;
|
||||
int numNodes = randomIntBetween(1, 10);
|
||||
List<Node> nodes = new ArrayList<>(numNodes);
|
||||
Node node = new Node(new HttpHost("host", 9200));
|
||||
for (int i = 0; i < numNodes; i++) {
|
||||
nodes.add(node);
|
||||
}
|
||||
restClient.setHosts(hosts);
|
||||
assertEquals(1, restClient.getHosts().size());
|
||||
assertEquals(host, restClient.getHosts().get(0));
|
||||
restClient.setNodes(nodes);
|
||||
assertEquals(1, restClient.getNodes().size());
|
||||
assertEquals(node, restClient.getNodes().get(0));
|
||||
}
|
||||
}
|
||||
|
||||
@ -300,8 +340,143 @@ public class RestClientTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
private static RestClient createRestClient() {
|
||||
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
|
||||
return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
|
||||
public void testSelectHosts() throws IOException {
|
||||
Node n1 = new Node(new HttpHost("1"), null, null, "1", null);
|
||||
Node n2 = new Node(new HttpHost("2"), null, null, "2", null);
|
||||
Node n3 = new Node(new HttpHost("3"), null, null, "3", null);
|
||||
|
||||
NodeSelector not1 = new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> nodes) {
|
||||
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
|
||||
if ("1".equals(itr.next().getVersion())) {
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NOT 1";
|
||||
}
|
||||
};
|
||||
NodeSelector noNodes = new NodeSelector() {
|
||||
@Override
|
||||
public void select(Iterable<Node> nodes) {
|
||||
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
|
||||
itr.next();
|
||||
itr.remove();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NONE";
|
||||
}
|
||||
};
|
||||
|
||||
NodeTuple<List<Node>> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null);
|
||||
|
||||
Map<HttpHost, DeadHostState> emptyBlacklist = Collections.emptyMap();
|
||||
|
||||
// Normal cases where the node selector doesn't reject all living nodes
|
||||
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY);
|
||||
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1);
|
||||
|
||||
/*
|
||||
* Try a NodeSelector that excludes all nodes. This should
|
||||
* throw an exception
|
||||
*/
|
||||
{
|
||||
String message = "NodeSelector [NONE] rejected all nodes, living ["
|
||||
+ "[host=http://1, version=1], [host=http://2, version=2], "
|
||||
+ "[host=http://3, version=3]] and dead []";
|
||||
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes));
|
||||
}
|
||||
|
||||
// Mark all the nodes dead for a few test cases
|
||||
{
|
||||
ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier();
|
||||
Map<HttpHost, DeadHostState> blacklist = new HashMap<>();
|
||||
blacklist.put(n1.getHost(), new DeadHostState(timeSupplier));
|
||||
blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
|
||||
blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));
|
||||
|
||||
/*
|
||||
* selectHosts will revive a single host if regardless of
|
||||
* blacklist time. It'll revive the node that is closest
|
||||
* to being revived that the NodeSelector is ok with.
|
||||
*/
|
||||
assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY));
|
||||
assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), not1));
|
||||
|
||||
/*
|
||||
* Try a NodeSelector that excludes all nodes. This should
|
||||
* return a failure, but a different failure than when the
|
||||
* blacklist is empty so that the caller knows that all of
|
||||
* their nodes are blacklisted AND blocked.
|
||||
*/
|
||||
String message = "NodeSelector [NONE] rejected all nodes, living [] and dead ["
|
||||
+ "[host=http://1, version=1], [host=http://2, version=2], "
|
||||
+ "[host=http://3, version=3]]";
|
||||
assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes));
|
||||
|
||||
/*
|
||||
* Now lets wind the clock forward, past the timeout for one of
|
||||
* the dead nodes. We should return it.
|
||||
*/
|
||||
timeSupplier.nanoTime = new DeadHostState(timeSupplier).getDeadUntilNanos();
|
||||
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY);
|
||||
|
||||
/*
|
||||
* But if the NodeSelector rejects that node then we'll pick the
|
||||
* first on that the NodeSelector doesn't reject.
|
||||
*/
|
||||
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1);
|
||||
|
||||
/*
|
||||
* If we wind the clock way into the future, past any of the
|
||||
* blacklist timeouts then we function as though the nodes aren't
|
||||
* in the blacklist at all.
|
||||
*/
|
||||
timeSupplier.nanoTime += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS;
|
||||
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY);
|
||||
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<Node>> nodeTuple,
|
||||
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) throws IOException {
|
||||
int iterations = 1000;
|
||||
AtomicInteger lastNodeIndex = new AtomicInteger(0);
|
||||
assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
|
||||
// Calling it again rotates the set of results
|
||||
for (int i = 1; i < iterations; i++) {
|
||||
Collections.rotate(expectedNodes, 1);
|
||||
assertEquals("iteration " + i, expectedNodes,
|
||||
RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that {@link RestClient#selectHosts} fails on the provided arguments.
|
||||
* @return the message in the exception thrown by the failure
|
||||
*/
|
||||
private String assertSelectAllRejected( NodeTuple<List<Node>> nodeTuple,
|
||||
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) {
|
||||
try {
|
||||
RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector);
|
||||
throw new AssertionError("expected selectHosts to fail");
|
||||
} catch (IOException e) {
|
||||
return e.getMessage();
|
||||
}
|
||||
}
|
||||
|
||||
private static RestClient createRestClient() {
|
||||
List<Node> nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
|
||||
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000),
|
||||
new Header[] {}, nodes, null, null);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -36,7 +36,9 @@ import org.apache.http.nio.entity.NStringEntity;
|
||||
import org.apache.http.ssl.SSLContextBuilder;
|
||||
import org.apache.http.ssl.SSLContexts;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
|
||||
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.NodeSelector;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
@ -72,6 +74,19 @@ import java.util.concurrent.CountDownLatch;
|
||||
*/
|
||||
@SuppressWarnings("unused")
|
||||
public class RestClientDocumentation {
|
||||
private static final String TOKEN = "DUMMY";
|
||||
|
||||
// tag::rest-client-options-singleton
|
||||
private static final RequestOptions COMMON_OPTIONS;
|
||||
static {
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
builder.addHeader("Authorization", "Bearer " + TOKEN); // <1>
|
||||
builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2>
|
||||
builder.setHttpAsyncResponseConsumerFactory( // <3>
|
||||
new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024));
|
||||
COMMON_OPTIONS = builder.build();
|
||||
}
|
||||
// end::rest-client-options-singleton
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testUsage() throws IOException, InterruptedException {
|
||||
@ -104,7 +119,7 @@ public class RestClientDocumentation {
|
||||
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
|
||||
builder.setFailureListener(new RestClient.FailureListener() {
|
||||
@Override
|
||||
public void onFailure(HttpHost host) {
|
||||
public void onFailure(Node node) {
|
||||
// <1>
|
||||
}
|
||||
});
|
||||
@ -172,22 +187,14 @@ public class RestClientDocumentation {
|
||||
//tag::rest-client-body-shorter
|
||||
request.setJsonEntity("{\"json\":\"text\"}");
|
||||
//end::rest-client-body-shorter
|
||||
{
|
||||
//tag::rest-client-headers
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("Accept", "text/plain");
|
||||
options.addHeader("Cache-Control", "no-cache");
|
||||
request.setOptions(options);
|
||||
//end::rest-client-headers
|
||||
}
|
||||
{
|
||||
//tag::rest-client-response-consumer
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setHttpAsyncResponseConsumerFactory(
|
||||
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
|
||||
request.setOptions(options);
|
||||
//end::rest-client-response-consumer
|
||||
}
|
||||
//tag::rest-client-options-set-singleton
|
||||
request.setOptions(COMMON_OPTIONS);
|
||||
//end::rest-client-options-set-singleton
|
||||
//tag::rest-client-options-customize
|
||||
RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
|
||||
options.addHeader("cats", "knock things off of other things");
|
||||
request.setOptions(options);
|
||||
//end::rest-client-options-customize
|
||||
}
|
||||
{
|
||||
HttpEntity[] documents = new HttpEntity[10];
|
||||
|
@ -26,31 +26,34 @@ import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.Node.Roles;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back.
|
||||
* Compatible with elasticsearch 5.x and 2.x.
|
||||
* Compatible with elasticsearch 2.x+.
|
||||
*/
|
||||
public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
public final class ElasticsearchNodesSniffer implements NodesSniffer {
|
||||
|
||||
private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class);
|
||||
private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class);
|
||||
|
||||
public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
|
||||
|
||||
private final RestClient restClient;
|
||||
private final Map<String, String> sniffRequestParams;
|
||||
private final Request request;
|
||||
private final Scheme scheme;
|
||||
private final JsonFactory jsonFactory = new JsonFactory();
|
||||
|
||||
@ -62,8 +65,8 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
* that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same
|
||||
* client that was used to fetch them.
|
||||
*/
|
||||
public ElasticsearchHostsSniffer(RestClient restClient) {
|
||||
this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP);
|
||||
public ElasticsearchNodesSniffer(RestClient restClient) {
|
||||
this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -77,30 +80,32 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
* that have responded within this timeout will be returned.
|
||||
* @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
|
||||
*/
|
||||
public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
|
||||
public ElasticsearchNodesSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
|
||||
this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null");
|
||||
if (sniffRequestTimeoutMillis < 0) {
|
||||
throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
|
||||
}
|
||||
this.sniffRequestParams = Collections.<String, String>singletonMap("timeout", sniffRequestTimeoutMillis + "ms");
|
||||
this.request = new Request("GET", "/_nodes/http");
|
||||
request.addParameter("timeout", sniffRequestTimeoutMillis + "ms");
|
||||
this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null");
|
||||
}
|
||||
|
||||
/**
|
||||
* Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
|
||||
*/
|
||||
public List<HttpHost> sniffHosts() throws IOException {
|
||||
Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams);
|
||||
return readHosts(response.getEntity());
|
||||
@Override
|
||||
public List<Node> sniff() throws IOException {
|
||||
Response response = restClient.performRequest(request);
|
||||
return readHosts(response.getEntity(), scheme, jsonFactory);
|
||||
}
|
||||
|
||||
private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
|
||||
static List<Node> readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException {
|
||||
try (InputStream inputStream = entity.getContent()) {
|
||||
JsonParser parser = jsonFactory.createParser(inputStream);
|
||||
if (parser.nextToken() != JsonToken.START_OBJECT) {
|
||||
throw new IOException("expected data to start with an object");
|
||||
}
|
||||
List<HttpHost> hosts = new ArrayList<>();
|
||||
List<Node> nodes = new ArrayList<>();
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
|
||||
if ("nodes".equals(parser.getCurrentName())) {
|
||||
@ -108,10 +113,9 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
JsonToken token = parser.nextToken();
|
||||
assert token == JsonToken.START_OBJECT;
|
||||
String nodeId = parser.getCurrentName();
|
||||
HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
|
||||
if (sniffedHost != null) {
|
||||
logger.trace("adding node [" + nodeId + "]");
|
||||
hosts.add(sniffedHost);
|
||||
Node node = readNode(nodeId, parser, scheme);
|
||||
if (node != null) {
|
||||
nodes.add(node);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -119,13 +123,31 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
}
|
||||
}
|
||||
}
|
||||
return hosts;
|
||||
return nodes;
|
||||
}
|
||||
}
|
||||
|
||||
private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
|
||||
HttpHost httpHost = null;
|
||||
private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
|
||||
HttpHost publishedHost = null;
|
||||
/*
|
||||
* We sniff the bound hosts so we can look up the node based on any
|
||||
* address on which it is listening. This is useful in Elasticsearch's
|
||||
* test framework where we sometimes publish ipv6 addresses but the
|
||||
* tests contact the node on ipv4.
|
||||
*/
|
||||
Set<HttpHost> boundHosts = new HashSet<>();
|
||||
String name = null;
|
||||
String version = null;
|
||||
String fieldName = null;
|
||||
// Used to read roles from 5.0+
|
||||
boolean sawRoles = false;
|
||||
boolean master = false;
|
||||
boolean data = false;
|
||||
boolean ingest = false;
|
||||
// Used to read roles from 2.x
|
||||
Boolean masterAttribute = null;
|
||||
Boolean dataAttribute = null;
|
||||
boolean clientAttribute = false;
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
|
||||
fieldName = parser.getCurrentName();
|
||||
@ -133,9 +155,27 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
if ("http".equals(fieldName)) {
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
|
||||
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
|
||||
httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
|
||||
boundAddressAsURI.getScheme());
|
||||
URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
|
||||
publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(),
|
||||
publishAddressAsURI.getScheme());
|
||||
} else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) {
|
||||
while (parser.nextToken() != JsonToken.END_ARRAY) {
|
||||
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
|
||||
boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
|
||||
boundAddressAsURI.getScheme()));
|
||||
}
|
||||
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
} else if ("attributes".equals(fieldName)) {
|
||||
while (parser.nextToken() != JsonToken.END_OBJECT) {
|
||||
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) {
|
||||
masterAttribute = toBoolean(parser.getValueAsString());
|
||||
} else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) {
|
||||
dataAttribute = toBoolean(parser.getValueAsString());
|
||||
} else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) {
|
||||
clientAttribute = toBoolean(parser.getValueAsString());
|
||||
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
|
||||
parser.skipChildren();
|
||||
}
|
||||
@ -143,14 +183,55 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (parser.currentToken() == JsonToken.START_ARRAY) {
|
||||
if ("roles".equals(fieldName)) {
|
||||
sawRoles = true;
|
||||
while (parser.nextToken() != JsonToken.END_ARRAY) {
|
||||
switch (parser.getText()) {
|
||||
case "master":
|
||||
master = true;
|
||||
break;
|
||||
case "data":
|
||||
data = true;
|
||||
break;
|
||||
case "ingest":
|
||||
ingest = true;
|
||||
break;
|
||||
default:
|
||||
logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (parser.currentToken().isScalarValue()) {
|
||||
if ("version".equals(fieldName)) {
|
||||
version = parser.getText();
|
||||
} else if ("name".equals(fieldName)) {
|
||||
name = parser.getText();
|
||||
}
|
||||
}
|
||||
}
|
||||
//http section is not present if http is not enabled on the node, ignore such nodes
|
||||
if (httpHost == null) {
|
||||
if (publishedHost == null) {
|
||||
logger.debug("skipping node [" + nodeId + "] with http disabled");
|
||||
return null;
|
||||
} else {
|
||||
logger.trace("adding node [" + nodeId + "]");
|
||||
if (version.startsWith("2.")) {
|
||||
/*
|
||||
* 2.x doesn't send roles, instead we try to read them from
|
||||
* attributes.
|
||||
*/
|
||||
master = masterAttribute == null ? false == clientAttribute : masterAttribute;
|
||||
data = dataAttribute == null ? false == clientAttribute : dataAttribute;
|
||||
} else {
|
||||
assert sawRoles : "didn't see roles for [" + nodeId + "]";
|
||||
}
|
||||
assert boundHosts.contains(publishedHost) :
|
||||
"[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
|
||||
return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest));
|
||||
}
|
||||
return httpHost;
|
||||
}
|
||||
|
||||
public enum Scheme {
|
||||
@ -167,4 +248,15 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean toBoolean(String string) {
|
||||
switch (string) {
|
||||
case "true":
|
||||
return true;
|
||||
case "false":
|
||||
return false;
|
||||
default:
|
||||
throw new IllegalArgumentException("[" + string + "] is not a valid boolean");
|
||||
}
|
||||
}
|
||||
}
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
@ -27,9 +27,9 @@ import java.util.List;
|
||||
/**
|
||||
* Responsible for sniffing the http hosts
|
||||
*/
|
||||
public interface HostsSniffer {
|
||||
public interface NodesSniffer {
|
||||
/**
|
||||
* Returns the sniffed http hosts
|
||||
* Returns the sniffed Elasticsearch nodes.
|
||||
*/
|
||||
List<HttpHost> sniffHosts() throws IOException;
|
||||
List<Node> sniff() throws IOException;
|
||||
}
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
|
||||
import java.util.Objects;
|
||||
@ -54,7 +54,7 @@ public class SniffOnFailureListener extends RestClient.FailureListener {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(HttpHost host) {
|
||||
public void onFailure(Node node) {
|
||||
if (sniffer == null) {
|
||||
throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientBuilder;
|
||||
|
||||
@ -29,6 +29,7 @@ import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
@ -43,7 +44,7 @@ import java.util.concurrent.atomic.AtomicReference;
|
||||
/**
|
||||
* Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of
|
||||
* {@link RestClient}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults.
|
||||
* A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance.
|
||||
* A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link RestClient} instance.
|
||||
* It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
|
||||
* {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
|
||||
* previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
|
||||
@ -53,7 +54,7 @@ public class Sniffer implements Closeable {
|
||||
private static final Log logger = LogFactory.getLog(Sniffer.class);
|
||||
private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer";
|
||||
|
||||
private final HostsSniffer hostsSniffer;
|
||||
private final NodesSniffer nodesSniffer;
|
||||
private final RestClient restClient;
|
||||
private final long sniffIntervalMillis;
|
||||
private final long sniffAfterFailureDelayMillis;
|
||||
@ -61,12 +62,12 @@ public class Sniffer implements Closeable {
|
||||
private final AtomicBoolean initialized = new AtomicBoolean(false);
|
||||
private volatile ScheduledTask nextScheduledTask;
|
||||
|
||||
Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
|
||||
Sniffer(RestClient restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this(restClient, nodesSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
|
||||
}
|
||||
|
||||
Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this.hostsSniffer = hostsSniffer;
|
||||
Sniffer(RestClient restClient, NodesSniffer nodesSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this.nodesSniffer = nodesSniffer;
|
||||
this.restClient = restClient;
|
||||
this.sniffIntervalMillis = sniffInterval;
|
||||
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay;
|
||||
@ -205,14 +206,14 @@ public class Sniffer implements Closeable {
|
||||
}
|
||||
|
||||
final void sniff() throws IOException {
|
||||
List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
|
||||
List<Node> sniffedNodes = nodesSniffer.sniff();
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("sniffed hosts: " + sniffedHosts);
|
||||
logger.debug("sniffed nodes: " + sniffedNodes);
|
||||
}
|
||||
if (sniffedHosts.isEmpty()) {
|
||||
logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
|
||||
if (sniffedNodes.isEmpty()) {
|
||||
logger.warn("no nodes to set, nodes will be updated at the next sniffing round");
|
||||
} else {
|
||||
restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
|
||||
restClient.setNodes(sniffedNodes);
|
||||
}
|
||||
}
|
||||
|
||||
@ -227,7 +228,8 @@ public class Sniffer implements Closeable {
|
||||
/**
|
||||
* Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation.
|
||||
*
|
||||
* @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched
|
||||
* @param restClient the client that gets its hosts set (via
|
||||
* {@link RestClient#setNodes(Collection)}) once they are fetched
|
||||
* @return a new instance of {@link SnifferBuilder}
|
||||
*/
|
||||
public static SnifferBuilder builder(RestClient restClient) {
|
||||
|
@ -34,7 +34,7 @@ public final class SnifferBuilder {
|
||||
private final RestClient restClient;
|
||||
private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
|
||||
private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
|
||||
private HostsSniffer hostsSniffer;
|
||||
private NodesSniffer nodesSniffer;
|
||||
|
||||
/**
|
||||
* Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch
|
||||
@ -69,13 +69,13 @@ public final class SnifferBuilder {
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer}
|
||||
* is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer},
|
||||
* Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer}
|
||||
* is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer},
|
||||
* or to provide a different implementation (e.g. in case hosts need to taken from a different source).
|
||||
*/
|
||||
public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) {
|
||||
Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
|
||||
this.hostsSniffer = hostsSniffer;
|
||||
public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) {
|
||||
Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null");
|
||||
this.nodesSniffer = nodesSniffer;
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -83,9 +83,9 @@ public final class SnifferBuilder {
|
||||
* Creates the {@link Sniffer} based on the provided configuration.
|
||||
*/
|
||||
public Sniffer build() {
|
||||
if (hostsSniffer == null) {
|
||||
this.hostsSniffer = new ElasticsearchHostsSniffer(restClient);
|
||||
if (nodesSniffer == null) {
|
||||
this.nodesSniffer = new ElasticsearchNodesSniffer(restClient);
|
||||
}
|
||||
return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
|
||||
return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.InputStreamEntity;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClientTestCase;
|
||||
import org.elasticsearch.client.Node.Roles;
|
||||
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.junit.Assert.assertThat;
|
||||
|
||||
/**
|
||||
* Test parsing the response from the {@code /_nodes/http} API from fixed
|
||||
* versions of Elasticsearch.
|
||||
*/
|
||||
public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase {
|
||||
private void checkFile(String file, Node... expected) throws IOException {
|
||||
InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(file);
|
||||
if (in == null) {
|
||||
throw new IllegalArgumentException("Couldn't find [" + file + "]");
|
||||
}
|
||||
try {
|
||||
HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);
|
||||
List<Node> nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory());
|
||||
// Use these assertions because the error messages are nicer than hasItems.
|
||||
assertThat(nodes, hasSize(expected.length));
|
||||
for (Node expectedNode : expected) {
|
||||
assertThat(nodes, hasItem(expectedNode));
|
||||
}
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
}
|
||||
|
||||
public void test2x() throws IOException {
|
||||
checkFile("2.0.0_nodes_http.json",
|
||||
node(9200, "m1", "2.0.0", true, false, false),
|
||||
node(9202, "m2", "2.0.0", true, true, false),
|
||||
node(9201, "m3", "2.0.0", true, false, false),
|
||||
node(9205, "d1", "2.0.0", false, true, false),
|
||||
node(9204, "d2", "2.0.0", false, true, false),
|
||||
node(9203, "d3", "2.0.0", false, true, false),
|
||||
node(9207, "c1", "2.0.0", false, false, false),
|
||||
node(9206, "c2", "2.0.0", false, false, false));
|
||||
}
|
||||
|
||||
public void test5x() throws IOException {
|
||||
checkFile("5.0.0_nodes_http.json",
|
||||
node(9200, "m1", "5.0.0", true, false, true),
|
||||
node(9201, "m2", "5.0.0", true, true, true),
|
||||
node(9202, "m3", "5.0.0", true, false, true),
|
||||
node(9203, "d1", "5.0.0", false, true, true),
|
||||
node(9204, "d2", "5.0.0", false, true, true),
|
||||
node(9205, "d3", "5.0.0", false, true, true),
|
||||
node(9206, "c1", "5.0.0", false, false, true),
|
||||
node(9207, "c2", "5.0.0", false, false, true));
|
||||
}
|
||||
|
||||
public void test6x() throws IOException {
|
||||
checkFile("6.0.0_nodes_http.json",
|
||||
node(9200, "m1", "6.0.0", true, false, true),
|
||||
node(9201, "m2", "6.0.0", true, true, true),
|
||||
node(9202, "m3", "6.0.0", true, false, true),
|
||||
node(9203, "d1", "6.0.0", false, true, true),
|
||||
node(9204, "d2", "6.0.0", false, true, true),
|
||||
node(9205, "d3", "6.0.0", false, true, true),
|
||||
node(9206, "c1", "6.0.0", false, false, true),
|
||||
node(9207, "c2", "6.0.0", false, false, true));
|
||||
}
|
||||
|
||||
private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) {
|
||||
HttpHost host = new HttpHost("127.0.0.1", port);
|
||||
Set<HttpHost> boundHosts = new HashSet<>(2);
|
||||
boundHosts.add(host);
|
||||
boundHosts.add(new HttpHost("[::1]", port));
|
||||
return new Node(host, boundHosts, name, version, new Roles(master, data, ingest));
|
||||
}
|
||||
}
|
@ -30,6 +30,7 @@ import com.sun.net.httpserver.HttpServer;
|
||||
import org.apache.http.Consts;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
@ -44,10 +45,10 @@ import java.io.StringWriter;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
@ -59,17 +60,17 @@ import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
public class ElasticsearchNodesSnifferTests extends RestClientTestCase {
|
||||
|
||||
private int sniffRequestTimeout;
|
||||
private ElasticsearchHostsSniffer.Scheme scheme;
|
||||
private ElasticsearchNodesSniffer.Scheme scheme;
|
||||
private SniffResponse sniffResponse;
|
||||
private HttpServer httpServer;
|
||||
|
||||
@Before
|
||||
public void startHttpServer() throws IOException {
|
||||
this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000);
|
||||
this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values());
|
||||
this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchNodesSniffer.Scheme.values());
|
||||
if (rarely()) {
|
||||
this.sniffResponse = SniffResponse.buildFailure();
|
||||
} else {
|
||||
@ -86,7 +87,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
|
||||
public void testConstructorValidation() throws IOException {
|
||||
try {
|
||||
new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP);
|
||||
new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP);
|
||||
fail("should have failed");
|
||||
} catch(NullPointerException e) {
|
||||
assertEquals("restClient cannot be null", e.getMessage());
|
||||
@ -94,14 +95,14 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
|
||||
try (RestClient restClient = RestClient.builder(httpHost).build()) {
|
||||
try {
|
||||
new ElasticsearchHostsSniffer(restClient, 1, null);
|
||||
new ElasticsearchNodesSniffer(restClient, 1, null);
|
||||
fail("should have failed");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals(e.getMessage(), "scheme cannot be null");
|
||||
}
|
||||
try {
|
||||
new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
|
||||
ElasticsearchHostsSniffer.Scheme.HTTP);
|
||||
new ElasticsearchNodesSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
|
||||
ElasticsearchNodesSniffer.Scheme.HTTP);
|
||||
fail("should have failed");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0");
|
||||
@ -112,17 +113,13 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
public void testSniffNodes() throws IOException {
|
||||
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
|
||||
try (RestClient restClient = RestClient.builder(httpHost).build()) {
|
||||
ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, sniffRequestTimeout, scheme);
|
||||
ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme);
|
||||
try {
|
||||
List<HttpHost> sniffedHosts = sniffer.sniffHosts();
|
||||
List<Node> sniffedNodes = sniffer.sniff();
|
||||
if (sniffResponse.isFailure) {
|
||||
fail("sniffNodes should have failed");
|
||||
}
|
||||
assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size()));
|
||||
Iterator<HttpHost> responseHostsIterator = sniffResponse.hosts.iterator();
|
||||
for (HttpHost sniffedHost : sniffedHosts) {
|
||||
assertEquals(sniffedHost, responseHostsIterator.next());
|
||||
}
|
||||
assertEquals(sniffResponse.result, sniffedNodes);
|
||||
} catch(ResponseException e) {
|
||||
Response response = e.getResponse();
|
||||
if (sniffResponse.isFailure) {
|
||||
@ -173,9 +170,9 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException {
|
||||
private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException {
|
||||
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
|
||||
List<HttpHost> hosts = new ArrayList<>(numNodes);
|
||||
List<Node> nodes = new ArrayList<>(numNodes);
|
||||
JsonFactory jsonFactory = new JsonFactory();
|
||||
StringWriter writer = new StringWriter();
|
||||
JsonGenerator generator = jsonFactory.createGenerator(writer);
|
||||
@ -190,6 +187,23 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
generator.writeObjectFieldStart("nodes");
|
||||
for (int i = 0; i < numNodes; i++) {
|
||||
String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10);
|
||||
String host = "host" + i;
|
||||
int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
|
||||
HttpHost publishHost = new HttpHost(host, port, scheme.toString());
|
||||
Set<HttpHost> boundHosts = new HashSet<>();
|
||||
boundHosts.add(publishHost);
|
||||
|
||||
if (randomBoolean()) {
|
||||
int bound = between(1, 5);
|
||||
for (int b = 0; b < bound; b++) {
|
||||
boundHosts.add(new HttpHost(host + b, port, scheme.toString()));
|
||||
}
|
||||
}
|
||||
|
||||
Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5),
|
||||
randomAsciiAlphanumOfLength(5),
|
||||
new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()));
|
||||
|
||||
generator.writeObjectFieldStart(nodeId);
|
||||
if (getRandom().nextBoolean()) {
|
||||
generator.writeObjectFieldStart("bogus_object");
|
||||
@ -203,44 +217,45 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
}
|
||||
boolean isHttpEnabled = rarely() == false;
|
||||
if (isHttpEnabled) {
|
||||
String host = "host" + i;
|
||||
int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
|
||||
HttpHost httpHost = new HttpHost(host, port, scheme.toString());
|
||||
hosts.add(httpHost);
|
||||
nodes.add(node);
|
||||
generator.writeObjectFieldStart("http");
|
||||
if (getRandom().nextBoolean()) {
|
||||
generator.writeArrayFieldStart("bound_address");
|
||||
generator.writeString("[fe80::1]:" + port);
|
||||
generator.writeString("[::1]:" + port);
|
||||
generator.writeString("127.0.0.1:" + port);
|
||||
generator.writeEndArray();
|
||||
generator.writeArrayFieldStart("bound_address");
|
||||
for (HttpHost bound : boundHosts) {
|
||||
generator.writeString(bound.toHostString());
|
||||
}
|
||||
generator.writeEndArray();
|
||||
if (getRandom().nextBoolean()) {
|
||||
generator.writeObjectFieldStart("bogus_object");
|
||||
generator.writeEndObject();
|
||||
}
|
||||
generator.writeStringField("publish_address", httpHost.toHostString());
|
||||
generator.writeStringField("publish_address", publishHost.toHostString());
|
||||
if (getRandom().nextBoolean()) {
|
||||
generator.writeNumberField("max_content_length_in_bytes", 104857600);
|
||||
}
|
||||
generator.writeEndObject();
|
||||
}
|
||||
if (getRandom().nextBoolean()) {
|
||||
String[] roles = {"master", "data", "ingest"};
|
||||
int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
|
||||
Set<String> nodeRoles = new HashSet<>(numRoles);
|
||||
for (int j = 0; j < numRoles; j++) {
|
||||
String role;
|
||||
do {
|
||||
role = RandomPicks.randomFrom(getRandom(), roles);
|
||||
} while(nodeRoles.add(role) == false);
|
||||
|
||||
List<String> roles = Arrays.asList(new String[] {"master", "data", "ingest"});
|
||||
Collections.shuffle(roles, getRandom());
|
||||
generator.writeArrayFieldStart("roles");
|
||||
for (String role : roles) {
|
||||
if ("master".equals(role) && node.getRoles().isMasterEligible()) {
|
||||
generator.writeString("master");
|
||||
}
|
||||
generator.writeArrayFieldStart("roles");
|
||||
for (String nodeRole : nodeRoles) {
|
||||
generator.writeString(nodeRole);
|
||||
if ("data".equals(role) && node.getRoles().isData()) {
|
||||
generator.writeString("data");
|
||||
}
|
||||
if ("ingest".equals(role) && node.getRoles().isIngest()) {
|
||||
generator.writeString("ingest");
|
||||
}
|
||||
generator.writeEndArray();
|
||||
}
|
||||
generator.writeEndArray();
|
||||
|
||||
generator.writeFieldName("version");
|
||||
generator.writeString(node.getVersion());
|
||||
generator.writeFieldName("name");
|
||||
generator.writeString(node.getName());
|
||||
|
||||
int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
|
||||
Map<String, String> attributes = new HashMap<>(numAttributes);
|
||||
for (int j = 0; j < numAttributes; j++) {
|
||||
@ -260,18 +275,18 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
generator.writeEndObject();
|
||||
generator.writeEndObject();
|
||||
generator.close();
|
||||
return SniffResponse.buildResponse(writer.toString(), hosts);
|
||||
return SniffResponse.buildResponse(writer.toString(), nodes);
|
||||
}
|
||||
|
||||
private static class SniffResponse {
|
||||
private final String nodesInfoBody;
|
||||
private final int nodesInfoResponseCode;
|
||||
private final List<HttpHost> hosts;
|
||||
private final List<Node> result;
|
||||
private final boolean isFailure;
|
||||
|
||||
SniffResponse(String nodesInfoBody, List<HttpHost> hosts, boolean isFailure) {
|
||||
SniffResponse(String nodesInfoBody, List<Node> result, boolean isFailure) {
|
||||
this.nodesInfoBody = nodesInfoBody;
|
||||
this.hosts = hosts;
|
||||
this.result = result;
|
||||
this.isFailure = isFailure;
|
||||
if (isFailure) {
|
||||
this.nodesInfoResponseCode = randomErrorResponseCode();
|
||||
@ -281,11 +296,11 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
|
||||
}
|
||||
|
||||
static SniffResponse buildFailure() {
|
||||
return new SniffResponse("", Collections.<HttpHost>emptyList(), true);
|
||||
return new SniffResponse("", Collections.<Node>emptyList(), true);
|
||||
}
|
||||
|
||||
static SniffResponse buildResponse(String nodesInfoBody, List<HttpHost> hosts) {
|
||||
return new SniffResponse(nodesInfoBody, hosts, false);
|
||||
static SniffResponse buildResponse(String nodesInfoBody, List<Node> nodes) {
|
||||
return new SniffResponse(nodesInfoBody, nodes, false);
|
||||
}
|
||||
}
|
||||
|
@ -20,16 +20,17 @@
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc.
|
||||
* Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc.
|
||||
*/
|
||||
class MockHostsSniffer implements HostsSniffer {
|
||||
class MockNodesSniffer implements NodesSniffer {
|
||||
@Override
|
||||
public List<HttpHost> sniffHosts() {
|
||||
return Collections.singletonList(new HttpHost("localhost", 9200));
|
||||
public List<Node> sniff() {
|
||||
return Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
|
||||
}
|
||||
}
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientTestCase;
|
||||
|
||||
@ -46,7 +47,7 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
|
||||
}
|
||||
|
||||
try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
|
||||
try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) {
|
||||
try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) {
|
||||
listener.setSniffer(sniffer);
|
||||
try {
|
||||
listener.setSniffer(sniffer);
|
||||
@ -54,7 +55,7 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
|
||||
} catch(IllegalStateException e) {
|
||||
assertEquals("sniffer can only be set once", e.getMessage());
|
||||
}
|
||||
listener.onFailure(new HttpHost("localhost", 9200));
|
||||
listener.onFailure(new Node(new HttpHost("localhost", 9200)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -61,10 +61,10 @@ public class SnifferBuilderTests extends RestClientTestCase {
|
||||
|
||||
|
||||
try {
|
||||
Sniffer.builder(client).setHostsSniffer(null);
|
||||
Sniffer.builder(client).setNodesSniffer(null);
|
||||
fail("should have failed");
|
||||
} catch(NullPointerException e) {
|
||||
assertEquals("hostsSniffer cannot be null", e.getMessage());
|
||||
assertEquals("nodesSniffer cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
|
||||
@ -80,7 +80,7 @@ public class SnifferBuilderTests extends RestClientTestCase {
|
||||
builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
|
||||
}
|
||||
if (getRandom().nextBoolean()) {
|
||||
builder.setHostsSniffer(new MockHostsSniffer());
|
||||
builder.setNodesSniffer(new MockNodesSniffer());
|
||||
}
|
||||
|
||||
try (Sniffer sniffer = builder.build()) {
|
||||
|
@ -20,11 +20,11 @@
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientTestCase;
|
||||
import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler;
|
||||
import org.elasticsearch.client.sniff.Sniffer.Scheduler;
|
||||
import org.mockito.Matchers;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
@ -62,6 +62,7 @@ import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyCollectionOf;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
@ -71,12 +72,12 @@ import static org.mockito.Mockito.when;
|
||||
public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
/**
|
||||
* Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation
|
||||
* Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link NodesSniffer} implementation
|
||||
* to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance.
|
||||
*/
|
||||
public void testSniff() throws IOException {
|
||||
HttpHost initialHost = new HttpHost("localhost", 9200);
|
||||
try (RestClient restClient = RestClient.builder(initialHost).build()) {
|
||||
Node initialNode = new Node(new HttpHost("localhost", 9200));
|
||||
try (RestClient restClient = RestClient.builder(initialNode).build()) {
|
||||
Scheduler noOpScheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
@ -88,53 +89,53 @@ public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
}
|
||||
};
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
|
||||
int iters = randomIntBetween(5, 30);
|
||||
try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){
|
||||
try (Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 1000L, -1)){
|
||||
{
|
||||
assertEquals(1, restClient.getHosts().size());
|
||||
HttpHost httpHost = restClient.getHosts().get(0);
|
||||
assertEquals("localhost", httpHost.getHostName());
|
||||
assertEquals(9200, httpHost.getPort());
|
||||
assertEquals(1, restClient.getNodes().size());
|
||||
Node node = restClient.getNodes().get(0);
|
||||
assertEquals("localhost", node.getHost().getHostName());
|
||||
assertEquals(9200, node.getHost().getPort());
|
||||
}
|
||||
int emptyList = 0;
|
||||
int failures = 0;
|
||||
int runs = 0;
|
||||
List<HttpHost> lastHosts = Collections.singletonList(initialHost);
|
||||
List<Node> lastNodes = Collections.singletonList(initialNode);
|
||||
for (int i = 0; i < iters; i++) {
|
||||
try {
|
||||
runs++;
|
||||
sniffer.sniff();
|
||||
if (hostsSniffer.failures.get() > failures) {
|
||||
if (nodesSniffer.failures.get() > failures) {
|
||||
failures++;
|
||||
fail("should have failed given that hostsSniffer says it threw an exception");
|
||||
} else if (hostsSniffer.emptyList.get() > emptyList) {
|
||||
fail("should have failed given that nodesSniffer says it threw an exception");
|
||||
} else if (nodesSniffer.emptyList.get() > emptyList) {
|
||||
emptyList++;
|
||||
assertEquals(lastHosts, restClient.getHosts());
|
||||
assertEquals(lastNodes, restClient.getNodes());
|
||||
} else {
|
||||
assertNotEquals(lastHosts, restClient.getHosts());
|
||||
List<HttpHost> expectedHosts = CountingHostsSniffer.buildHosts(runs);
|
||||
assertEquals(expectedHosts, restClient.getHosts());
|
||||
lastHosts = restClient.getHosts();
|
||||
assertNotEquals(lastNodes, restClient.getNodes());
|
||||
List<Node> expectedNodes = CountingNodesSniffer.buildNodes(runs);
|
||||
assertEquals(expectedNodes, restClient.getNodes());
|
||||
lastNodes = restClient.getNodes();
|
||||
}
|
||||
} catch(IOException e) {
|
||||
if (hostsSniffer.failures.get() > failures) {
|
||||
if (nodesSniffer.failures.get() > failures) {
|
||||
failures++;
|
||||
assertEquals("communication breakdown", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
assertEquals(hostsSniffer.emptyList.get(), emptyList);
|
||||
assertEquals(hostsSniffer.failures.get(), failures);
|
||||
assertEquals(hostsSniffer.runs.get(), runs);
|
||||
assertEquals(nodesSniffer.emptyList.get(), emptyList);
|
||||
assertEquals(nodesSniffer.failures.get(), failures);
|
||||
assertEquals(nodesSniffer.runs.get(), runs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
|
||||
* Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link NodesSniffer}.
|
||||
* Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
|
||||
* The {@link CountingHostsSniffer} doesn't make any network connection but may throw exception or return no hosts, which makes
|
||||
* The {@link CountingNodesSniffer} doesn't make any network connection but may throw exception or return no nodes, which makes
|
||||
* it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
|
||||
* The {@link Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the requested sniff
|
||||
* delays while allowing to assert that the requested delays for each requested run and the following one are the expected values.
|
||||
@ -143,7 +144,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
|
||||
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
|
||||
final int iters = randomIntBetween(30, 100);
|
||||
final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
|
||||
final CountDownLatch completionLatch = new CountDownLatch(1);
|
||||
@ -185,7 +186,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
}
|
||||
};
|
||||
try {
|
||||
new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
|
||||
assertEquals(iters, futures.size());
|
||||
//the last future is the only one that may not be completed yet, as the count down happens
|
||||
@ -200,10 +201,10 @@ public class SnifferTests extends RestClientTestCase {
|
||||
executor.shutdown();
|
||||
assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
|
||||
}
|
||||
int totalRuns = hostsSniffer.runs.get();
|
||||
int totalRuns = nodesSniffer.runs.get();
|
||||
assertEquals(iters, totalRuns);
|
||||
int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
|
||||
verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
|
||||
int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
|
||||
verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
|
||||
verifyNoMoreInteractions(restClient);
|
||||
}
|
||||
|
||||
@ -234,7 +235,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
}
|
||||
};
|
||||
|
||||
Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
Sniffer sniffer = new Sniffer(restClient, new MockNodesSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
assertEquals(0, shutdown.get());
|
||||
int iters = randomIntBetween(3, 10);
|
||||
for (int i = 1; i <= iters; i++) {
|
||||
@ -246,7 +247,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
public void testSniffOnFailureNotInitialized() {
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
|
||||
long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
|
||||
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
|
||||
final AtomicInteger scheduleCalls = new AtomicInteger(0);
|
||||
@ -262,15 +263,15 @@ public class SnifferTests extends RestClientTestCase {
|
||||
}
|
||||
};
|
||||
|
||||
Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
for (int i = 0; i < 10; i++) {
|
||||
sniffer.sniffOnFailure();
|
||||
}
|
||||
assertEquals(1, scheduleCalls.get());
|
||||
int totalRuns = hostsSniffer.runs.get();
|
||||
int totalRuns = nodesSniffer.runs.get();
|
||||
assertEquals(0, totalRuns);
|
||||
int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
|
||||
verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
|
||||
int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
|
||||
verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
|
||||
verifyNoMoreInteractions(restClient);
|
||||
}
|
||||
|
||||
@ -281,7 +282,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
*/
|
||||
public void testSniffOnFailure() throws Exception {
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
|
||||
final AtomicBoolean initializing = new AtomicBoolean(true);
|
||||
final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
|
||||
final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
|
||||
@ -351,7 +352,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
public void shutdown() {
|
||||
}
|
||||
};
|
||||
final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
final Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS));
|
||||
|
||||
ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
|
||||
@ -413,9 +414,9 @@ public class SnifferTests extends RestClientTestCase {
|
||||
}
|
||||
assertEquals(onFailureTasks.size(), cancelledTasks);
|
||||
|
||||
assertEquals(completedTasks, hostsSniffer.runs.get());
|
||||
int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
|
||||
verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
|
||||
assertEquals(completedTasks, nodesSniffer.runs.get());
|
||||
int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
|
||||
verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
|
||||
verifyNoMoreInteractions(restClient);
|
||||
} finally {
|
||||
executor.shutdown();
|
||||
@ -446,7 +447,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
public void testTaskCancelling() throws Exception {
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
HostsSniffer hostsSniffer = mock(HostsSniffer.class);
|
||||
NodesSniffer nodesSniffer = mock(NodesSniffer.class);
|
||||
Scheduler noOpScheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
@ -457,7 +458,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
public void shutdown() {
|
||||
}
|
||||
};
|
||||
Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
|
||||
Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L);
|
||||
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
|
||||
try {
|
||||
int numIters = randomIntBetween(50, 100);
|
||||
@ -540,18 +541,18 @@ public class SnifferTests extends RestClientTestCase {
|
||||
}
|
||||
|
||||
/**
|
||||
* Mock {@link HostsSniffer} implementation used for testing, which most of the times return a fixed host.
|
||||
* It rarely throws exception or return an empty list of hosts, to make sure that such situations are properly handled.
|
||||
* Mock {@link NodesSniffer} implementation used for testing, which most of the times return a fixed node.
|
||||
* It rarely throws exception or return an empty list of nodes, to make sure that such situations are properly handled.
|
||||
* It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run
|
||||
* at a given point in time.
|
||||
*/
|
||||
private static class CountingHostsSniffer implements HostsSniffer {
|
||||
private static class CountingNodesSniffer implements NodesSniffer {
|
||||
private final AtomicInteger runs = new AtomicInteger(0);
|
||||
private final AtomicInteger failures = new AtomicInteger(0);
|
||||
private final AtomicInteger emptyList = new AtomicInteger(0);
|
||||
|
||||
@Override
|
||||
public List<HttpHost> sniffHosts() throws IOException {
|
||||
public List<Node> sniff() throws IOException {
|
||||
int run = runs.incrementAndGet();
|
||||
if (rarely()) {
|
||||
failures.incrementAndGet();
|
||||
@ -562,24 +563,23 @@ public class SnifferTests extends RestClientTestCase {
|
||||
emptyList.incrementAndGet();
|
||||
return Collections.emptyList();
|
||||
}
|
||||
return buildHosts(run);
|
||||
return buildNodes(run);
|
||||
}
|
||||
|
||||
private static List<HttpHost> buildHosts(int run) {
|
||||
private static List<Node> buildNodes(int run) {
|
||||
int size = run % 5 + 1;
|
||||
assert size > 0;
|
||||
List<HttpHost> hosts = new ArrayList<>(size);
|
||||
List<Node> nodes = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
hosts.add(new HttpHost("sniffed-" + run, 9200 + i));
|
||||
nodes.add(new Node(new HttpHost("sniffed-" + run, 9200 + i)));
|
||||
}
|
||||
return hosts;
|
||||
return nodes;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testDefaultSchedulerSchedule() {
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
HostsSniffer hostsSniffer = mock(HostsSniffer.class);
|
||||
NodesSniffer nodesSniffer = mock(NodesSniffer.class);
|
||||
Scheduler noOpScheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
@ -591,7 +591,7 @@ public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
}
|
||||
};
|
||||
Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
|
||||
Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L);
|
||||
Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE));
|
||||
|
||||
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
|
||||
|
@ -20,9 +20,10 @@
|
||||
package org.elasticsearch.client.sniff.documentation;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
|
||||
import org.elasticsearch.client.sniff.HostsSniffer;
|
||||
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;
|
||||
import org.elasticsearch.client.sniff.NodesSniffer;
|
||||
import org.elasticsearch.client.sniff.SniffOnFailureListener;
|
||||
import org.elasticsearch.client.sniff.Sniffer;
|
||||
|
||||
@ -91,12 +92,12 @@ public class SnifferDocumentation {
|
||||
RestClient restClient = RestClient.builder(
|
||||
new HttpHost("localhost", 9200, "http"))
|
||||
.build();
|
||||
HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
|
||||
NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer(
|
||||
restClient,
|
||||
ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
|
||||
ElasticsearchHostsSniffer.Scheme.HTTPS);
|
||||
ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
|
||||
ElasticsearchNodesSniffer.Scheme.HTTPS);
|
||||
Sniffer sniffer = Sniffer.builder(restClient)
|
||||
.setHostsSniffer(hostsSniffer).build();
|
||||
.setNodesSniffer(nodesSniffer).build();
|
||||
//end::sniffer-https
|
||||
}
|
||||
{
|
||||
@ -104,28 +105,28 @@ public class SnifferDocumentation {
|
||||
RestClient restClient = RestClient.builder(
|
||||
new HttpHost("localhost", 9200, "http"))
|
||||
.build();
|
||||
HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
|
||||
NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer(
|
||||
restClient,
|
||||
TimeUnit.SECONDS.toMillis(5),
|
||||
ElasticsearchHostsSniffer.Scheme.HTTP);
|
||||
ElasticsearchNodesSniffer.Scheme.HTTP);
|
||||
Sniffer sniffer = Sniffer.builder(restClient)
|
||||
.setHostsSniffer(hostsSniffer).build();
|
||||
.setNodesSniffer(nodesSniffer).build();
|
||||
//end::sniff-request-timeout
|
||||
}
|
||||
{
|
||||
//tag::custom-hosts-sniffer
|
||||
//tag::custom-nodes-sniffer
|
||||
RestClient restClient = RestClient.builder(
|
||||
new HttpHost("localhost", 9200, "http"))
|
||||
.build();
|
||||
HostsSniffer hostsSniffer = new HostsSniffer() {
|
||||
NodesSniffer nodesSniffer = new NodesSniffer() {
|
||||
@Override
|
||||
public List<HttpHost> sniffHosts() throws IOException {
|
||||
public List<Node> sniff() throws IOException {
|
||||
return null; // <1>
|
||||
}
|
||||
};
|
||||
Sniffer sniffer = Sniffer.builder(restClient)
|
||||
.setHostsSniffer(hostsSniffer).build();
|
||||
//end::custom-hosts-sniffer
|
||||
.setNodesSniffer(nodesSniffer).build();
|
||||
//end::custom-nodes-sniffer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
141
client/sniffer/src/test/resources/2.0.0_nodes_http.json
Normal file
141
client/sniffer/src/test/resources/2.0.0_nodes_http.json
Normal file
@ -0,0 +1,141 @@
|
||||
{
|
||||
"cluster_name" : "elasticsearch",
|
||||
"nodes" : {
|
||||
"qYUZ_8bTRwODPxukDlFw6Q" : {
|
||||
"name" : "d2",
|
||||
"transport_address" : "127.0.0.1:9304",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9204",
|
||||
"attributes" : {
|
||||
"master" : "false"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ],
|
||||
"publish_address" : "127.0.0.1:9204",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"Yej5UVNgR2KgBjUFHOQpCw" : {
|
||||
"name" : "c1",
|
||||
"transport_address" : "127.0.0.1:9307",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9207",
|
||||
"attributes" : {
|
||||
"data" : "false",
|
||||
"master" : "false"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ],
|
||||
"publish_address" : "127.0.0.1:9207",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"mHttJwhwReangKEx9EGuAg" : {
|
||||
"name" : "m3",
|
||||
"transport_address" : "127.0.0.1:9301",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9201",
|
||||
"attributes" : {
|
||||
"data" : "false",
|
||||
"master" : "true"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ],
|
||||
"publish_address" : "127.0.0.1:9201",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"6Erdptt_QRGLxMiLi9mTkg" : {
|
||||
"name" : "c2",
|
||||
"transport_address" : "127.0.0.1:9306",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9206",
|
||||
"attributes" : {
|
||||
"data" : "false",
|
||||
"client" : "true"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ],
|
||||
"publish_address" : "127.0.0.1:9206",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"mLRCZBypTiys6e8KY5DMnA" : {
|
||||
"name" : "m1",
|
||||
"transport_address" : "127.0.0.1:9300",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9200",
|
||||
"attributes" : {
|
||||
"data" : "false"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ],
|
||||
"publish_address" : "127.0.0.1:9200",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"pVqOhytXQwetsZVzCBppYw" : {
|
||||
"name" : "m2",
|
||||
"transport_address" : "127.0.0.1:9302",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9202",
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ],
|
||||
"publish_address" : "127.0.0.1:9202",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"ARyzVfpJSw2a9TOIUpbsBA" : {
|
||||
"name" : "d1",
|
||||
"transport_address" : "127.0.0.1:9305",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9205",
|
||||
"attributes" : {
|
||||
"master" : "false"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ],
|
||||
"publish_address" : "127.0.0.1:9205",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"2Hpid-g5Sc2BKCevhN6VQw" : {
|
||||
"name" : "d3",
|
||||
"transport_address" : "127.0.0.1:9303",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "2.0.0",
|
||||
"build" : "de54438",
|
||||
"http_address" : "127.0.0.1:9203",
|
||||
"attributes" : {
|
||||
"master" : "false"
|
||||
},
|
||||
"http" : {
|
||||
"bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ],
|
||||
"publish_address" : "127.0.0.1:9203",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
169
client/sniffer/src/test/resources/5.0.0_nodes_http.json
Normal file
169
client/sniffer/src/test/resources/5.0.0_nodes_http.json
Normal file
@ -0,0 +1,169 @@
|
||||
{
|
||||
"_nodes" : {
|
||||
"total" : 8,
|
||||
"successful" : 8,
|
||||
"failed" : 0
|
||||
},
|
||||
"cluster_name" : "test",
|
||||
"nodes" : {
|
||||
"DXz_rhcdSF2xJ96qyjaLVw" : {
|
||||
"name" : "m1",
|
||||
"transport_address" : "127.0.0.1:9300",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9200",
|
||||
"127.0.0.1:9200"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9200",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"53Mi6jYdRgeR1cdyuoNfQQ" : {
|
||||
"name" : "m2",
|
||||
"transport_address" : "127.0.0.1:9301",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9201",
|
||||
"127.0.0.1:9201"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9201",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"XBIghcHiRlWP9c4vY6rETw" : {
|
||||
"name" : "c2",
|
||||
"transport_address" : "127.0.0.1:9307",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9207",
|
||||
"127.0.0.1:9207"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9207",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"cFM30FlyS8K1njH_bovwwQ" : {
|
||||
"name" : "d1",
|
||||
"transport_address" : "127.0.0.1:9303",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9203",
|
||||
"127.0.0.1:9203"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9203",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"eoVUVRGNRDyyOapqIcrsIA" : {
|
||||
"name" : "d2",
|
||||
"transport_address" : "127.0.0.1:9304",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9204",
|
||||
"127.0.0.1:9204"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9204",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"xPN76uDcTP-DyXaRzPg2NQ" : {
|
||||
"name" : "c1",
|
||||
"transport_address" : "127.0.0.1:9306",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9206",
|
||||
"127.0.0.1:9206"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9206",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"RY0oW2d7TISEqazk-U4Kcw" : {
|
||||
"name" : "d3",
|
||||
"transport_address" : "127.0.0.1:9305",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9205",
|
||||
"127.0.0.1:9205"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9205",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"tU0rXEZmQ9GsWfn2TQ4kow" : {
|
||||
"name" : "m3",
|
||||
"transport_address" : "127.0.0.1:9302",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "5.0.0",
|
||||
"build_hash" : "253032b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9202",
|
||||
"127.0.0.1:9202"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9202",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
169
client/sniffer/src/test/resources/6.0.0_nodes_http.json
Normal file
169
client/sniffer/src/test/resources/6.0.0_nodes_http.json
Normal file
@ -0,0 +1,169 @@
|
||||
{
|
||||
"_nodes" : {
|
||||
"total" : 8,
|
||||
"successful" : 8,
|
||||
"failed" : 0
|
||||
},
|
||||
"cluster_name" : "test",
|
||||
"nodes" : {
|
||||
"FX9npqGQSL2mOGF8Zkf3hw" : {
|
||||
"name" : "m2",
|
||||
"transport_address" : "127.0.0.1:9301",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9201",
|
||||
"127.0.0.1:9201"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9201",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"jmUqzYLGTbWCg127kve3Tg" : {
|
||||
"name" : "d1",
|
||||
"transport_address" : "127.0.0.1:9303",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9203",
|
||||
"127.0.0.1:9203"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9203",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"soBU6bzvTOqdLxPstSbJ2g" : {
|
||||
"name" : "d3",
|
||||
"transport_address" : "127.0.0.1:9305",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9205",
|
||||
"127.0.0.1:9205"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9205",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"mtYDAhURTP6twdmNAkMnOg" : {
|
||||
"name" : "m3",
|
||||
"transport_address" : "127.0.0.1:9302",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9202",
|
||||
"127.0.0.1:9202"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9202",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"URxHiUQPROOt1G22Ev6lXw" : {
|
||||
"name" : "c2",
|
||||
"transport_address" : "127.0.0.1:9307",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9207",
|
||||
"127.0.0.1:9207"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9207",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"_06S_kWoRqqFR8Z8CS3JRw" : {
|
||||
"name" : "c1",
|
||||
"transport_address" : "127.0.0.1:9306",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9206",
|
||||
"127.0.0.1:9206"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9206",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"QZE5Bd6DQJmnfVs2dglOvA" : {
|
||||
"name" : "d2",
|
||||
"transport_address" : "127.0.0.1:9304",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"data",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9204",
|
||||
"127.0.0.1:9204"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9204",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
},
|
||||
"_3mTXg6dSweZn5ReB2fQqw" : {
|
||||
"name" : "m1",
|
||||
"transport_address" : "127.0.0.1:9300",
|
||||
"host" : "127.0.0.1",
|
||||
"ip" : "127.0.0.1",
|
||||
"version" : "6.0.0",
|
||||
"build_hash" : "8f0685b",
|
||||
"roles" : [
|
||||
"master",
|
||||
"ingest"
|
||||
],
|
||||
"http" : {
|
||||
"bound_address" : [
|
||||
"[::1]:9200",
|
||||
"127.0.0.1:9200"
|
||||
],
|
||||
"publish_address" : "127.0.0.1:9200",
|
||||
"max_content_length_in_bytes" : 104857600
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
4
client/sniffer/src/test/resources/readme.txt
Normal file
4
client/sniffer/src/test/resources/readme.txt
Normal file
@ -0,0 +1,4 @@
|
||||
`*_node_http.json` contains files created by spinning up toy clusters with a
|
||||
few nodes in different configurations locally at various versions. They are
|
||||
for testing `ElasticsearchNodesSniffer` against different versions of
|
||||
Elasticsearch.
|
@ -23,7 +23,6 @@ import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -231,6 +231,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
|
||||
from { project(':server').jar }
|
||||
from { project(':server').configurations.runtime }
|
||||
from { project(':libs:plugin-classloader').jar }
|
||||
from { project(':distribution:tools:java-version-checker').jar }
|
||||
from { project(':distribution:tools:launchers').jar }
|
||||
into('tools/plugin-cli') {
|
||||
from { project(':distribution:tools:plugin-cli').jar }
|
||||
|
Binary file not shown.
@ -63,7 +63,7 @@ if [ ! -z "$JAVA_OPTS" ]; then
|
||||
fi
|
||||
|
||||
# check the Java version
|
||||
"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JavaVersionChecker
|
||||
"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.java_version_checker.JavaVersionChecker
|
||||
|
||||
export HOSTNAME=$HOSTNAME
|
||||
|
||||
|
Binary file not shown.
14
distribution/tools/java-version-checker/build.gradle
Normal file
14
distribution/tools/java-version-checker/build.gradle
Normal file
@ -0,0 +1,14 @@
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
targetCompatibility = JavaVersion.VERSION_1_7
|
||||
|
||||
// java_version_checker do not depend on core so only JDK signatures should be checked
|
||||
forbiddenApisMain.signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
|
||||
test.enabled = false
|
||||
namingConventions.enabled = false
|
||||
javadoc.enabled = false
|
||||
loggerUsageCheck.enabled = false
|
||||
jarHell.enabled = false
|
@ -17,7 +17,7 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.tools.launchers;
|
||||
package org.elasticsearch.tools.java_version_checker;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
@ -25,8 +25,8 @@ import java.util.Objects;
|
||||
|
||||
public class JavaVersion {
|
||||
|
||||
static final List<Integer> CURRENT = parse(System.getProperty("java.specification.version"));
|
||||
static final List<Integer> JAVA_8 = parse("1.8");
|
||||
public static final List<Integer> CURRENT = parse(System.getProperty("java.specification.version"));
|
||||
public static final List<Integer> JAVA_8 = parse("1.8");
|
||||
|
||||
static List<Integer> parse(final String value) {
|
||||
if (!value.matches("^0*[0-9]+(\\.[0-9]+)*$")) {
|
||||
@ -41,7 +41,7 @@ public class JavaVersion {
|
||||
return version;
|
||||
}
|
||||
|
||||
static int majorVersion(final List<Integer> javaVersion) {
|
||||
public static int majorVersion(final List<Integer> javaVersion) {
|
||||
Objects.requireNonNull(javaVersion);
|
||||
if (javaVersion.get(0) > 1) {
|
||||
return javaVersion.get(0);
|
@ -17,7 +17,7 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.tools.launchers;
|
||||
package org.elasticsearch.tools.java_version_checker;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Locale;
|
||||
@ -45,10 +45,30 @@ final class JavaVersionChecker {
|
||||
Locale.ROOT,
|
||||
"the minimum required Java version is 8; your Java version from [%s] does not meet this requirement",
|
||||
System.getProperty("java.home"));
|
||||
Launchers.errPrintln(message);
|
||||
Launchers.exit(1);
|
||||
errPrintln(message);
|
||||
exit(1);
|
||||
}
|
||||
Launchers.exit(0);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints a string and terminates the line on standard error.
|
||||
*
|
||||
* @param message the message to print
|
||||
*/
|
||||
@SuppressForbidden(reason = "System#err")
|
||||
static void errPrintln(final String message) {
|
||||
System.err.println(message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Exit the VM with the specified status.
|
||||
*
|
||||
* @param status the status
|
||||
*/
|
||||
@SuppressForbidden(reason = "System#exit")
|
||||
static void exit(final int status) {
|
||||
System.exit(status);
|
||||
}
|
||||
|
||||
}
|
@ -17,17 +17,18 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.tools.launchers;
|
||||
package org.elasticsearch.tools.java_version_checker;
|
||||
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.RetentionPolicy;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
/**
|
||||
* Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field.
|
||||
*/
|
||||
@Retention(RetentionPolicy.CLASS)
|
||||
@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE })
|
||||
@interface SuppressForbidden {
|
||||
public @interface SuppressForbidden {
|
||||
String reason();
|
||||
}
|
@ -22,10 +22,8 @@ import org.gradle.api.JavaVersion
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
sourceCompatibility = JavaVersion.VERSION_1_7
|
||||
targetCompatibility = JavaVersion.VERSION_1_7
|
||||
|
||||
dependencies {
|
||||
compile parent.project('java-version-checker')
|
||||
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
|
||||
testCompile "junit:junit:${versions.junit}"
|
||||
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
|
||||
@ -33,13 +31,10 @@ dependencies {
|
||||
|
||||
archivesBaseName = 'elasticsearch-launchers'
|
||||
|
||||
// launchers do not depend on core so only JDK signatures should be checked
|
||||
forbiddenApisMain {
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
forbiddenApisTest {
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
// java_version_checker do not depend on core so only JDK signatures should be checked
|
||||
List jdkSignatures = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
forbiddenApisMain.signaturesURLs = jdkSignatures
|
||||
forbiddenApisTest.signaturesURLs = jdkSignatures
|
||||
|
||||
namingConventions {
|
||||
testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase'
|
||||
@ -48,4 +43,4 @@ namingConventions {
|
||||
|
||||
javadoc.enabled = false
|
||||
loggerUsageCheck.enabled = false
|
||||
jarHell.enabled=false
|
||||
jarHell.enabled = false
|
||||
|
@ -38,6 +38,8 @@ import java.util.TreeMap;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.elasticsearch.tools.java_version_checker.JavaVersion;
|
||||
|
||||
/**
|
||||
* Parses JVM options from a file and prints a single line with all JVM options to standard output.
|
||||
*/
|
||||
|
@ -19,6 +19,8 @@
|
||||
|
||||
package org.elasticsearch.tools.launchers;
|
||||
|
||||
import org.elasticsearch.tools.java_version_checker.SuppressForbidden;
|
||||
|
||||
/**
|
||||
* Utility methods for launchers.
|
||||
*/
|
||||
|
@ -23,6 +23,7 @@ import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.apache.lucene.search.spell.LevensteinDistance;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.bouncycastle.bcpg.ArmoredInputStream;
|
||||
import org.bouncycastle.jce.provider.BouncyCastleProvider;
|
||||
import org.bouncycastle.openpgp.PGPException;
|
||||
import org.bouncycastle.openpgp.PGPPublicKey;
|
||||
@ -47,7 +48,6 @@ import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
@ -74,7 +74,6 @@ import java.security.MessageDigest;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
@ -543,8 +542,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
||||
InputStream fin = pluginZipInputStream(zip);
|
||||
// sin is a URL stream to the signature corresponding to the downloaded plugin zip
|
||||
InputStream sin = urlOpenStream(ascUrl);
|
||||
// pin is a input stream to the public key in ASCII-Armor format (RFC4880); the Armor data is in RFC2045 format
|
||||
InputStream pin = getPublicKey()) {
|
||||
// ain is a input stream to the public key in ASCII-Armor format (RFC4880)
|
||||
InputStream ain = new ArmoredInputStream(getPublicKey())) {
|
||||
final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin));
|
||||
final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0);
|
||||
|
||||
@ -555,18 +554,6 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
|
||||
}
|
||||
|
||||
// compute the signature of the downloaded plugin zip
|
||||
final List<String> lines =
|
||||
new BufferedReader(new InputStreamReader(pin, StandardCharsets.UTF_8)).lines().collect(Collectors.toList());
|
||||
// skip armor headers and possible blank line
|
||||
int index = 1;
|
||||
for (; index < lines.size(); index++) {
|
||||
if (lines.get(index).matches(".*: .*") == false && lines.get(index).matches("\\s*") == false) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
final byte[] armoredData =
|
||||
lines.subList(index, lines.size() - 1).stream().collect(Collectors.joining("\n")).getBytes(StandardCharsets.UTF_8);
|
||||
final InputStream ain = Base64.getMimeDecoder().wrap(new ByteArrayInputStream(armoredData));
|
||||
final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator());
|
||||
final PGPPublicKey key = collection.getPublicKey(signature.getKeyID());
|
||||
signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), key);
|
||||
|
@ -23,7 +23,6 @@ import joptsimple.OptionSet;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cli.EnvironmentAwareCommand;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -31,11 +30,8 @@ import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* A command for the plugin cli to list plugins installed in elasticsearch.
|
||||
|
@ -22,7 +22,6 @@ package org.elasticsearch.plugins;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.cli.Command;
|
||||
import org.elasticsearch.cli.LoggingAwareMultiCommand;
|
||||
import org.elasticsearch.cli.MultiCommand;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
|
||||
import com.google.common.jimfs.Configuration;
|
||||
import com.google.common.jimfs.Jimfs;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
import org.bouncycastle.bcpg.ArmoredOutputStream;
|
||||
import org.bouncycastle.bcpg.BCPGOutputStream;
|
||||
import org.bouncycastle.bcpg.HashAlgorithmTags;
|
||||
@ -116,7 +115,6 @@ import static org.hamcrest.Matchers.hasToString;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
@LuceneTestCase.SuppressFileSystems("*")
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30900")
|
||||
public class InstallPluginCommandTests extends ESTestCase {
|
||||
|
||||
private InstallPluginCommand skipJarHellCommand;
|
||||
|
@ -23,7 +23,6 @@ import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.MockTerminal;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
206
docs/java-rest/high-level/cluster/health.asciidoc
Normal file
206
docs/java-rest/high-level/cluster/health.asciidoc
Normal file
@ -0,0 +1,206 @@
|
||||
[[java-rest-high-cluster-health]]
|
||||
=== Cluster Health API
|
||||
|
||||
The Cluster Health API allows getting cluster health.
|
||||
|
||||
[[java-rest-high-cluster-health-request]]
|
||||
==== Cluster Health Request
|
||||
|
||||
A `ClusterHealthRequest`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request]
|
||||
--------------------------------------------------
|
||||
There are no required parameters. By default, the client will check all indices and will not wait
|
||||
for any events.
|
||||
|
||||
==== Indices
|
||||
|
||||
Indices which should be checked can be passed in the constructor:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-indices-ctr]
|
||||
--------------------------------------------------
|
||||
|
||||
Or using the corresponding setter method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-indices-setter]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Other parameters
|
||||
|
||||
Other parameters can be passed only through setter methods:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout for the request as a `TimeValue`. Defaults to 30 seconds
|
||||
<2> As a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-master-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`. Defaults to the same as `timeout`
|
||||
<2> As a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-status]
|
||||
--------------------------------------------------
|
||||
<1> The status to wait (e.g. `green`, `yellow`, or `red`). Accepts a `ClusterHealthStatus` value.
|
||||
<2> Using predefined method
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-events]
|
||||
--------------------------------------------------
|
||||
<1> The priority of the events to wait for. Accepts a `Priority` value.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-level]
|
||||
--------------------------------------------------
|
||||
<1> The level of detail of the returned health information. Accepts a `ClusterHealthRequest.Level` value.
|
||||
Default value is `cluster`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-relocation]
|
||||
--------------------------------------------------
|
||||
<1> Wait for 0 relocating shards. Defaults to `false`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-initializing]
|
||||
--------------------------------------------------
|
||||
<1> Wait for 0 initializing shards. Defaults to `false`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-nodes]
|
||||
--------------------------------------------------
|
||||
<1> Wait for `N` nodes in the cluster. Defaults to `0`
|
||||
<2> Using `>=N`, `<=N`, `>N` and `<N` notation
|
||||
<3> Using `ge(N)`, `le(N)`, `gt(N)`, `lt(N)` notation
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-wait-active]
|
||||
--------------------------------------------------
|
||||
|
||||
<1> Wait for all shards to be active in the cluster
|
||||
<2> Wait for `N` shards to be active in the cluster
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-request-local]
|
||||
--------------------------------------------------
|
||||
<1> Non-master node can be used for this request. Defaults to `false`
|
||||
|
||||
[[java-rest-high-cluster-health-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-cluster-health-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a cluster health request requires both the
|
||||
`ClusterHealthRequest` instance and an `ActionListener` instance to be
|
||||
passed to the asynchronous method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `ClusterHealthRequest` to execute and the `ActionListener` to use
|
||||
when the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `ClusterHealthResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of a failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-cluster-health-response]]
|
||||
==== Cluster Health Response
|
||||
|
||||
The returned `ClusterHealthResponse` contains the next information about the
|
||||
cluster:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-general]
|
||||
--------------------------------------------------
|
||||
<1> Name of the cluster
|
||||
<2> Cluster status (`green`, `yellow` or `red`)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-request-status]
|
||||
--------------------------------------------------
|
||||
<1> Whether request was timed out while processing
|
||||
<2> Status of the request (`OK` or `REQUEST_TIMEOUT`). Other errors will be thrown as exceptions
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-nodes]
|
||||
--------------------------------------------------
|
||||
<1> Number of nodes in the cluster
|
||||
<2> Number of data nodes in the cluster
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-shards]
|
||||
--------------------------------------------------
|
||||
<1> Number of active shards
|
||||
<2> Number of primary active shards
|
||||
<3> Number of relocating shards
|
||||
<4> Number of initializing shards
|
||||
<5> Number of unassigned shards
|
||||
<6> Number of unassigned shards that are currently being delayed
|
||||
<7> Percent of active shards
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-task]
|
||||
--------------------------------------------------
|
||||
<1> Maximum wait time of all tasks in the queue
|
||||
<2> Number of currently pending tasks
|
||||
<3> Number of async fetches that are currently ongoing
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-indices]
|
||||
--------------------------------------------------
|
||||
<1> Detailed information about indices in the cluster
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-index]
|
||||
--------------------------------------------------
|
||||
<1> Detailed information about a specific index
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[health-response-shard-details]
|
||||
--------------------------------------------------
|
||||
<1> Detailed information about a specific shard
|
@ -144,3 +144,13 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl
|
||||
|
||||
In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance
|
||||
will be referenced as `client`.
|
||||
|
||||
[[java-rest-hight-getting-started-request-options]]
|
||||
=== RequestOptions
|
||||
|
||||
All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can
|
||||
use to customize the request in ways that won't change how Elasticsearch
|
||||
executes the request. For example, this is the place where you'd specify a
|
||||
`NodeSelector` to control which node receives the request. See the
|
||||
<<java-rest-low-usage-request-options,low level client documentation>> for
|
||||
more examples of customizing the options.
|
||||
|
94
docs/java-rest/high-level/indices/get_alias.asciidoc
Normal file
94
docs/java-rest/high-level/indices/get_alias.asciidoc
Normal file
@ -0,0 +1,94 @@
|
||||
[[java-rest-high-get-alias]]
|
||||
=== Get Alias API
|
||||
|
||||
[[java-rest-high-get-alias-request]]
|
||||
==== Get Alias Request
|
||||
|
||||
The Get Alias API uses `GetAliasesRequest` as its request object.
|
||||
One or more aliases can be optionally provided either at construction
|
||||
time or later on through the relevant setter method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-request]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-request-alias]
|
||||
--------------------------------------------------
|
||||
<1> One or more aliases to retrieve
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-request-indices]
|
||||
--------------------------------------------------
|
||||
<1> The index or indices that the alias is associated with
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-request-indicesOptions]
|
||||
--------------------------------------------------
|
||||
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
|
||||
how wildcard expressions are expanded when looking for aliases that belong to
|
||||
specified indices.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-request-local]
|
||||
--------------------------------------------------
|
||||
<1> The `local` flag (defaults to `false`) controls whether the aliases need
|
||||
to be looked up in the local cluster state or in the cluster state held by
|
||||
the elected master node
|
||||
|
||||
[[java-rest-high-get-alias-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-get-alias-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a get alias request requires both a `GetAliasesRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetAliasesRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for the `Boolean` response looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-get-alias-response]]
|
||||
==== Get Alias Response
|
||||
|
||||
The returned `GetAliasesResponse` allows to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-alias-response]
|
||||
--------------------------------------------------
|
||||
<1> Retrieves a map of indices and their aliases
|
73
docs/java-rest/high-level/indices/get_templates.asciidoc
Normal file
73
docs/java-rest/high-level/indices/get_templates.asciidoc
Normal file
@ -0,0 +1,73 @@
|
||||
[[java-rest-high-get-templates]]
|
||||
=== Get Templates API
|
||||
|
||||
The Get Templates API allows to retrieve a list of index templates by name.
|
||||
|
||||
[[java-rest-high-get-templates-request]]
|
||||
==== Get Index Templates Request
|
||||
|
||||
A `GetIndexTemplatesRequest` specifies one or several names of the index templates to get.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-request]
|
||||
--------------------------------------------------
|
||||
<1> A single index template name
|
||||
<2> Multiple index template names
|
||||
<3> An index template name using wildcard
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
[[java-rest-high-get-templates-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-get-templates-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a get index templates request requires a `GetTemplatesRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetTemplatesRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `GetTemplatesResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-get-templates-response]]
|
||||
==== Get Templates Response
|
||||
|
||||
The returned `GetTemplatesResponse` consists a list of matching index templates.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[get-templates-response]
|
||||
--------------------------------------------------
|
||||
<1> A list of matching index templates
|
||||
|
@ -80,6 +80,7 @@ Mapping Management::
|
||||
Alias Management::
|
||||
* <<java-rest-high-update-aliases>>
|
||||
* <<java-rest-high-exists-alias>>
|
||||
* <<java-rest-high-get-alias>>
|
||||
|
||||
include::indices/create_index.asciidoc[]
|
||||
include::indices/delete_index.asciidoc[]
|
||||
@ -98,17 +99,21 @@ include::indices/put_mapping.asciidoc[]
|
||||
include::indices/get_mappings.asciidoc[]
|
||||
include::indices/update_aliases.asciidoc[]
|
||||
include::indices/exists_alias.asciidoc[]
|
||||
include::indices/get_alias.asciidoc[]
|
||||
include::indices/put_settings.asciidoc[]
|
||||
include::indices/get_settings.asciidoc[]
|
||||
include::indices/put_template.asciidoc[]
|
||||
include::indices/get_templates.asciidoc[]
|
||||
|
||||
== Cluster APIs
|
||||
|
||||
The Java High Level REST Client supports the following Cluster APIs:
|
||||
|
||||
* <<java-rest-high-cluster-put-settings>>
|
||||
* <<java-rest-high-cluster-health>>
|
||||
|
||||
include::cluster/put_settings.asciidoc[]
|
||||
include::cluster/health.asciidoc[]
|
||||
|
||||
== Ingest APIs
|
||||
The Java High Level REST Client supports the following Ingest APIs:
|
||||
|
@ -55,7 +55,7 @@ dependencies {
|
||||
Once a `RestClient` instance has been created as shown in <<java-rest-low-usage-initialization>>,
|
||||
a `Sniffer` can be associated to it. The `Sniffer` will make use of the provided `RestClient`
|
||||
to periodically (every 5 minutes by default) fetch the list of current nodes from the cluster
|
||||
and update them by calling `RestClient#setHosts`.
|
||||
and update them by calling `RestClient#setNodes`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
@ -105,7 +105,7 @@ on failure is not enabled like explained above.
|
||||
The Elasticsearch Nodes Info api doesn't return the protocol to use when
|
||||
connecting to the nodes but only their `host:port` key-pair, hence `http`
|
||||
is used by default. In case `https` should be used instead, the
|
||||
`ElasticsearchHostsSniffer` instance has to be manually created and provided
|
||||
`ElasticsearchNodesSniffer` instance has to be manually created and provided
|
||||
as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
@ -125,12 +125,12 @@ cluster, the ones that have responded until then.
|
||||
include-tagged::{doc-tests}/SnifferDocumentation.java[sniff-request-timeout]
|
||||
--------------------------------------------------
|
||||
|
||||
Also, a custom `HostsSniffer` implementation can be provided for advanced
|
||||
use-cases that may require fetching the hosts from external sources rather
|
||||
Also, a custom `NodesSniffer` implementation can be provided for advanced
|
||||
use-cases that may require fetching the `Node`s from external sources rather
|
||||
than from Elasticsearch:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer]
|
||||
include-tagged::{doc-tests}/SnifferDocumentation.java[custom-nodes-sniffer]
|
||||
--------------------------------------------------
|
||||
<1> Fetch the hosts from the external source
|
||||
|
@ -271,24 +271,51 @@ a `ContentType` of `application/json`.
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter]
|
||||
--------------------------------------------------
|
||||
|
||||
And you can add one or more headers to send with the request:
|
||||
[[java-rest-low-usage-request-options]]
|
||||
==== RequestOptions
|
||||
|
||||
The `RequestOptions` class holds parts of the request that should be shared
|
||||
between many requests in the same application. You can make a singleton
|
||||
instance and share it between all requests:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton]
|
||||
--------------------------------------------------
|
||||
<1> Add any headers needed by all requests.
|
||||
<2> Set a `NodeSelector`.
|
||||
<3> Customize the response consumer.
|
||||
|
||||
`addHeader` is for headers that are required for authorization or to work with
|
||||
a proxy in front of Elasticsearch. There is no need to set the `Content-Type`
|
||||
header because the client will automatically set that from the `HttpEntity`
|
||||
attached to the request.
|
||||
|
||||
You can set the `NodeSelector` which controls which nodes will receive
|
||||
requests. `NodeSelector.NOT_MASTER_ONLY` is a good choice.
|
||||
|
||||
You can also customize the response consumer used to buffer the asynchronous
|
||||
responses. The default consumer will buffer up to 100MB of response on the
|
||||
JVM heap. If the response is larger then the request will fail. You could,
|
||||
for example, lower the maximum size which might be useful if you are running
|
||||
in a heap constrained environment:
|
||||
in a heap constrained environment like the exmaple above.
|
||||
|
||||
Once you've created the singleton you can use it when making requests:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer]
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-set-singleton]
|
||||
--------------------------------------------------
|
||||
|
||||
You can also customize these options on a per request basis. For example, this
|
||||
adds an extra header:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize]
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
==== Multiple parallel asynchronous actions
|
||||
|
||||
The client is quite happy to execute many actions in parallel. The following
|
||||
|
@ -1,8 +1,6 @@
|
||||
[[search-aggregations-metrics-scripted-metric-aggregation]]
|
||||
=== Scripted Metric Aggregation
|
||||
|
||||
experimental[]
|
||||
|
||||
A metric aggregation that executes using scripts to provide a metric output.
|
||||
|
||||
Example:
|
||||
|
@ -104,10 +104,19 @@ The cluster health API accepts the following request parameters:
|
||||
Alternatively, it is possible to use `ge(N)`, `le(N)`, `gt(N)` and
|
||||
`lt(N)` notation.
|
||||
|
||||
`wait_for_events`::
|
||||
Can be one of `immediate`, `urgent`, `high`, `normal`, `low`, `languid`.
|
||||
Wait until all currently queued events with the given priority are processed.
|
||||
|
||||
`timeout`::
|
||||
A time based parameter controlling how long to wait if one of
|
||||
the wait_for_XXX are provided. Defaults to `30s`.
|
||||
|
||||
`master_timeout`::
|
||||
A time based parameter controlling how long to wait if the master has not been
|
||||
discovered yet or disconnected.
|
||||
If not provided, uses the same value as `timeout`.
|
||||
|
||||
`local`::
|
||||
If `true` returns the local node information and does not provide
|
||||
the state from master node. Default: `false`.
|
||||
|
@ -1325,7 +1325,7 @@ This pipeline will insert these named captures as new fields within the document
|
||||
// NOTCONSOLE
|
||||
|
||||
[[custom-patterns]]
|
||||
==== Custom Patterns and Pattern Files
|
||||
==== Custom Patterns
|
||||
|
||||
The Grok processor comes pre-packaged with a base set of pattern. These patterns may not always have
|
||||
what you are looking for. Pattern have a very basic format. Each entry describes has a name and the pattern itself.
|
||||
@ -1512,6 +1512,24 @@ The above request will return a response body containing a key-value representat
|
||||
|
||||
This can be useful to reference as the built-in patterns change across versions.
|
||||
|
||||
[[grok-watchdog]]
|
||||
==== Grok watchdog
|
||||
|
||||
Grok expressions that take too long to execute are interrupted and
|
||||
the grok processor then fails with an exception. The grok
|
||||
processor has a watchdog thread that determines when evaluation of
|
||||
a grok expression takes too long and is controlled by the following
|
||||
settings:
|
||||
|
||||
[[grok-watchdog-options]]
|
||||
.Grok watchdog settings
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Default | Description
|
||||
| `ingest.grok.watchdog.interval` | 1s | How often to check whether there are grok evaluations that take longer than the maximum allowed execution time.
|
||||
| `ingest.grok.watchdog.max_execution_time` | 1s | The maximum allowed execution of a grok expression evaluation.
|
||||
|======
|
||||
|
||||
[[gsub-processor]]
|
||||
=== Gsub Processor
|
||||
Converts a string field by applying a regular expression and a replacement.
|
||||
|
@ -36,6 +36,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
|
||||
* <<breaking_70_settings_changes>>
|
||||
* <<breaking_70_scripting_changes>>
|
||||
* <<breaking_70_snapshotstats_changes>>
|
||||
* <<breaking_70_restclient_changes>>
|
||||
|
||||
include::migrate_7_0/aggregations.asciidoc[]
|
||||
include::migrate_7_0/analysis.asciidoc[]
|
||||
@ -49,4 +50,5 @@ include::migrate_7_0/api.asciidoc[]
|
||||
include::migrate_7_0/java.asciidoc[]
|
||||
include::migrate_7_0/settings.asciidoc[]
|
||||
include::migrate_7_0/scripting.asciidoc[]
|
||||
include::migrate_7_0/snapshotstats.asciidoc[]
|
||||
include::migrate_7_0/snapshotstats.asciidoc[]
|
||||
include::migrate_7_0/restclient.asciidoc[]
|
20
docs/reference/migration/migrate_7_0/restclient.asciidoc
Normal file
20
docs/reference/migration/migrate_7_0/restclient.asciidoc
Normal file
@ -0,0 +1,20 @@
|
||||
[[breaking_70_restclient_changes]]
|
||||
=== High-level REST client changes
|
||||
|
||||
==== API methods accepting `Header` argument have been removed
|
||||
|
||||
All API methods accepting headers as a `Header` varargs argument, deprecated
|
||||
since 6.4, have been removed in favour of the newly introduced methods that
|
||||
accept instead a `RequestOptions` argument. In case you are not specifying any
|
||||
header, e.g. `client.index(indexRequest)` becomes
|
||||
`client.index(indexRequest, RequestOptions.DEFAULT)`.
|
||||
In case you are specifying headers
|
||||
e.g. `client.index(indexRequest, new Header("name" "value"))` becomes
|
||||
`client.index(indexRequest, RequestOptions.DEFAULT.toBuilder().addHeader("name", "value").build());`
|
||||
|
||||
==== Cluster Health API default to `cluster` level
|
||||
|
||||
The Cluster Health API used to default to `shards` level to ease migration
|
||||
from transport client that doesn't support the `level` parameter and always
|
||||
returns information including indices and shards details. The level default
|
||||
value has been aligned with the Elasticsearch default level: `cluster`.
|
@ -84,3 +84,9 @@ for a particular index with the index setting `index.max_regex_length`.
|
||||
|
||||
Search requests with extra content after the main object will no longer be accepted
|
||||
by the `_search` endpoint. A parsing exception will be thrown instead.
|
||||
|
||||
==== Semantics changed for `max_concurrent_shard_requests`
|
||||
|
||||
`max_concurrent_shard_requests` used to limit the total number of concurrent shard
|
||||
requests a single high level search request can execute. In 7.0 this changed to be the
|
||||
max number of concurrent shard requests per node. The default is now `5`.
|
||||
|
@ -112,6 +112,15 @@ xpack.security.audit.index.settings:
|
||||
number_of_replicas: 1
|
||||
----------------------------
|
||||
--
|
||||
+
|
||||
--
|
||||
NOTE: These settings apply to the local audit indices, as well as to the
|
||||
<<remote-audit-settings, remote audit indices>>, but only if the remote cluster
|
||||
does *not* have {security} installed, or the {es} versions are different.
|
||||
If the remote cluster has {security} installed, and the versions coincide, the
|
||||
settings for the audit indices there will take precedence,
|
||||
even if they are unspecified (i.e. left to defaults).
|
||||
--
|
||||
|
||||
[[remote-audit-settings]]
|
||||
==== Remote Audit Log Indexing Configuration Settings
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.common.unit;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@ -76,15 +76,24 @@ public final class Grok {
|
||||
private final Map<String, String> patternBank;
|
||||
private final boolean namedCaptures;
|
||||
private final Regex compiledExpression;
|
||||
private final ThreadWatchdog threadWatchdog;
|
||||
|
||||
public Grok(Map<String, String> patternBank, String grokPattern) {
|
||||
this(patternBank, grokPattern, true);
|
||||
this(patternBank, grokPattern, true, ThreadWatchdog.noop());
|
||||
}
|
||||
|
||||
public Grok(Map<String, String> patternBank, String grokPattern, ThreadWatchdog threadWatchdog) {
|
||||
this(patternBank, grokPattern, true, threadWatchdog);
|
||||
}
|
||||
|
||||
Grok(Map<String, String> patternBank, String grokPattern, boolean namedCaptures) {
|
||||
this(patternBank, grokPattern, namedCaptures, ThreadWatchdog.noop());
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
Grok(Map<String, String> patternBank, String grokPattern, boolean namedCaptures) {
|
||||
private Grok(Map<String, String> patternBank, String grokPattern, boolean namedCaptures, ThreadWatchdog threadWatchdog) {
|
||||
this.patternBank = patternBank;
|
||||
this.namedCaptures = namedCaptures;
|
||||
this.threadWatchdog = threadWatchdog;
|
||||
|
||||
for (Map.Entry<String, String> entry : patternBank.entrySet()) {
|
||||
String name = entry.getKey();
|
||||
@ -163,7 +172,13 @@ public final class Grok {
|
||||
byte[] grokPatternBytes = grokPattern.getBytes(StandardCharsets.UTF_8);
|
||||
Matcher matcher = GROK_PATTERN_REGEX.matcher(grokPatternBytes);
|
||||
|
||||
int result = matcher.search(0, grokPatternBytes.length, Option.NONE);
|
||||
int result;
|
||||
try {
|
||||
threadWatchdog.register();
|
||||
result = matcher.search(0, grokPatternBytes.length, Option.NONE);
|
||||
} finally {
|
||||
threadWatchdog.unregister();
|
||||
}
|
||||
if (result != -1) {
|
||||
Region region = matcher.getEagerRegion();
|
||||
String namedPatternRef = groupMatch(NAME_GROUP, region, grokPattern);
|
||||
@ -205,7 +220,13 @@ public final class Grok {
|
||||
*/
|
||||
public boolean match(String text) {
|
||||
Matcher matcher = compiledExpression.matcher(text.getBytes(StandardCharsets.UTF_8));
|
||||
int result = matcher.search(0, text.length(), Option.DEFAULT);
|
||||
int result;
|
||||
try {
|
||||
threadWatchdog.register();
|
||||
result = matcher.search(0, text.length(), Option.DEFAULT);
|
||||
} finally {
|
||||
threadWatchdog.unregister();
|
||||
}
|
||||
return (result != -1);
|
||||
}
|
||||
|
||||
@ -220,8 +241,20 @@ public final class Grok {
|
||||
byte[] textAsBytes = text.getBytes(StandardCharsets.UTF_8);
|
||||
Map<String, Object> fields = new HashMap<>();
|
||||
Matcher matcher = compiledExpression.matcher(textAsBytes);
|
||||
int result = matcher.search(0, textAsBytes.length, Option.DEFAULT);
|
||||
if (result != -1 && compiledExpression.numberOfNames() > 0) {
|
||||
int result;
|
||||
try {
|
||||
threadWatchdog.register();
|
||||
result = matcher.search(0, textAsBytes.length, Option.DEFAULT);
|
||||
} finally {
|
||||
threadWatchdog.unregister();
|
||||
}
|
||||
if (result == Matcher.INTERRUPTED) {
|
||||
throw new RuntimeException("grok pattern matching was interrupted after [" +
|
||||
threadWatchdog.maxExecutionTimeInMillis() + "] ms");
|
||||
} else if (result == Matcher.FAILED) {
|
||||
// TODO: I think we should throw an error here?
|
||||
return null;
|
||||
} else if (compiledExpression.numberOfNames() > 0) {
|
||||
Region region = matcher.getEagerRegion();
|
||||
for (Iterator<NameEntry> entry = compiledExpression.namedBackrefIterator(); entry.hasNext();) {
|
||||
NameEntry e = entry.next();
|
||||
@ -235,13 +268,9 @@ public final class Grok {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return fields;
|
||||
} else if (result != -1) {
|
||||
return fields;
|
||||
}
|
||||
return null;
|
||||
return fields;
|
||||
}
|
||||
|
||||
public static Map<String, String> getBuiltinPatterns() {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user