Merge branch 'master' into ccr
* master: (68 commits)
  [DOCS] Removes X-Pack Elasticsearch release notes (#30272)
  Correct an example in the top-level suggester documentation. (#30224)
  [DOCS] Removes broken link
  [DOCS] Adds file realm configuration details (#30221)
  [DOCS] Adds PKI realm configuration details (#30225)
  Fix a reference to match_phrase_prefix in the match query docs. (#30282)
  Fix failure for validate API on a terms query (#29483)
  [DOCS] Fix 6.4-specific link in changelog (#30314)
  Remove RepositoriesMetaData variadic constructor (#29569)
  Test: increase authentication logging for debugging
  [DOCS] Removes redundant SAML realm settings (#30196)
  REST Client: Add Request object flavored methods (#29623)
  [DOCS] Adds changelog to Elasticsearch Reference (#30271)
  [DOCS] Fixes section error
  SQL: Teach the CLI to ignore empty commands (#30265)
  [DOCS] Adds Active Directory realm configuration details (#30223)
  [DOCS] Removes redundant file realm settings (#30192)
  [DOCS] Fixes users command name (#30275)
  Build: Move gradle wrapper jar to a dot dir (#30146)
  Build: Log a warning if disabling reindex-from-old (#30304)
  ...
commit d52ca33bd9

Binary file not shown.
@@ -1,6 +1,6 @@
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.7-all.zip
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-zipStorePath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-4.7-all.zip
 zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
 distributionSha256Sum=203f4537da8b8075e38c036a6d14cb71b1149de5bf0a8f6db32ac2833a1d1294
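Note: distributionSha256Sum is a standard Gradle wrapper property. When it is present, the wrapper verifies the SHA-256 digest of the downloaded distribution before unpacking it and aborts on a mismatch. The build.gradle change further down is what appends this line to the properties file.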
@@ -337,6 +337,7 @@ export BATS=/project/build/bats
 export BATS_UTILS=/project/build/packaging/bats/utils
 export BATS_TESTS=/project/build/packaging/bats/tests
 export PACKAGING_ARCHIVES=/project/build/packaging/archives
+export PACKAGING_TESTS=/project/build/packaging/tests
 VARS
 cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
 Defaults env_keep += "ZIP"
@@ -347,6 +348,7 @@ Defaults env_keep += "BATS"
 Defaults env_keep += "BATS_UTILS"
 Defaults env_keep += "BATS_TESTS"
 Defaults env_keep += "PACKAGING_ARCHIVES"
+Defaults env_keep += "PACKAGING_TESTS"
 SUDOERS_VARS
 chmod 0440 /etc/sudoers.d/elasticsearch_vars
 SHELL
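The new PACKAGING_TESTS export and its env_keep entry mirror the existing BATS variables: the packaging tests run under sudo inside the VM, and sudo drops the environment unless each variable is explicitly whitelisted in /etc/sudoers.d/elasticsearch_vars.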
build.gradle (29 changed lines)

@@ -440,25 +440,18 @@ task run(type: Run) {
   impliesSubProjects = true
 }
 
-task wrapper(type: Wrapper)
-
-gradle.projectsEvaluated {
-
-  allprojects {
-    tasks.withType(Wrapper) { Wrapper wrapper ->
-      wrapper.distributionType = DistributionType.ALL
-
-      wrapper.doLast {
-        final DistributionLocator locator = new DistributionLocator()
-        final GradleVersion version = GradleVersion.version(wrapper.gradleVersion)
-        final URI distributionUri = locator.getDistributionFor(version, wrapper.distributionType.name().toLowerCase(Locale.ENGLISH))
-        final URI sha256Uri = new URI(distributionUri.toString() + ".sha256")
-        final String sha256Sum = new String(sha256Uri.toURL().bytes)
-        wrapper.getPropertiesFile() << "distributionSha256Sum=${sha256Sum}\n"
-      }
-    }
-  }
-}
+task wrapper(type: Wrapper) {
+  distributionType = DistributionType.ALL
+  jarFile = file('.gradle-wrapper/gradle-wrapper.jar')
+  doLast {
+    final DistributionLocator locator = new DistributionLocator()
+    final GradleVersion version = GradleVersion.version(gradleVersion)
+    final URI distributionUri = locator.getDistributionFor(version, distributionType.name().toLowerCase(Locale.ENGLISH))
+    final URI sha256Uri = new URI(distributionUri.toString() + ".sha256")
+    final String sha256Sum = new String(sha256Uri.toURL().bytes)
+    final String existingProperties = getPropertiesFile().getText('UTF-8')
+    getPropertiesFile().setText("${existingProperties}distributionSha256Sum=${sha256Sum}\n", 'UTF-8')
+  }
+}
 
 static void assertLinesInFile(final Path path, final List<String> expectedLines) {
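For illustration, the checksum the task appends is the same value a client can verify locally. A minimal, self-contained Java sketch (not part of this commit; class name and local path are hypothetical) that checks a downloaded distribution against the pinned sum:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    // Sketch only: verifies a local gradle-4.7-all.zip against the
    // distributionSha256Sum value pinned in gradle-wrapper.properties.
    public class VerifyWrapperDistribution {
        public static void main(String[] args) throws Exception {
            Path zip = Paths.get("gradle-4.7-all.zip"); // hypothetical local path
            String pinned = "203f4537da8b8075e38c036a6d14cb71b1149de5bf0a8f6db32ac2833a1d1294";

            MessageDigest digest = MessageDigest.getInstance("SHA-256");
            byte[] hash = digest.digest(Files.readAllBytes(zip));

            StringBuilder hex = new StringBuilder();
            for (byte b : hash) {
                hex.append(String.format("%02x", b));
            }
            System.out.println(hex.toString().equals(pinned) ? "checksum OK" : "checksum MISMATCH");
        }
    }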
@@ -549,6 +549,22 @@ class BuildPlugin implements Plugin<Project> {
             javadoc.classpath = javadoc.getClasspath().filter { f ->
                 return classes.contains(f) == false
             }
+            /*
+             * Force html5 on projects that support it to silence the warning
+             * that `javadoc` will change its defaults in the future.
+             *
+             * But not all of our javadoc is actually valid html5. So we
+             * have to become valid incrementally. We only set html5 on the
+             * projects we have converted so that we still get the annoying
+             * warning on the unconverted ones. That will give us an
+             * incentive to convert them....
+             */
+            List html4Projects = [
+                ':server',
+            ]
+            if (false == html4Projects.contains(project.path)) {
+                javadoc.options.addBooleanOption('html5', true)
+            }
         }
         configureJavadocJar(project)
     }
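For reference, javadoc.options.addBooleanOption('html5', true) makes Gradle pass the standard -html5 flag to the javadoc tool, which is what silences the changing-defaults warning on the converted projects.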
@@ -41,6 +41,9 @@ class VagrantPropertiesExtension {
     @Input
     Boolean inheritTestUtils
 
+    @Input
+    String testClass
+
     VagrantPropertiesExtension(List<String> availableBoxes) {
         this.boxes = availableBoxes
         this.batsDir = 'src/test/resources/packaging'
@@ -51,6 +51,7 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
 
     private static final PACKAGING_CONFIGURATION = 'packaging'
+    private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest'
     private static final BATS = 'bats'
     private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
     private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest"
@@ -66,6 +67,7 @@ class VagrantTestPlugin implements Plugin<Project> {
 
         // Creates custom configurations for Bats testing files (and associated scripts and archives)
         createPackagingConfiguration(project)
+        project.configurations.create(PACKAGING_TEST_CONFIGURATION)
 
         // Creates all the main Vagrant tasks
         createVagrantTasks(project)
@@ -144,10 +146,12 @@ class VagrantTestPlugin implements Plugin<Project> {
     }
 
     private static void createCleanTask(Project project) {
-        project.tasks.create('clean', Delete.class) {
-            description 'Clean the project build directory'
-            group 'Build'
-            delete project.buildDir
+        if (project.tasks.findByName('clean') == null) {
+            project.tasks.create('clean', Delete.class) {
+                description 'Clean the project build directory'
+                group 'Build'
+                delete project.buildDir
+            }
         }
     }
 
@@ -174,6 +178,18 @@ class VagrantTestPlugin implements Plugin<Project> {
             from project.configurations[PACKAGING_CONFIGURATION]
         }
 
+        File testsDir = new File(packagingDir, 'tests')
+        Copy copyPackagingTests = project.tasks.create('copyPackagingTests', Copy) {
+            into testsDir
+            from project.configurations[PACKAGING_TEST_CONFIGURATION]
+        }
+
+        Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) {
+            dependsOn copyPackagingTests
+            file "${testsDir}/run-tests.sh"
+            contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}"
+        }
+
         Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
             dependsOn copyPackagingArchives
             file "${archivesDir}/version"
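The generated run-tests.sh hands a single fully qualified class name to JUnit's console runner. A minimal sketch of a test class that could be plugged in through esvagrant.testClass (the class name here is hypothetical, not from this commit):

    import org.junit.Test;
    import static org.junit.Assert.assertNotNull;

    // Hypothetical packaging test: anything runnable by
    // "java -cp "$PACKAGING_TESTS/*" org.junit.runner.JUnitCore <class>" works.
    public class ArchiveSmokeTests {
        @Test
        public void packagingArchivesDirectoryIsConfigured() {
            // PACKAGING_ARCHIVES is exported by the Vagrantfile provisioning above.
            assertNotNull(System.getenv("PACKAGING_ARCHIVES"));
        }
    }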
@@ -234,7 +250,8 @@ class VagrantTestPlugin implements Plugin<Project> {
 
         Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
         vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
-        vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
+        vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript
+        vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
         vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils
     }
 
@@ -393,20 +410,29 @@ class VagrantTestPlugin implements Plugin<Project> {
             packagingTest.dependsOn(batsPackagingTest)
         }
 
-        // This task doesn't do anything yet. In the future it will execute a jar containing tests on the vm
-        Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest")
-        groovyPackagingTest.dependsOn(up)
-        groovyPackagingTest.finalizedBy(halt)
-
-        TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path)
-        groovyPackagingTest.doFirst {
-            project.gradle.addListener(groovyPackagingReproListener)
+        Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) {
+            command 'ssh'
+            boxName box
+            environmentVars vagrantEnvVars
+            dependsOn up, setupPackagingTest
+            finalizedBy halt
+            args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\""
         }
-        groovyPackagingTest.doLast {
-            project.gradle.removeListener(groovyPackagingReproListener)
+
+        // todo remove this onlyIf after all packaging tests are consolidated
+        javaPackagingTest.onlyIf {
+            project.extensions.esvagrant.testClass != null
+        }
+
+        TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)
+        javaPackagingTest.doFirst {
+            project.gradle.addListener(javaPackagingReproListener)
+        }
+        javaPackagingTest.doLast {
+            project.gradle.removeListener(javaPackagingReproListener)
         }
         if (project.extensions.esvagrant.boxes.contains(box)) {
-            packagingTest.dependsOn(groovyPackagingTest)
+            packagingTest.dependsOn(javaPackagingTest)
         }
 
         Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
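Unlike the placeholder groovyPackagingTest it replaces, javaPackagingTest is a real VagrantCommandTask: it executes run-tests.sh inside the box via vagrant ssh --command, always halts the box afterwards through finalizedBy, and is skipped entirely (onlyIf) until esvagrant.testClass is set.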
@@ -129,7 +129,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
     }
 
     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
      */
     public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
         request.timeout(timeout);
@@ -137,7 +138,8 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
     }
 
     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
     */
     public final NoopBulkRequestBuilder setTimeout(String timeout) {
         request.timeout(timeout);
@@ -151,4 +153,3 @@ public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, Bu
         return request.numberOfActions();
     }
 }
-
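These <tt> to {@code ...} conversions line up with the html5 change in BuildPlugin above: <tt> is not valid HTML5, so javadoc run with -html5 rejects it, while {@code ...} renders equivalently under either doclet mode.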
@@ -142,7 +142,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
 
     /**
      * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
-     * <tt>_local</tt> to prefer local shards or a custom value, which guarantees that the same order
+     * {@code _local} to prefer local shards or a custom value, which guarantees that the same order
      * will be used across different requests.
      */
     public NoopSearchRequestBuilder setPreference(String preference) {
@@ -188,7 +188,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
     }
 
     /**
-     * From index to start the search from. Defaults to <tt>0</tt>.
+     * From index to start the search from. Defaults to {@code 0}.
     */
     public NoopSearchRequestBuilder setFrom(int from) {
         sourceBuilder().from(from);
@@ -196,7 +196,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
     }
 
     /**
-     * The number of search hits to return. Defaults to <tt>10</tt>.
+     * The number of search hits to return. Defaults to {@code 10}.
     */
     public NoopSearchRequestBuilder setSize(int size) {
         sourceBuilder().size(size);
@@ -349,7 +349,7 @@ public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest
 
     /**
      * Applies when sorting, and controls if scores will be tracked as well. Defaults to
-     * <tt>false</tt>.
+     * {@code false}.
     */
     public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
         sourceBuilder().trackScores(trackScores);
@@ -48,7 +48,7 @@ public final class ClusterClient {
     */
     public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers)
             throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings,
+        return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
                 ClusterUpdateSettingsResponse::fromXContent, emptySet(), headers);
     }
 
@@ -60,7 +60,7 @@ public final class ClusterClient {
     */
     public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest,
             ActionListener<ClusterUpdateSettingsResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings,
+        restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
                 ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
     }
 }
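All of the ClusterClient and IndicesClient hunks below are the same mechanical rename, which works because the converter is passed as a method reference. A self-contained toy illustration of the pattern (assumption: simplified stand-ins, not the actual client classes):

    import java.util.function.Function;

    // Minimal demo of why renaming Request's static converters into
    // RequestConverters only touches method references: the converter is a
    // function value, so call sites are otherwise unchanged.
    public class ConverterPatternDemo {
        static class Request { final String endpoint; Request(String e) { endpoint = e; } }
        static class DeleteIndexRequest { final String index = "test"; }

        static <R> String perform(R req, Function<R, Request> converter) {
            return "HTTP " + converter.apply(req).endpoint; // pretend to execute
        }

        // Stand-in for RequestConverters.deleteIndex
        static Request deleteIndex(DeleteIndexRequest r) { return new Request("/" + r.index); }

        public static void main(String[] args) {
            System.out.println(perform(new DeleteIndexRequest(), ConverterPatternDemo::deleteIndex));
        }
    }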
@@ -74,8 +74,8 @@ public final class IndicesClient {
      * Delete Index API on elastic.co</a>
     */
     public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
+            DeleteIndexResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -85,8 +85,8 @@ public final class IndicesClient {
      * Delete Index API on elastic.co</a>
     */
     public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
+            DeleteIndexResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -96,8 +96,8 @@ public final class IndicesClient {
      * Create Index API on elastic.co</a>
     */
     public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex,
+            CreateIndexResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -107,8 +107,8 @@ public final class IndicesClient {
      * Create Index API on elastic.co</a>
     */
     public void createAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex,
+            CreateIndexResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -118,8 +118,8 @@ public final class IndicesClient {
      * Put Mapping API on elastic.co</a>
     */
     public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping,
+            PutMappingResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -130,8 +130,8 @@ public final class IndicesClient {
     */
     public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener<PutMappingResponse> listener,
                                 Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping,
+            PutMappingResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -142,7 +142,7 @@ public final class IndicesClient {
      * Index Aliases API on elastic.co</a>
     */
     public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases,
+        return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
                 IndicesAliasesResponse::fromXContent, emptySet(), headers);
     }
 
@@ -155,7 +155,7 @@ public final class IndicesClient {
     */
     public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener<IndicesAliasesResponse> listener,
                                    Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, Request::updateAliases,
+        restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
                 IndicesAliasesResponse::fromXContent, listener, emptySet(), headers);
     }
 
@@ -166,8 +166,8 @@ public final class IndicesClient {
      * Open Index API on elastic.co</a>
     */
     public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex,
+            OpenIndexResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -177,8 +177,8 @@ public final class IndicesClient {
      * Open Index API on elastic.co</a>
     */
     public void openAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex,
+            OpenIndexResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -188,8 +188,8 @@ public final class IndicesClient {
      * Close Index API on elastic.co</a>
     */
     public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
+            CloseIndexResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -199,8 +199,8 @@ public final class IndicesClient {
      * Close Index API on elastic.co</a>
     */
     public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener<CloseIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
+            CloseIndexResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -210,8 +210,8 @@ public final class IndicesClient {
      * Indices Aliases API on elastic.co</a>
     */
     public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
-            emptySet(), headers);
+        return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias,
+            RestHighLevelClient::convertExistsResponse, emptySet(), headers);
     }
 
     /**
@@ -221,8 +221,8 @@ public final class IndicesClient {
      * Indices Aliases API on elastic.co</a>
     */
     public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener<Boolean> listener, Header... headers) {
-        restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias,
+            RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
     }
 
     /**
@@ -231,7 +231,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
     */
     public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
             emptySet(), headers);
     }
 
@@ -241,7 +241,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
     */
     public void refreshAsync(RefreshRequest refreshRequest, ActionListener<RefreshResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
             listener, emptySet(), headers);
     }
 
@@ -251,7 +251,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
     */
     public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
             emptySet(), headers);
     }
 
@@ -261,7 +261,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
     */
     public void flushAsync(FlushRequest flushRequest, ActionListener<FlushResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
             listener, emptySet(), headers);
     }
 
@@ -272,8 +272,8 @@ public final class IndicesClient {
      * Force Merge API on elastic.co</a>
     */
     public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
+            ForceMergeResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -283,8 +283,8 @@ public final class IndicesClient {
      * Force Merge API on elastic.co</a>
     */
     public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener<ForceMergeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
+            ForceMergeResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
@@ -294,7 +294,7 @@ public final class IndicesClient {
      * Clear Cache API on elastic.co</a>
     */
     public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, Request::clearCache,
+        return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
                 ClearIndicesCacheResponse::fromXContent, emptySet(), headers);
     }
 
@@ -306,7 +306,7 @@ public final class IndicesClient {
     */
     public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener<ClearIndicesCacheResponse> listener,
                                 Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, Request::clearCache,
+        restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
                 ClearIndicesCacheResponse::fromXContent, listener, emptySet(), headers);
     }
 
@@ -319,7 +319,7 @@ public final class IndicesClient {
     public boolean exists(GetIndexRequest request, Header... headers) throws IOException {
         return restHighLevelClient.performRequest(
             request,
-            Request::indicesExist,
+            RequestConverters::indicesExist,
             RestHighLevelClient::convertExistsResponse,
             Collections.emptySet(),
             headers
@@ -335,7 +335,7 @@ public final class IndicesClient {
     public void existsAsync(GetIndexRequest request, ActionListener<Boolean> listener, Header... headers) {
         restHighLevelClient.performRequestAsync(
             request,
-            Request::indicesExist,
+            RequestConverters::indicesExist,
             RestHighLevelClient::convertExistsResponse,
             listener,
             Collections.emptySet(),
@@ -350,7 +350,7 @@ public final class IndicesClient {
      * Shrink Index API on elastic.co</a>
     */
     public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
             emptySet(), headers);
     }
 
@@ -361,7 +361,7 @@ public final class IndicesClient {
      * Shrink Index API on elastic.co</a>
     */
     public void shrinkAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
             listener, emptySet(), headers);
     }
 
@@ -372,7 +372,7 @@ public final class IndicesClient {
      * Split Index API on elastic.co</a>
     */
     public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
             emptySet(), headers);
     }
 
@@ -383,7 +383,7 @@ public final class IndicesClient {
      * Split Index API on elastic.co</a>
     */
     public void splitAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
             listener, emptySet(), headers);
     }
 
@@ -394,8 +394,8 @@ public final class IndicesClient {
      * Rollover Index API on elastic.co</a>
     */
     public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover,
+            RolloverResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -405,7 +405,7 @@ public final class IndicesClient {
      * Rollover Index API on elastic.co</a>
     */
     public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener<RolloverResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent,
             listener, emptySet(), headers);
     }
 
@@ -416,7 +416,7 @@ public final class IndicesClient {
      * API on elastic.co</a>
     */
     public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+        return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
                 UpdateSettingsResponse::fromXContent, emptySet(), headers);
     }
 
@@ -428,7 +428,7 @@ public final class IndicesClient {
     */
     public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener<UpdateSettingsResponse> listener,
                                  Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+        restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
                 UpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
     }
 
@ -89,117 +89,85 @@ import java.util.Collections;
|
|||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
public final class Request {
|
||||
|
||||
final class RequestConverters {
|
||||
static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;
|
||||
|
||||
private final String method;
|
||||
private final String endpoint;
|
||||
private final Map<String, String> parameters;
|
||||
private final HttpEntity entity;
|
||||
|
||||
public Request(String method, String endpoint, Map<String, String> parameters, HttpEntity entity) {
|
||||
this.method = Objects.requireNonNull(method, "method cannot be null");
|
||||
this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null");
|
||||
this.parameters = Objects.requireNonNull(parameters, "parameters cannot be null");
|
||||
this.entity = entity;
|
||||
}
|
||||
|
||||
public String getMethod() {
|
||||
return method;
|
||||
}
|
||||
|
||||
public String getEndpoint() {
|
||||
return endpoint;
|
||||
}
|
||||
|
||||
public Map<String, String> getParameters() {
|
||||
return parameters;
|
||||
}
|
||||
|
||||
public HttpEntity getEntity() {
|
||||
return entity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Request{" +
|
||||
"method='" + method + '\'' +
|
||||
", endpoint='" + endpoint + '\'' +
|
||||
", params=" + parameters +
|
||||
", hasBody=" + (entity != null) +
|
||||
'}';
|
||||
private RequestConverters() {
|
||||
// Contains only status utility methods
|
||||
}
|
||||
|
||||
static Request delete(DeleteRequest deleteRequest) {
|
||||
String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
|
||||
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withRouting(deleteRequest.routing());
|
||||
parameters.withTimeout(deleteRequest.timeout());
|
||||
parameters.withVersion(deleteRequest.version());
|
||||
parameters.withVersionType(deleteRequest.versionType());
|
||||
parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy());
|
||||
parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards());
|
||||
|
||||
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) {
|
||||
String endpoint = endpoint(deleteIndexRequest.indices());
|
||||
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(deleteIndexRequest.timeout());
|
||||
parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout());
|
||||
parameters.withIndicesOptions(deleteIndexRequest.indicesOptions());
|
||||
|
||||
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request openIndex(OpenIndexRequest openIndexRequest) {
|
||||
String endpoint = endpoint(openIndexRequest.indices(), "_open");
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(openIndexRequest.timeout());
|
||||
parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout());
|
||||
parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards());
|
||||
parameters.withIndicesOptions(openIndexRequest.indicesOptions());
|
||||
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request closeIndex(CloseIndexRequest closeIndexRequest) {
|
||||
String endpoint = endpoint(closeIndexRequest.indices(), "_close");
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(closeIndexRequest.timeout());
|
||||
parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout());
|
||||
parameters.withIndicesOptions(closeIndexRequest.indicesOptions());
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
|
||||
String endpoint = endpoint(createIndexRequest.indices());
|
||||
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(createIndexRequest.timeout());
|
||||
parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
|
||||
parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
|
||||
|
||||
HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
|
||||
request.setEntity(createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException {
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, "/_aliases");
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(indicesAliasesRequest.timeout());
|
||||
parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout());
|
||||
|
||||
HttpEntity entity = createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_aliases", parameters.getParams(), entity);
|
||||
request.setEntity(createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request putMapping(PutMappingRequest putMappingRequest) throws IOException {
|
||||
|
@ -208,63 +176,69 @@ public final class Request {
|
|||
throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API");
|
||||
}
|
||||
|
||||
String endpoint = endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type());
|
||||
Request request = new Request(HttpPut.METHOD_NAME, endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()));
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(putMappingRequest.timeout());
|
||||
parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout());
|
||||
|
||||
HttpEntity entity = createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
|
||||
request.setEntity(createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request refresh(RefreshRequest refreshRequest) {
|
||||
String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices();
|
||||
String endpoint = endpoint(indices, "_refresh");
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_refresh"));
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withIndicesOptions(refreshRequest.indicesOptions());
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request flush(FlushRequest flushRequest) {
|
||||
String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices();
|
||||
String endpoint = endpoint(indices, "_flush");
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush"));
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withIndicesOptions(flushRequest.indicesOptions());
|
||||
parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
|
||||
parameters.putParam("force", Boolean.toString(flushRequest.force()));
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request forceMerge(ForceMergeRequest forceMergeRequest) {
|
||||
String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
|
||||
String endpoint = endpoint(indices, "_forcemerge");
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge"));
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withIndicesOptions(forceMergeRequest.indicesOptions());
|
||||
parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments()));
|
||||
parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes()));
|
||||
parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush()));
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) {
|
||||
String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices();
|
||||
String endpoint = endpoint(indices, "_cache/clear");
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_cache/clear"));
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions());
|
||||
parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache()));
|
||||
parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache()));
|
||||
parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache()));
|
||||
parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields()));
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request info() {
|
||||
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
return new Request(HttpGet.METHOD_NAME, "/");
|
||||
}
|
||||
|
||||
static Request bulk(BulkRequest bulkRequest) throws IOException {
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, "/_bulk");
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withTimeout(bulkRequest.timeout());
|
||||
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
|
||||
|
||||
|
@ -273,14 +247,14 @@ public final class Request {
|
|||
// and this content-type is supported by the Bulk API.
|
||||
XContentType bulkContentType = null;
|
||||
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
|
||||
DocWriteRequest<?> request = bulkRequest.requests().get(i);
|
||||
DocWriteRequest<?> action = bulkRequest.requests().get(i);
|
||||
|
||||
DocWriteRequest.OpType opType = request.opType();
|
||||
DocWriteRequest.OpType opType = action.opType();
|
||||
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
|
||||
bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType);
|
||||
bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType);
|
||||
|
||||
} else if (opType == DocWriteRequest.OpType.UPDATE) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request;
|
||||
UpdateRequest updateRequest = (UpdateRequest) action;
|
||||
if (updateRequest.doc() != null) {
|
||||
bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
|
||||
}
|
||||
|
@ -298,30 +272,30 @@ public final class Request {
|
|||
final ContentType requestContentType = createContentType(bulkContentType);
|
||||
|
||||
ByteArrayOutputStream content = new ByteArrayOutputStream();
|
||||
for (DocWriteRequest<?> request : bulkRequest.requests()) {
|
||||
DocWriteRequest.OpType opType = request.opType();
|
||||
for (DocWriteRequest<?> action : bulkRequest.requests()) {
|
||||
DocWriteRequest.OpType opType = action.opType();
|
||||
|
||||
try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
|
||||
metadata.startObject();
|
||||
{
|
||||
metadata.startObject(opType.getLowercase());
|
||||
if (Strings.hasLength(request.index())) {
|
||||
metadata.field("_index", request.index());
|
||||
if (Strings.hasLength(action.index())) {
|
||||
metadata.field("_index", action.index());
|
||||
}
|
||||
if (Strings.hasLength(request.type())) {
|
||||
metadata.field("_type", request.type());
|
||||
if (Strings.hasLength(action.type())) {
|
||||
metadata.field("_type", action.type());
|
||||
}
|
||||
if (Strings.hasLength(request.id())) {
|
||||
metadata.field("_id", request.id());
|
||||
if (Strings.hasLength(action.id())) {
|
||||
metadata.field("_id", action.id());
|
||||
}
|
||||
if (Strings.hasLength(request.routing())) {
|
||||
metadata.field("routing", request.routing());
|
||||
if (Strings.hasLength(action.routing())) {
|
||||
metadata.field("routing", action.routing());
|
||||
}
|
||||
if (request.version() != Versions.MATCH_ANY) {
|
||||
metadata.field("version", request.version());
|
||||
if (action.version() != Versions.MATCH_ANY) {
|
||||
metadata.field("version", action.version());
|
||||
}
|
||||
|
||||
VersionType versionType = request.versionType();
|
||||
VersionType versionType = action.versionType();
|
||||
if (versionType != VersionType.INTERNAL) {
|
||||
if (versionType == VersionType.EXTERNAL) {
|
||||
metadata.field("version_type", "external");
|
||||
|
@ -333,12 +307,12 @@ public final class Request {
|
|||
}
|
||||
|
||||
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
IndexRequest indexRequest = (IndexRequest) action;
|
||||
if (Strings.hasLength(indexRequest.getPipeline())) {
|
||||
metadata.field("pipeline", indexRequest.getPipeline());
|
||||
}
|
||||
} else if (opType == DocWriteRequest.OpType.UPDATE) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request;
|
||||
UpdateRequest updateRequest = (UpdateRequest) action;
|
||||
if (updateRequest.retryOnConflict() > 0) {
|
||||
metadata.field("retry_on_conflict", updateRequest.retryOnConflict());
|
||||
}
|
||||
|
@ -357,7 +331,7 @@ public final class Request {
|
|||
|
||||
BytesRef source = null;
|
||||
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
IndexRequest indexRequest = (IndexRequest) action;
|
||||
BytesReference indexSource = indexRequest.source();
|
||||
XContentType indexXContentType = indexRequest.getContentType();
|
||||
|
||||
|
@ -369,7 +343,7 @@ public final class Request {
|
|||
}
|
||||
}
|
||||
} else if (opType == DocWriteRequest.OpType.UPDATE) {
|
||||
source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef();
|
||||
source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef();
|
||||
}
|
||||
|
||||
if (source != null) {
|
||||
|
@ -377,20 +351,22 @@ public final class Request {
|
|||
content.write(separator);
|
||||
}
|
||||
}
|
||||
|
||||
HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity);
|
||||
request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request exists(GetRequest getRequest) {
|
||||
Request request = get(getRequest);
|
||||
return new Request(HttpHead.METHOD_NAME, request.endpoint, request.parameters, null);
|
||||
return getStyleRequest(HttpHead.METHOD_NAME, getRequest);
|
||||
}
|
||||
|
||||
static Request get(GetRequest getRequest) {
|
||||
String endpoint = endpoint(getRequest.index(), getRequest.type(), getRequest.id());
|
||||
return getStyleRequest(HttpGet.METHOD_NAME, getRequest);
|
||||
}
|
||||
|
||||
Params parameters = Params.builder();
|
||||
private static Request getStyleRequest(String method, GetRequest getRequest) {
|
||||
Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id()));
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withPreference(getRequest.preference());
|
||||
parameters.withRouting(getRequest.routing());
|
||||
parameters.withRefresh(getRequest.refresh());
|
||||
|
@ -400,25 +376,28 @@ public final class Request {
|
|||
parameters.withVersionType(getRequest.versionType());
|
||||
parameters.withFetchSourceContext(getRequest.fetchSourceContext());
|
||||
|
||||
return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null);
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request multiGet(MultiGetRequest multiGetRequest) throws IOException {
|
||||
Params parameters = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, "/_mget");
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withPreference(multiGetRequest.preference());
|
||||
parameters.withRealtime(multiGetRequest.realtime());
|
||||
parameters.withRefresh(multiGetRequest.refresh());
|
||||
HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE);
|
||||
return new Request(HttpPost.METHOD_NAME, "/_mget", parameters.getParams(), entity);
|
||||
|
||||
request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request index(IndexRequest indexRequest) {
|
||||
String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME;
|
||||
|
||||
boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE);
|
||||
String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null);
|
||||
Request request = new Request(method, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withRouting(indexRequest.routing());
|
||||
parameters.withTimeout(indexRequest.timeout());
|
||||
parameters.withVersion(indexRequest.version());
|
||||
|
@ -429,19 +408,19 @@ public final class Request {
|
|||
|
||||
BytesRef source = indexRequest.source().toBytesRef();
|
||||
ContentType contentType = createContentType(indexRequest.getContentType());
|
||||
HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, contentType);
|
||||
|
||||
return new Request(method, endpoint, parameters.getParams(), entity);
|
||||
request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request ping() {
|
||||
return new Request(HttpHead.METHOD_NAME, "/", Collections.emptyMap(), null);
|
||||
return new Request(HttpHead.METHOD_NAME, "/");
|
||||
}
|
||||
|
||||
static Request update(UpdateRequest updateRequest) throws IOException {
|
||||
String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = Params.builder();
|
||||
Params parameters = new Params(request);
|
||||
parameters.withRouting(updateRequest.routing());
|
||||
parameters.withTimeout(updateRequest.timeout());
|
||||
parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
|
||||
|
@ -472,14 +451,14 @@ public final class Request {
|
|||
if (xContentType == null) {
|
||||
xContentType = Requests.INDEX_CONTENT_TYPE;
|
||||
}
|
||||
|
||||
HttpEntity entity = createEntity(updateRequest, xContentType);
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity);
|
||||
request.setEntity(createEntity(updateRequest, xContentType));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request search(SearchRequest searchRequest) throws IOException {
|
||||
String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search");
|
||||
Params params = Params.builder();
|
||||
Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), "_search"));
|
||||
|
||||
Params params = new Params(request);
|
||||
params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
|
||||
params.withRouting(searchRequest.routing());
|
||||
params.withPreference(searchRequest.preference());
|
||||
|
@ -495,65 +474,73 @@ public final class Request {
|
|||
if (searchRequest.scroll() != null) {
|
||||
params.putParam("scroll", searchRequest.scroll().keepAlive());
|
||||
}
|
||||
HttpEntity entity = null;
|
||||
|
||||
if (searchRequest.source() != null) {
|
||||
entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE);
|
||||
request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE));
|
||||
}
|
||||
return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity);
|
||||
return request;
|
||||
}
|
||||
 
     static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException {
-        HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPost.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity);
+        Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll");
+        request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException {
-        HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpDelete.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity);
+        Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll");
+        request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
-        Params params = Params.builder();
+        Request request = new Request(HttpPost.METHOD_NAME, "/_msearch");
+
+        Params params = new Params(request);
         params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
         if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
             params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
         }
 
         XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
         byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
-        HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type()));
-        return new Request(HttpPost.METHOD_NAME, "/_msearch", params.getParams(), entity);
+        request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type())));
+        return request;
     }
 
     static Request existsAlias(GetAliasesRequest getAliasesRequest) {
-        Params params = Params.builder();
-        params.withIndicesOptions(getAliasesRequest.indicesOptions());
-        params.withLocal(getAliasesRequest.local());
         if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) &&
                 (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) {
             throw new IllegalArgumentException("existsAlias requires at least an alias or an index");
         }
         String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
         String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
-        String endpoint = endpoint(indices, "_alias", aliases);
-        return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
+
+        Request request = new Request(HttpHead.METHOD_NAME, endpoint(indices, "_alias", aliases));
+
+        Params params = new Params(request);
+        params.withIndicesOptions(getAliasesRequest.indicesOptions());
+        params.withLocal(getAliasesRequest.local());
+        return request;
     }
 
     static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) {
-        Params params = Params.builder();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint(fieldCapabilitiesRequest.indices(), "_field_caps"));
+
+        Params params = new Params(request);
         params.withFields(fieldCapabilitiesRequest.fields());
         params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions());
-
-        String[] indices = fieldCapabilitiesRequest.indices();
-        String endpoint = endpoint(indices, "_field_caps");
-        return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null);
+        return request;
     }
 
     static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
-        String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval");
-        Params params = Params.builder();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"));
+
+        Params params = new Params(request);
         params.withIndicesOptions(rankEvalRequest.indicesOptions());
-        HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
+
+        request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     static Request split(ResizeRequest resizeRequest) throws IOException {
@@ -571,64 +558,76 @@ public final class Request {
     }
 
     private static Request resize(ResizeRequest resizeRequest) throws IOException {
-        Params params = Params.builder();
-        params.withTimeout(resizeRequest.timeout());
-        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
-        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
         String endpoint = new EndpointBuilder().addPathPart(resizeRequest.getSourceIndex())
                 .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT))
                 .addPathPart(resizeRequest.getTargetIndexRequest().index()).build();
-        HttpEntity entity = createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, endpoint, params.getParams(), entity);
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
+        params.withTimeout(resizeRequest.timeout());
+        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
+        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
+
+        request.setEntity(createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException {
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings");
+
+        Params parameters = new Params(request);
         parameters.withTimeout(clusterUpdateSettingsRequest.timeout());
         parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout());
-        HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, "/_cluster/settings", parameters.getParams(), entity);
+
+        request.setEntity(createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     static Request rollover(RolloverRequest rolloverRequest) throws IOException {
-        Params params = Params.builder();
+        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
+                .addPathPart(rolloverRequest.getNewIndexName()).build();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
         params.withTimeout(rolloverRequest.timeout());
         params.withMasterTimeout(rolloverRequest.masterNodeTimeout());
         params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards());
         if (rolloverRequest.isDryRun()) {
             params.putParam("dry_run", Boolean.TRUE.toString());
         }
-        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
-                .addPathPart(rolloverRequest.getNewIndexName()).build();
-        HttpEntity entity = createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity);
+
+        request.setEntity(createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
-    static Request indicesExist(GetIndexRequest request) {
+    static Request indicesExist(GetIndexRequest getIndexRequest) {
         // this can be called with no indices as argument by transport client, not via REST though
-        if (request.indices() == null || request.indices().length == 0) {
+        if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
             throw new IllegalArgumentException("indices are mandatory");
         }
-        String endpoint = endpoint(request.indices(), "");
-        Params params = Params.builder();
-        params.withLocal(request.local());
-        params.withHuman(request.humanReadable());
-        params.withIndicesOptions(request.indicesOptions());
-        params.withIncludeDefaults(request.includeDefaults());
-        return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
+        String endpoint = endpoint(getIndexRequest.indices(), "");
+        Request request = new Request(HttpHead.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
+        params.withLocal(getIndexRequest.local());
+        params.withHuman(getIndexRequest.humanReadable());
+        params.withIndicesOptions(getIndexRequest.indicesOptions());
+        params.withIncludeDefaults(getIndexRequest.includeDefaults());
+        return request;
     }
 
     static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException {
-        Params parameters = Params.builder();
+        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint(indices, "_settings"));
+
+        Params parameters = new Params(request);
         parameters.withTimeout(updateSettingsRequest.timeout());
         parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
         parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
         parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());
-
-        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
-        String endpoint = endpoint(indices, "_settings");
-        HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
+
+        request.setEntity(createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
 
     private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
@@ -678,19 +677,19 @@ public final class Request {
     }
 
     /**
-     * Utility class to build request's parameters map and centralize all parameter names.
+     * Utility class to help with common parameter names and patterns. Wraps
+     * a {@link Request} and adds the parameters to it directly.
      */
     static class Params {
-        private final Map<String, String> params = new HashMap<>();
+        private final Request request;
 
-        private Params() {
+        Params(Request request) {
+            this.request = request;
         }
 
-        Params putParam(String key, String value) {
+        Params putParam(String name, String value) {
             if (Strings.hasLength(value)) {
-                if (params.putIfAbsent(key, value) != null) {
-                    throw new IllegalArgumentException("Request parameter [" + key + "] is already registered");
-                }
+                request.addParameter(name, value);
             }
             return this;
        }
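
Params thus stops being a builder around its own map and becomes a thin view over the Request it is given; every putParam call lands directly in the request's parameter map. A hedged sketch of the resulting flow (endpoint and values invented):

    Request request = new Request(HttpPost.METHOD_NAME, "/_search");
    Params params = new Params(request);
    params.putParam("scroll", "1m");      // forwarded to request.addParameter("scroll", "1m")
    String routing = null;
    params.putParam("routing", routing);  // skipped: Strings.hasLength(null) is false

Note that the explicit duplicate-parameter check is gone from this class; whatever policy Request#addParameter applies to repeated names now governs.
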
@@ -841,13 +840,6 @@ public final class Request {
             return this;
         }
 
-        Params withFlatSettings(boolean flatSettings) {
-            if (flatSettings) {
-                return putParam("flat_settings", Boolean.TRUE.toString());
-            }
-            return this;
-        }
-
         Params withIncludeDefaults(boolean includeDefaults) {
             if (includeDefaults) {
                 return putParam("include_defaults", Boolean.TRUE.toString());
@@ -861,14 +853,6 @@ public final class Request {
             }
             return this;
         }
-
-        Map<String, String> getParams() {
-            return Collections.unmodifiableMap(params);
-        }
-
-        static Params builder() {
-            return new Params();
-        }
     }
 
     /**
@@ -258,7 +258,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
      */
     public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -267,14 +267,14 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
      */
     public final void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
     }
 
     /**
      * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
      */
     public final boolean ping(Header... headers) throws IOException {
-        return performRequest(new MainRequest(), (request) -> Request.ping(), RestHighLevelClient::convertExistsResponse,
+        return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse,
                 emptySet(), headers);
     }
 
@@ -282,8 +282,8 @@ public class RestHighLevelClient implements Closeable {
      * Get the cluster info otherwise provided when sending an HTTP request to port 9200
      */
     public final MainResponse info(Header... headers) throws IOException {
-        return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(),
-                headers);
+        return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(),
+                MainResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -292,7 +292,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, singleton(404), headers);
+        return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers);
     }
 
     /**
@@ -301,7 +301,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers);
+        performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener,
+                singleton(404), headers);
     }
 
     /**
@@ -310,7 +311,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
      */
     public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, singleton(404), headers);
+        return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent,
+                singleton(404), headers);
     }
 
     /**
@@ -319,7 +321,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
      */
     public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener<MultiGetResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener,
+        performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener,
                 singleton(404), headers);
     }
 
@@ -329,7 +331,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final boolean exists(GetRequest getRequest, Header... headers) throws IOException {
-        return performRequest(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
+        return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
     }
 
     /**
@@ -338,7 +340,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final void existsAsync(GetRequest getRequest, ActionListener<Boolean> listener, Header... headers) {
-        performRequestAsync(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
+        performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener,
+                emptySet(), headers);
     }
 
     /**
@@ -347,7 +350,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
      */
     public final IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -356,7 +359,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
      */
     public final void indexAsync(IndexRequest indexRequest, ActionListener<IndexResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener,
+                emptySet(), headers);
     }
 
     /**
@@ -365,7 +369,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
      */
     public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -374,7 +378,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
      */
     public final void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener,
+                emptySet(), headers);
     }
 
     /**
@@ -383,8 +388,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
      */
     public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404),
-                headers);
+        return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent,
+                singleton(404), headers);
     }
 
     /**
@@ -393,7 +398,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
     */
    public final void deleteAsync(DeleteRequest deleteRequest, ActionListener<DeleteResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener,
+        performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener,
                Collections.singleton(404), headers);
    }
 
@@ -403,7 +408,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
      */
     public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers);
     }
 
     /**
@@ -412,7 +417,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
      */
     public final void searchAsync(SearchRequest searchRequest, ActionListener<SearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener,
+                emptySet(), headers);
     }
 
     /**
@@ -422,7 +428,7 @@ public class RestHighLevelClient implements Closeable {
      * elastic.co</a>
      */
     public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext,
+        return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext,
                 emptySet(), headers);
     }
 
@@ -433,7 +439,7 @@ public class RestHighLevelClient implements Closeable {
      * elastic.co</a>
      */
     public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener,
+        performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener,
                 emptySet(), headers);
     }
 
@@ -444,7 +450,8 @@ public class RestHighLevelClient implements Closeable {
      * API on elastic.co</a>
      */
     public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
+                emptySet(), headers);
     }
 
     /**
@@ -455,7 +462,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final void searchScrollAsync(SearchScrollRequest searchScrollRequest,
                                         ActionListener<SearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent,
+        performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
                 listener, emptySet(), headers);
     }
 
@@ -466,7 +473,7 @@ public class RestHighLevelClient implements Closeable {
      * Clear Scroll API on elastic.co</a>
      */
     public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
+        return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
                 emptySet(), headers);
     }
 
@@ -478,7 +485,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final void clearScrollAsync(ClearScrollRequest clearScrollRequest,
                                        ActionListener<ClearScrollResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
+        performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
                 listener, emptySet(), headers);
     }
 
@@ -489,7 +496,8 @@ public class RestHighLevelClient implements Closeable {
      * on elastic.co</a>
      */
     public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent,
+                emptySet(), headers);
     }
 
     /**
@@ -499,8 +507,8 @@ public class RestHighLevelClient implements Closeable {
      * on elastic.co</a>
      */
     public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener<RankEvalResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, listener, emptySet(),
-                headers);
+        performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener,
+                emptySet(), headers);
     }
 
     /**
@@ -511,7 +519,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest,
                                                      Header... headers) throws IOException {
-        return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
+        return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps,
                 FieldCapabilitiesResponse::fromXContent, emptySet(), headers);
     }
 
@@ -524,7 +532,7 @@ public class RestHighLevelClient implements Closeable {
     public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest,
                                      ActionListener<FieldCapabilitiesResponse> listener,
                                      Header... headers) {
-        performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
+        performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps,
                 FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers);
     }
 
@@ -544,9 +552,10 @@ public class RestHighLevelClient implements Closeable {
             throw validationException;
         }
         Request req = requestConverter.apply(request);
+        req.setHeaders(headers);
         Response response;
         try {
-            response = client.performRequest(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), headers);
+            response = client.performRequest(req);
         } catch (ResponseException e) {
             if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) {
                 try {
@@ -593,9 +602,10 @@ public class RestHighLevelClient implements Closeable {
             listener.onFailure(e);
             return;
         }
+        req.setHeaders(headers);
 
         ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
-        client.performRequestAsync(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), responseListener, headers);
+        client.performRequestAsync(req, responseListener);
     }
 
     final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
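
With headers carried on the Request and the one-argument performRequest overload on the low-level client, a call that previously needed five arguments collapses to one. A hedged sketch against the low-level RestClient (index, id and header values invented; BasicHeader is org.apache.http.message.BasicHeader):

    Request req = new Request(HttpGet.METHOD_NAME, "/twitter/_doc/1");
    req.addParameter("routing", "user1");                     // query-string parameter
    req.setHeaders(new BasicHeader("X-Opaque-Id", "demo"));   // headers ride on the Request now
    Response response = lowLevelRestClient.performRequest(req);
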
@@ -20,7 +20,6 @@
 package org.elasticsearch.client;
 
 import org.apache.http.Header;
-import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
 import org.apache.http.ProtocolVersion;
 import org.apache.http.RequestLine;
@@ -52,14 +51,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;
 
-import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyMapOf;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyVararg;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -79,14 +73,15 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
             final RestClient restClient = mock(RestClient.class);
             restHighLevelClient = new CustomRestClient(restClient);
 
-            doAnswer(mock -> mockPerformRequest((Header) mock.getArguments()[4]))
+            doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0]))
                 .when(restClient)
-                .performRequest(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), anyObject(), anyVararg());
+                .performRequest(any(Request.class));
 
-            doAnswer(mock -> mockPerformRequestAsync((Header) mock.getArguments()[5], (ResponseListener) mock.getArguments()[4]))
+            doAnswer(inv -> mockPerformRequestAsync(
+                    ((Request) inv.getArguments()[0]).getHeaders()[0],
+                    (ResponseListener) inv.getArguments()[1]))
                 .when(restClient)
-                .performRequestAsync(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class),
-                    any(HttpEntity.class), any(ResponseListener.class), anyVararg());
+                .performRequestAsync(any(Request.class), any(ResponseListener.class));
         }
     }
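
Because the mocked performRequest now takes a single Request, stubbing no longer needs the eq/anyMapOf/anyVararg matcher combinations, and assertions can inspect the object directly. An alternative, hedged capture-style check (not part of this commit; ArgumentCaptor and verify are from org.mockito):

    ArgumentCaptor<Request> captor = ArgumentCaptor.forClass(Request.class);
    verify(restClient).performRequest(captor.capture());
    assertEquals(HttpGet.METHOD_NAME, captor.getValue().getMethod());
    assertEquals(ENDPOINT, captor.getValue().getEndpoint());
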
@@ -193,7 +188,7 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
         }
 
         Request toRequest(MainRequest mainRequest) throws IOException {
-            return new Request(HttpGet.METHOD_NAME, ENDPOINT, emptyMap(), null);
+            return new Request(HttpGet.METHOD_NAME, ENDPOINT);
         }
 
         MainResponse toResponse(Response response) throws IOException {
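
The test changes below also track the slimmer Request surface: method and endpoint are the only constructor arguments left, and everything else is attached afterwards. A hedged before/after sketch (parameter and body invented; StringEntity and ContentType are from org.apache.http.entity):

    // before: new Request("GET", "/", Collections.emptyMap(), null)
    Request r = new Request("GET", "/");
    r.addParameter("pretty", "true");                                  // optional query parameter
    r.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); // optional body
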
@@ -82,6 +82,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.client.RequestConverters.EndpointBuilder;
+import org.elasticsearch.client.RequestConverters.Params;
 import org.elasticsearch.index.RandomCreateIndexGenerator;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.query.TermQueryBuilder;
@@ -124,8 +126,8 @@ import java.util.function.Function;
 import java.util.function.Supplier;
 
 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
-import static org.elasticsearch.client.Request.enforceSameContentType;
+import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE;
+import static org.elasticsearch.client.RequestConverters.enforceSameContentType;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings;
@@ -137,40 +139,9 @@ import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.nullValue;
 
-public class RequestTests extends ESTestCase {
-
-    public void testConstructor() {
-        final String method = randomFrom("GET", "PUT", "POST", "HEAD", "DELETE");
-        final String endpoint = randomAlphaOfLengthBetween(1, 10);
-        final Map<String, String> parameters = singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
-        final HttpEntity entity = randomBoolean() ? new StringEntity(randomAlphaOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null;
-
-        NullPointerException e = expectThrows(NullPointerException.class, () -> new Request(null, endpoint, parameters, entity));
-        assertEquals("method cannot be null", e.getMessage());
-
-        e = expectThrows(NullPointerException.class, () -> new Request(method, null, parameters, entity));
-        assertEquals("endpoint cannot be null", e.getMessage());
-
-        e = expectThrows(NullPointerException.class, () -> new Request(method, endpoint, null, entity));
-        assertEquals("parameters cannot be null", e.getMessage());
-
-        final Request request = new Request(method, endpoint, parameters, entity);
-        assertEquals(method, request.getMethod());
-        assertEquals(endpoint, request.getEndpoint());
-        assertEquals(parameters, request.getParameters());
-        assertEquals(entity, request.getEntity());
-
-        final Constructor<?>[] constructors = Request.class.getConstructors();
-        assertEquals("Expected only 1 constructor", 1, constructors.length);
-        assertTrue("Request constructor is not public", Modifier.isPublic(constructors[0].getModifiers()));
-    }
-
-    public void testClassVisibility() {
-        assertTrue("Request class is not public", Modifier.isPublic(Request.class.getModifiers()));
-    }
-
+public class RequestConvertersTests extends ESTestCase {
     public void testPing() {
-        Request request = Request.ping();
+        Request request = RequestConverters.ping();
         assertEquals("/", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
         assertNull(request.getEntity());
@@ -178,7 +149,7 @@ public class RequestTests extends ESTestCase {
     }
 
     public void testInfo() {
-        Request request = Request.info();
+        Request request = RequestConverters.info();
         assertEquals("/", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
         assertNull(request.getEntity());
@@ -186,7 +157,7 @@ public class RequestTests extends ESTestCase {
     }
 
     public void testGet() {
-        getAndExistsTest(Request::get, HttpGet.METHOD_NAME);
+        getAndExistsTest(RequestConverters::get, HttpGet.METHOD_NAME);
     }
 
     public void testMultiGet() throws IOException {
@@ -232,7 +203,7 @@ public class RequestTests extends ESTestCase {
             multiGetRequest.add(item);
         }
 
-        Request request = Request.multiGet(multiGetRequest);
+        Request request = RequestConverters.multiGet(multiGetRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_mget", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
@@ -260,7 +231,7 @@ public class RequestTests extends ESTestCase {
             }
         }
 
-        Request request = Request.delete(deleteRequest);
+        Request request = RequestConverters.delete(deleteRequest);
         assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
@@ -268,7 +239,7 @@ public class RequestTests extends ESTestCase {
     }
 
     public void testExists() {
-        getAndExistsTest(Request::exists, HttpHead.METHOD_NAME);
+        getAndExistsTest(RequestConverters::exists, HttpHead.METHOD_NAME);
     }
 
     public void testIndicesExist() {
@@ -282,7 +253,7 @@ public class RequestTests extends ESTestCase {
         setRandomHumanReadable(getIndexRequest, expectedParams);
         setRandomIncludeDefaults(getIndexRequest, expectedParams);
 
-        final Request request = Request.indicesExist(getIndexRequest);
+        final Request request = RequestConverters.indicesExist(getIndexRequest);
 
         assertEquals(HttpHead.METHOD_NAME, request.getMethod());
         assertEquals("/" + String.join(",", indices), request.getEndpoint());
@@ -291,8 +262,8 @@ public class RequestTests extends ESTestCase {
     }
 
     public void testIndicesExistEmptyIndices() {
-        expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest()));
-        expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest().indices((String[])null)));
+        expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest()));
+        expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null)));
     }
 
     private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
@@ -361,7 +332,7 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(createIndexRequest, expectedParams);
         setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
 
-        Request request = Request.createIndex(createIndexRequest);
+        Request request = RequestConverters.createIndex(createIndexRequest);
         assertEquals("/" + createIndexRequest.index(), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPut.METHOD_NAME, request.getMethod());
@@ -382,7 +353,7 @@ public class RequestTests extends ESTestCase {
         setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomMasterTimeout(indicesAliasesRequest, expectedParams);
 
-        Request request = Request.updateAliases(indicesAliasesRequest);
+        Request request = RequestConverters.updateAliases(indicesAliasesRequest);
         assertEquals("/_aliases", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertToXContentBody(indicesAliasesRequest, request.getEntity());
@@ -402,7 +373,7 @@ public class RequestTests extends ESTestCase {
         setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomMasterTimeout(putMappingRequest, expectedParams);
 
-        Request request = Request.putMapping(putMappingRequest);
+        Request request = RequestConverters.putMapping(putMappingRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         String index = String.join(",", indices);
         if (Strings.hasLength(index)) {
@@ -427,7 +398,7 @@ public class RequestTests extends ESTestCase {
 
         setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions, expectedParams);
 
-        Request request = Request.deleteIndex(deleteIndexRequest);
+        Request request = RequestConverters.deleteIndex(deleteIndexRequest);
         assertEquals("/" + String.join(",", indices), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
@@ -451,7 +422,7 @@ public class RequestTests extends ESTestCase {
         setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams);
         setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams);
 
-        Request request = Request.openIndex(openIndexRequest);
+        Request request = RequestConverters.openIndex(openIndexRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
         assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
         assertThat(expectedParams, equalTo(request.getParameters()));
@@ -474,7 +445,7 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(closeIndexRequest, expectedParams);
         setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams);
 
-        Request request = Request.closeIndex(closeIndexRequest);
+        Request request = RequestConverters.closeIndex(closeIndexRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close");
         assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
         assertThat(expectedParams, equalTo(request.getParameters()));
@@ -542,7 +513,7 @@ public class RequestTests extends ESTestCase {
             indexRequest.source(builder);
         }
 
-        Request request = Request.index(indexRequest);
+        Request request = RequestConverters.index(indexRequest);
         if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
             assertEquals("/" + index + "/" + type + "/" + id + "/_create", request.getEndpoint());
         } else if (id != null) {
@@ -572,7 +543,7 @@ public class RequestTests extends ESTestCase {
         }
         Map<String, String> expectedParams = new HashMap<>();
         setRandomIndicesOptions(refreshRequest::indicesOptions, refreshRequest::indicesOptions, expectedParams);
-        Request request = Request.refresh(refreshRequest);
+        Request request = RequestConverters.refresh(refreshRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -604,7 +575,7 @@ public class RequestTests extends ESTestCase {
         }
         expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
 
-        Request request = Request.flush(flushRequest);
+        Request request = RequestConverters.flush(flushRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -641,7 +612,7 @@ public class RequestTests extends ESTestCase {
         }
         expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush()));
 
-        Request request = Request.forceMerge(forceMergeRequest);
+        Request request = RequestConverters.forceMerge(forceMergeRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -681,7 +652,7 @@ public class RequestTests extends ESTestCase {
             expectedParams.put("fields", String.join(",", clearIndicesCacheRequest.fields()));
         }
 
-        Request request = Request.clearCache(clearIndicesCacheRequest);
+        Request request = RequestConverters.clearCache(clearIndicesCacheRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -754,7 +725,7 @@ public class RequestTests extends ESTestCase {
             randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams);
         }
 
-        Request request = Request.update(updateRequest);
+        Request request = RequestConverters.update(updateRequest);
         assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
@@ -791,7 +762,7 @@ public class RequestTests extends ESTestCase {
             UpdateRequest updateRequest = new UpdateRequest();
             updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
             updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML));
-            Request.update(updateRequest);
+            RequestConverters.update(updateRequest);
         });
         assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
                 exception.getMessage());
@@ -859,7 +830,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(docWriteRequest);
         }
 
-        Request request = Request.bulk(bulkRequest);
+        Request request = RequestConverters.bulk(bulkRequest);
         assertEquals("/_bulk", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
@@ -914,7 +885,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new UpdateRequest("index", "type", "1").script(mockScript("test")));
             bulkRequest.add(new DeleteRequest("index", "type", "2"));
 
-            Request request = Request.bulk(bulkRequest);
+            Request request = RequestConverters.bulk(bulkRequest);
             assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
@@ -924,7 +895,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType));
             bulkRequest.add(new DeleteRequest("index", "type", "2"));
 
-            Request request = Request.bulk(bulkRequest);
+            Request request = RequestConverters.bulk(bulkRequest);
             assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
@@ -936,14 +907,14 @@ public class RequestTests extends ESTestCase {
                 updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType));
             }
 
-            Request request = Request.bulk(new BulkRequest().add(updateRequest));
+            Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest));
             assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
             BulkRequest bulkRequest = new BulkRequest();
             bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE));
             bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Mismatching content-type found for request with content-type [JSON], " +
                     "previous requests have content-type [SMILE]", exception.getMessage());
         }
@@ -957,7 +928,7 @@ public class RequestTests extends ESTestCase {
                     .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
                     .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))
             );
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Mismatching content-type found for request with content-type [SMILE], " +
                     "previous requests have content-type [JSON]", exception.getMessage());
         }
@@ -970,7 +941,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new DeleteRequest("index", "type", "3"));
             bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON));
             bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType));
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Unsupported content-type found for request with content-type [" + xContentType
                     + "], only JSON and SMILE are supported", exception.getMessage());
         }
@@ -978,7 +949,7 @@ public class RequestTests extends ESTestCase {
 
     public void testSearchNullSource() throws IOException {
         SearchRequest searchRequest = new SearchRequest();
-        Request request = Request.search(searchRequest);
+        Request request = RequestConverters.search(searchRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_search", request.getEndpoint());
         assertNull(request.getEntity());
@@ -1073,7 +1044,7 @@ public class RequestTests extends ESTestCase {
             searchRequest.source(searchSourceBuilder);
         }
 
-        Request request = Request.search(searchRequest);
+        Request request = RequestConverters.search(searchRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         String index = String.join(",", indices);
         if (Strings.hasLength(index)) {
@@ -1127,7 +1098,7 @@ public class RequestTests extends ESTestCase {
             expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
         }
 
-        Request request = Request.multiSearch(multiSearchRequest);
+        Request request = RequestConverters.multiSearch(multiSearchRequest);
         assertEquals("/_msearch", request.getEndpoint());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals(expectedParams, request.getParameters());
@@ -1152,7 +1123,7 @@ public class RequestTests extends ESTestCase {
         if (randomBoolean()) {
             searchScrollRequest.scroll(randomPositiveTimeValue());
         }
-        Request request = Request.searchScroll(searchScrollRequest);
+        Request request = RequestConverters.searchScroll(searchScrollRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_search/scroll", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
@@ -1166,7 +1137,7 @@ public class RequestTests extends ESTestCase {
         for (int i = 0; i < numScrolls; i++) {
             clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10));
         }
-        Request request = Request.clearScroll(clearScrollRequest);
+        Request request = RequestConverters.clearScroll(clearScrollRequest);
         assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
         assertEquals("/_search/scroll", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
@@ -1191,7 +1162,7 @@ public class RequestTests extends ESTestCase {
         setRandomLocal(getAliasesRequest, expectedParams);
         setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams);
 
-        Request request = Request.existsAlias(getAliasesRequest);
+        Request request = RequestConverters.existsAlias(getAliasesRequest);
         StringJoiner expectedEndpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             expectedEndpoint.add(String.join(",", indices));
@@ -1209,13 +1180,15 @@ public class RequestTests extends ESTestCase {
     public void testExistsAliasNoAliasNoIndex() {
         {
             GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
-            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest));
+            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
+                    RequestConverters.existsAlias(getAliasesRequest));
             assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
         }
         {
             GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null);
             getAliasesRequest.indices((String[])null);
-            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest));
+            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
+                    RequestConverters.existsAlias(getAliasesRequest));
             assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
         }
     }
@@ -1234,7 +1207,7 @@ public class RequestTests extends ESTestCase {
                 fieldCapabilitiesRequest::indicesOptions,
                 indicesOptionsParams);
 
-        Request request = Request.fieldCaps(fieldCapabilitiesRequest);
+        Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest);
 
         // Verify that the resulting REST request looks as expected.
        StringJoiner endpoint = new StringJoiner("/", "/", "");
@@ -1270,7 +1243,7 @@ public class RequestTests extends ESTestCase {
         Map<String, String> expectedParams = new HashMap<>();
         setRandomIndicesOptions(rankEvalRequest::indicesOptions, rankEvalRequest::indicesOptions, expectedParams);
 
-        Request request = Request.rankEval(rankEvalRequest);
+        Request request = RequestConverters.rankEval(rankEvalRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         String index = String.join(",", indices);
         if (Strings.hasLength(index)) {
@@ -1284,25 +1257,25 @@ public class RequestTests extends ESTestCase {
     }
 
     public void testSplit() throws IOException {
-        resizeTest(ResizeType.SPLIT, Request::split);
+        resizeTest(ResizeType.SPLIT, RequestConverters::split);
     }
 
     public void testSplitWrongResizeType() {
         ResizeRequest resizeRequest = new ResizeRequest("target", "source");
         resizeRequest.setResizeType(ResizeType.SHRINK);
-        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.split(resizeRequest));
+        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.split(resizeRequest));
         assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage());
     }
 
     public void testShrinkWrongResizeType() {
         ResizeRequest resizeRequest = new ResizeRequest("target", "source");
         resizeRequest.setResizeType(ResizeType.SPLIT);
-        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.shrink(resizeRequest));
+        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.shrink(resizeRequest));
         assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage());
     }
 
     public void testShrink() throws IOException {
-        resizeTest(ResizeType.SHRINK, Request::shrink);
+        resizeTest(ResizeType.SHRINK, RequestConverters::shrink);
     }
 
     private static void resizeTest(ResizeType resizeType, CheckedFunction<ResizeRequest, Request, IOException> function)
@@ -1341,7 +1314,7 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(request, expectedParams);
         setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
 
-        Request expectedRequest = Request.clusterPutSettings(request);
+        Request expectedRequest = RequestConverters.clusterPutSettings(request);
         assertEquals("/_cluster/settings", expectedRequest.getEndpoint());
         assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod());
         assertEquals(expectedParams, expectedRequest.getParameters());
@@ -1374,7 +1347,7 @@ public class RequestTests extends ESTestCase {
         }
         setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams);
 
-        Request request = Request.rollover(rolloverRequest);
+        Request request = RequestConverters.rollover(rolloverRequest);
         if (rolloverRequest.getNewIndexName() == null) {
             assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint());
         } else {
@@ -1399,7 +1372,7 @@ public class RequestTests extends ESTestCase {
             }
         }
 
-        Request request = Request.indexPutSettings(updateSettingsRequest);
+        Request request = RequestConverters.indexPutSettings(updateSettingsRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@ -1417,143 +1390,115 @@ public class RequestTests extends ESTestCase {
|
|||
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
|
||||
}
|
||||
|
||||
public void testParams() {
|
||||
final int nbParams = randomIntBetween(0, 10);
|
||||
Request.Params params = Request.Params.builder();
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
for (int i = 0; i < nbParams; i++) {
|
||||
String paramName = "p_" + i;
|
||||
String paramValue = randomAlphaOfLength(5);
|
||||
params.putParam(paramName, paramValue);
|
||||
expectedParams.put(paramName, paramValue);
|
||||
}
|
||||
|
||||
Map<String, String> requestParams = params.getParams();
|
||||
assertEquals(nbParams, requestParams.size());
|
||||
assertEquals(expectedParams, requestParams);
|
||||
}
|
||||
|
||||
public void testParamsNoDuplicates() {
|
||||
Request.Params params = Request.Params.builder();
|
||||
params.putParam("test", "1");
|
||||
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> params.putParam("test", "2"));
|
||||
assertEquals("Request parameter [test] is already registered", e.getMessage());
|
||||
|
||||
Map<String, String> requestParams = params.getParams();
|
||||
assertEquals(1L, requestParams.size());
|
||||
assertEquals("1", requestParams.values().iterator().next());
|
||||
}
|
||||
|
||||
public void testEndpointBuilder() {
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder();
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder();
|
||||
assertEquals("/", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY);
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY);
|
||||
assertEquals("/", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("");
|
||||
assertEquals("/", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a", "b");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b");
|
||||
assertEquals("/a/b", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPart("b")
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b")
|
||||
.addPathPartAsIs("_create");
|
||||
assertEquals("/a/b/_create", endpointBuilder.build());
|
||||
}
|
||||
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a", "b", "c")
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c")
|
||||
.addPathPartAsIs("_create");
|
||||
assertEquals("/a/b/c/_create", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPartAsIs("_create");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_create");
|
||||
assertEquals("/a/_create", endpointBuilder.build());
|
||||
}
|
||||
}
|
||||
|
||||
public void testEndpointBuilderEncodeParts() {
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("-#index1,index#2", "type", "id");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("-#index1,index#2", "type", "id");
|
||||
assertEquals("/-%23index1,index%232/type/id", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type#2", "id");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type#2", "id");
|
||||
assertEquals("/index/type%232/id", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this/is/the/id");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this/is/the/id");
|
||||
assertEquals("/index/type/this%2Fis%2Fthe%2Fid", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this|is|the|id");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this|is|the|id");
|
||||
assertEquals("/index/type/this%7Cis%7Cthe%7Cid", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "id#1");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "id#1");
|
||||
assertEquals("/index/type/id%231", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("<logstash-{now/M}>", "_search");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("<logstash-{now/M}>", "_search");
|
||||
assertEquals("/%3Clogstash-%7Bnow%2FM%7D%3E/_search", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("中文");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("中文");
|
||||
assertEquals("/中文", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo bar");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo bar");
|
||||
assertEquals("/foo%20bar", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar");
|
||||
assertEquals("/foo+bar", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar");
|
||||
assertEquals("/foo+bar", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo/bar");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo/bar");
|
||||
assertEquals("/foo%2Fbar", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo^bar");
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo^bar");
|
||||
assertEquals("/foo%5Ebar", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("cluster1:index1,index2")
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2")
|
||||
.addPathPartAsIs("_search");
|
||||
assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder()
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder()
|
||||
.addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear");
|
||||
assertEquals("/index1,index2/cache/clear", endpointBuilder.build());
|
||||
}
|
||||
}
|
||||
|
||||
public void testEndpoint() {
|
||||
assertEquals("/index/type/id", Request.endpoint("index", "type", "id"));
|
||||
assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint"));
|
||||
assertEquals("/index1,index2", Request.endpoint(new String[]{"index1", "index2"}));
|
||||
assertEquals("/index1,index2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, "_endpoint"));
|
||||
assertEquals("/index1,index2/type1,type2/_endpoint", Request.endpoint(new String[]{"index1", "index2"},
|
||||
assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id"));
|
||||
assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint"));
|
||||
assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"}));
|
||||
assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint"));
|
||||
assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"},
|
||||
new String[]{"type1", "type2"}, "_endpoint"));
|
||||
assertEquals("/index1,index2/_endpoint/suffix1,suffix2", Request.endpoint(new String[]{"index1", "index2"},
|
||||
assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"},
|
||||
"_endpoint", new String[]{"suffix1", "suffix2"}));
|
||||
}
|
||||
|
||||
public void testCreateContentType() {
|
||||
final XContentType xContentType = randomFrom(XContentType.values());
|
||||
assertEquals(xContentType.mediaTypeWithoutParameters(), Request.createContentType(xContentType).getMimeType());
|
||||
assertEquals(xContentType.mediaTypeWithoutParameters(), RequestConverters.createContentType(xContentType).getMimeType());
|
||||
}
|
||||
|
||||
public void testEnforceSameContentType() {
|
|
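
All of the hunks above are one mechanical rename: the static request-building helpers of the high-level client move from the old Request class to the new RequestConverters class, while Request itself becomes the plain HTTP carrier added later in this commit. A minimal sketch of the resulting call shape; rankEvalRequest stands for any populated RankEvalRequest and is not part of the diff:

    // The converter builds the low-level HTTP request from the typed request.
    Request request = RequestConverters.rankEval(rankEvalRequest);
    String endpoint = request.getEndpoint();              // e.g. an "/_rank_eval" path
    Map<String, String> params = request.getParameters(); // query-string parameters set by the converter
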
@@ -94,14 +94,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.client.RestClientTestUtil.randomHeaders;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNotNull;
import static org.mockito.Matchers.isNull;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

@@ -134,31 +127,22 @@ public class RestHighLevelClientTests extends ESTestCase {
        Header[] headers = randomHeaders(random(), "Header");
        Response response = mock(Response.class);
        when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK));
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(response);
        when(restClient.performRequest(any(Request.class))).thenReturn(response);
        assertTrue(restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
                isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testPing404NotFound() throws IOException {
        Header[] headers = randomHeaders(random(), "Header");
        Response response = mock(Response.class);
        when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND));
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(response);
        when(restClient.performRequest(any(Request.class))).thenReturn(response);
        assertFalse(restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
                isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testPingSocketTimeout() throws IOException {
        Header[] headers = randomHeaders(random(), "Header");
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(new SocketTimeoutException());
        when(restClient.performRequest(any(Request.class))).thenThrow(new SocketTimeoutException());
        expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
                isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testInfo() throws IOException {

@@ -168,8 +152,6 @@ public class RestHighLevelClientTests extends ESTestCase {
        mockResponse(testInfo);
        MainResponse receivedInfo = restHighLevelClient.info(headers);
        assertEquals(testInfo, receivedInfo);
        verify(restClient).performRequest(eq(HttpGet.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
                isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testSearchScroll() throws IOException {

@@ -185,8 +167,6 @@ public class RestHighLevelClientTests extends ESTestCase {
        assertEquals(5, searchResponse.getTotalShards());
        assertEquals(5, searchResponse.getSuccessfulShards());
        assertEquals(100, searchResponse.getTook().getMillis());
        verify(restClient).performRequest(eq(HttpPost.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
                isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testClearScroll() throws IOException {

@@ -198,17 +178,14 @@ public class RestHighLevelClientTests extends ESTestCase {
        ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers);
        assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded());
        assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed());
        verify(restClient).performRequest(eq(HttpDelete.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
                isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    private void mockResponse(ToXContent toXContent) throws IOException {
        Response response = mock(Response.class);
        ContentType contentType = ContentType.parse(Request.REQUEST_BODY_CONTENT_TYPE.mediaType());
        String requestBody = toXContent(toXContent, Request.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString();
        ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType());
        String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString();
        when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType));
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(response);
        when(restClient.performRequest(any(Request.class))).thenReturn(response);
    }

    public void testRequestValidation() {

@@ -336,13 +313,11 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnSuccess() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(mockResponse);
        when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse);
        {
            Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter,
                    response -> response.getStatusLine().getStatusCode(), Collections.emptySet());

@@ -358,14 +333,12 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));

@@ -376,16 +349,14 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
                ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));

@@ -396,15 +367,13 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));

@@ -416,15 +385,13 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));

@@ -436,13 +403,11 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        //although we got an exception, we turn it into a successful response because the status code was provided among ignores
        assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter,
                response -> response.getStatusLine().getStatusCode(), Collections.singleton(404)));

@@ -450,13 +415,11 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> {throw new IllegalStateException();}, Collections.singleton(404)));

@@ -467,15 +430,13 @@ public class RestHighLevelClientTests extends ESTestCase {

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
                new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
        CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
                ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> {throw new IllegalStateException();}, Collections.singleton(404)));

@@ -696,23 +657,6 @@ public class RestHighLevelClientTests extends ESTestCase {
        }
    }

    private static class HeadersVarargMatcher extends ArgumentMatcher<Header[]> implements VarargMatcher {
        private Header[] expectedHeaders;

        HeadersVarargMatcher(Header... expectedHeaders) {
            this.expectedHeaders = expectedHeaders;
        }

        @Override
        public boolean matches(Object varargArgument) {
            if (varargArgument instanceof Header[]) {
                Header[] actualHeaders = (Header[]) varargArgument;
                return new ArrayEquals(expectedHeaders).matches(actualHeaders);
            }
            return false;
        }
    }

    private static StatusLine newStatusLine(RestStatus restStatus) {
        return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name());
    }
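
Because the high-level client now funnels every call through the single Request-flavored entry point, the test doubles only need to stub one method, which is why the old five-argument stubs and the HeadersVarargMatcher verify calls disappear above. A minimal Mockito sketch under the same setup these tests use (restClient is the mocked low-level client):

    Response response = mock(Response.class);
    when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK));
    // One stub now covers every request the high-level client performs:
    when(restClient.performRequest(any(Request.class))).thenReturn(response);
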
@@ -0,0 +1,202 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.unmodifiableMap;

/**
 * HTTP Request to Elasticsearch.
 */
public final class Request {
    private static final Header[] NO_HEADERS = new Header[0];
    private final String method;
    private final String endpoint;
    private final Map<String, String> parameters = new HashMap<>();

    private HttpEntity entity;
    private Header[] headers = NO_HEADERS;
    private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory =
            HttpAsyncResponseConsumerFactory.DEFAULT;

    /**
     * Create the {@linkplain Request}.
     * @param method the HTTP method
     * @param endpoint the path of the request (without scheme, host, port, or prefix)
     */
    public Request(String method, String endpoint) {
        this.method = Objects.requireNonNull(method, "method cannot be null");
        this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null");
    }

    /**
     * The HTTP method.
     */
    public String getMethod() {
        return method;
    }

    /**
     * The path of the request (without scheme, host, port, or prefix).
     */
    public String getEndpoint() {
        return endpoint;
    }

    /**
     * Add a query string parameter.
     * @param name the name of the url parameter. Must not be null.
     * @param value the value of the url parameter. If {@code null} then
     *      the parameter is sent as {@code name} rather than {@code name=value}
     * @throws IllegalArgumentException if a parameter with that name has
     *      already been set
     */
    public void addParameter(String name, String value) {
        Objects.requireNonNull(name, "url parameter name cannot be null");
        // .putIfAbsent(name, value) except we are in Java 7 which doesn't have that.
        if (parameters.containsKey(name)) {
            throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]");
        } else {
            parameters.put(name, value);
        }
    }

    /**
     * Query string parameters. The returned map is an unmodifiable view of the
     * map in the request, so subsequent calls to {@link #addParameter(String, String)}
     * will be reflected in it.
     */
    public Map<String, String> getParameters() {
        return unmodifiableMap(parameters);
    }

    /**
     * Set the body of the request. If not set or set to {@code null} then no
     * body is sent with the request.
     */
    public void setEntity(HttpEntity entity) {
        this.entity = entity;
    }

    /**
     * The body of the request. If {@code null} then no body
     * is sent with the request.
     */
    public HttpEntity getEntity() {
        return entity;
    }

    /**
     * Set the headers to attach to the request.
     */
    public void setHeaders(Header... headers) {
        Objects.requireNonNull(headers, "headers cannot be null");
        for (Header header : headers) {
            Objects.requireNonNull(header, "header cannot be null");
        }
        this.headers = headers;
    }

    /**
     * Headers to attach to the request.
     */
    public Header[] getHeaders() {
        return headers;
    }

    /**
     * Set the {@link HttpAsyncResponseConsumerFactory} used to create one
     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
     * response body gets streamed from a non-blocking HTTP connection on the
     * client side.
     */
    public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
        this.httpAsyncResponseConsumerFactory =
                Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null");
    }

    /**
     * The {@link HttpAsyncResponseConsumerFactory} used to create one
     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
     * response body gets streamed from a non-blocking HTTP connection on the
     * client side.
     */
    public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() {
        return httpAsyncResponseConsumerFactory;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("Request{");
        b.append("method='").append(method).append('\'');
        b.append(", endpoint='").append(endpoint).append('\'');
        if (false == parameters.isEmpty()) {
            b.append(", params=").append(parameters);
        }
        if (entity != null) {
            b.append(", entity=").append(entity);
        }
        if (headers.length > 0) {
            b.append(", headers=");
            for (int h = 0; h < headers.length; h++) {
                if (h != 0) {
                    b.append(',');
                }
                b.append(headers[h].toString());
            }
        }
        if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
            b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
        }
        return b.append('}').toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || (obj.getClass() != getClass())) {
            return false;
        }
        if (obj == this) {
            return true;
        }

        Request other = (Request) obj;
        return method.equals(other.method)
                && endpoint.equals(other.endpoint)
                && parameters.equals(other.parameters)
                && Objects.equals(entity, other.entity)
                && Arrays.equals(headers, other.headers)
                && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
    }

    @Override
    public int hashCode() {
        return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory);
    }
}
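
Taken together, the new class gives the low-level client a mutable request object in place of the long performRequest signatures. A minimal usage sketch; the endpoint, parameter, body, and header values are illustrative only, and restClient is assumed to be an existing RestClient (the matching performRequest(Request) overload is added in the next file):

    Request request = new Request("PUT", "/twitter/doc/1");
    request.addParameter("refresh", "true");                     // sent as ?refresh=true
    request.setEntity(new NStringEntity("{\"user\":\"kimchy\"}", // request body
            ContentType.APPLICATION_JSON));
    request.setHeaders(new BasicHeader("X-Opaque-Id", "my-app")); // optional headers
    Response response = restClient.performRequest(request);
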
@@ -143,6 +143,61 @@ public class RestClient implements Closeable {
        this.blacklist.clear();
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to.
     * Blocks until the request is completed and returns its response or fails
     * by throwing an exception. Selects a host out of the provided ones in a
     * round-robin fashion. Failing hosts are marked dead and retried after a
     * certain amount of time (minimum 1 minute, maximum 30 minutes), depending
     * on how many times they previously failed (the more failures, the later
     * they will be retried). In case of failures all of the alive nodes (or
     * dead nodes that deserve a retry) are retried until one responds or none
     * of them does, in which case an {@link IOException} will be thrown.
     *
     * This method works by performing an asynchronous call and waiting
     * for the result. If the asynchronous call throws an exception we wrap
     * it and rethrow it so that the stack trace attached to the exception
     * contains the call site. While we attempt to preserve the original
     * exception this isn't always possible and we likely haven't covered all
     * of the cases. You can get the original exception from
     * {@link Exception#getCause()}.
     *
     * @param request the request to perform
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(Request request) throws IOException {
        SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
        performRequestAsyncNoCatch(request, listener);
        return listener.get();
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to.
     * The request is executed asynchronously and the provided
     * {@link ResponseListener} gets notified upon request completion or
     * failure. Selects a host out of the provided ones in a round-robin
     * fashion. Failing hosts are marked dead and retried after a certain
     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how
     * many times they previously failed (the more failures, the later they
     * will be retried). In case of failures all of the alive nodes (or dead
     * nodes that deserve a retry) are retried until one responds or none of
     * them does, in which case an {@link IOException} will be thrown.
     *
     * @param request the request to perform
     * @param responseListener the {@link ResponseListener} to notify when the
     *      request is completed or fails
     */
    public void performRequestAsync(Request request, ResponseListener responseListener) {
        try {
            performRequestAsyncNoCatch(request, responseListener);
        } catch (Exception e) {
            responseListener.onFailure(e);
        }
    }

    /**
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters

@@ -157,7 +212,9 @@ public class RestClient implements Closeable {
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
        Request request = new Request(method, endpoint);
        request.setHeaders(headers);
        return performRequest(request);
    }

    /**

@@ -174,7 +231,10 @@ public class RestClient implements Closeable {
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        return performRequest(method, endpoint, params, (HttpEntity)null, headers);
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        request.setHeaders(headers);
        return performRequest(request);
    }

    /**

@@ -195,7 +255,11 @@ public class RestClient implements Closeable {
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, Header... headers) throws IOException {
        return performRequest(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, headers);
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        request.setEntity(entity);
        request.setHeaders(headers);
        return performRequest(request);
    }

    /**

@@ -229,10 +293,12 @@ public class RestClient implements Closeable {
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                   Header... headers) throws IOException {
        SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
        performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory,
                listener, headers);
        return listener.get();
        Request request = new Request(method, endpoint);
        addParameters(request, params);
        request.setEntity(entity);
        request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
        request.setHeaders(headers);
        return performRequest(request);
    }

    /**

@@ -246,7 +312,15 @@ public class RestClient implements Closeable {
     * @param headers the optional request headers
     */
    public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
        Request request;
        try {
            request = new Request(method, endpoint);
            request.setHeaders(headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**

@@ -262,7 +336,16 @@ public class RestClient implements Closeable {
     */
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, params, null, responseListener, headers);
        Request request;
        try {
            request = new Request(method, endpoint);
            addParameters(request, params);
            request.setHeaders(headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**

@@ -281,7 +364,17 @@ public class RestClient implements Closeable {
     */
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, responseListener, headers);
        Request request;
        try {
            request = new Request(method, endpoint);
            addParameters(request, params);
            request.setEntity(entity);
            request.setHeaders(headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    /**

@@ -305,24 +398,27 @@ public class RestClient implements Closeable {
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                    ResponseListener responseListener, Header... headers) {
        Request request;
        try {
            performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory,
                    responseListener, headers);
            request = new Request(method, endpoint);
            addParameters(request, params);
            request.setEntity(entity);
            request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
            request.setHeaders(headers);
        } catch (Exception e) {
            responseListener.onFailure(e);
            return;
        }
        performRequestAsync(request, responseListener);
    }

    void performRequestAsyncNoCatch(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                    ResponseListener responseListener, Header... headers) {
        Objects.requireNonNull(params, "params must not be null");
        Map<String, String> requestParams = new HashMap<>(params);
    void performRequestAsyncNoCatch(Request request, ResponseListener listener) {
        Map<String, String> requestParams = new HashMap<>(request.getParameters());
        //ignore is a special parameter supported by the clients, shouldn't be sent to es
        String ignoreString = requestParams.remove("ignore");
        Set<Integer> ignoreErrorCodes;
        if (ignoreString == null) {
            if (HttpHead.METHOD_NAME.equals(method)) {
            if (HttpHead.METHOD_NAME.equals(request.getMethod())) {
                //404 never causes error if returned for a HEAD request
                ignoreErrorCodes = Collections.singleton(404);
            } else {

@@ -331,7 +427,7 @@ public class RestClient implements Closeable {
        } else {
            String[] ignoresArray = ignoreString.split(",");
            ignoreErrorCodes = new HashSet<>();
            if (HttpHead.METHOD_NAME.equals(method)) {
            if (HttpHead.METHOD_NAME.equals(request.getMethod())) {
                //404 never causes error if returned for a HEAD request
                ignoreErrorCodes.add(404);
            }

@@ -343,13 +439,13 @@ public class RestClient implements Closeable {
                }
            }
        }
        URI uri = buildUri(pathPrefix, endpoint, requestParams);
        HttpRequestBase request = createHttpRequest(method, uri, entity);
        setHeaders(request, headers);
        FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
        URI uri = buildUri(pathPrefix, request.getEndpoint(), requestParams);
        HttpRequestBase httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity());
        setHeaders(httpRequest, request.getHeaders());
        FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener);
        long startTime = System.nanoTime();
        performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
                failureTrackingResponseListener);
        performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes,
                request.getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
    }

    private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,

@@ -428,11 +524,9 @@ public class RestClient implements Closeable {
    }

    private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
        Objects.requireNonNull(requestHeaders, "request headers must not be null");
        // request headers override default headers, so we don't add default headers if they exist as request headers
        final Set<String> requestNames = new HashSet<>(requestHeaders.length);
        for (Header requestHeader : requestHeaders) {
            Objects.requireNonNull(requestHeader, "request header must not be null");
            httpRequest.addHeader(requestHeader);
            requestNames.add(requestHeader.getName());
        }

@@ -766,4 +860,15 @@ public class RestClient implements Closeable {
            this.authCache = authCache;
        }
    }

    /**
     * Add all parameters from a map to a {@link Request}. This only exists
     * to support methods that exist for backwards compatibility.
     */
    private static void addParameters(Request request, Map<String, String> parameters) {
        Objects.requireNonNull(parameters, "parameters cannot be null");
        for (Map.Entry<String, String> entry : parameters.entrySet()) {
            request.addParameter(entry.getKey(), entry.getValue());
        }
    }
}
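
The asynchronous flavor mirrors the synchronous one: performRequestAsync(Request, ResponseListener) catches request-building failures and reports them through the listener instead of throwing. A minimal sketch, assuming the same restClient and request as in the example above:

    restClient.performRequestAsync(request, new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            // called once a host responds successfully
        }

        @Override
        public void onFailure(Exception exception) {
            // called if the request could not be built or all retries failed
        }
    });
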
@@ -0,0 +1,137 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import java.util.HashMap;
import java.util.Map;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

public class RequestTests extends RestClientTestCase {
    public void testConstructor() {
        final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
        final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);

        try {
            new Request(null, endpoint);
            fail("expected failure");
        } catch (NullPointerException e) {
            assertEquals("method cannot be null", e.getMessage());
        }

        try {
            new Request(method, null);
            fail("expected failure");
        } catch (NullPointerException e) {
            assertEquals("endpoint cannot be null", e.getMessage());
        }

        final Request request = new Request(method, endpoint);
        assertEquals(method, request.getMethod());
        assertEquals(endpoint, request.getEndpoint());
    }

    public void testAddParameters() {
        final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
        final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
        int parametersCount = between(1, 3);
        final Map<String, String> parameters = new HashMap<>(parametersCount);
        while (parameters.size() < parametersCount) {
            parameters.put(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5));
        }
        Request request = new Request(method, endpoint);

        try {
            request.addParameter(null, "value");
            fail("expected failure");
        } catch (NullPointerException e) {
            assertEquals("url parameter name cannot be null", e.getMessage());
        }

        for (Map.Entry<String, String> entry : parameters.entrySet()) {
            request.addParameter(entry.getKey(), entry.getValue());
        }
        assertEquals(parameters, request.getParameters());

        // Test that adding parameters with a null value is ok.
        request.addParameter("is_null", null);
        parameters.put("is_null", null);
        assertEquals(parameters, request.getParameters());

        // Test that adding a duplicate parameter fails
        String firstValue = randomBoolean() ? null : "value";
        request.addParameter("name", firstValue);
        try {
            request.addParameter("name", randomBoolean() ? firstValue : "second_value");
            fail("expected failure");
        } catch (IllegalArgumentException e) {
            assertEquals("url parameter [name] has already been set to [" + firstValue + "]", e.getMessage());
        }
    }

    public void testSetEntity() {
        final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
        final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
        final HttpEntity entity =
                randomBoolean() ? new StringEntity(randomAsciiLettersOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null;
        Request request = new Request(method, endpoint);

        request.setEntity(entity);
        assertEquals(entity, request.getEntity());
    }

    public void testSetHeaders() {
        final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
        final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
        Request request = new Request(method, endpoint);

        try {
            request.setHeaders((Header[]) null);
            fail("expected failure");
        } catch (NullPointerException e) {
            assertEquals("headers cannot be null", e.getMessage());
        }

        try {
            request.setHeaders(new Header[] {null});
            fail("expected failure");
        } catch (NullPointerException e) {
            assertEquals("header cannot be null", e.getMessage());
        }

        Header[] headers = new Header[between(0, 5)];
        for (int i = 0; i < headers.length; i++) {
            headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
        }
        request.setHeaders(headers);
        assertArrayEquals(headers, request.getHeaders());
    }

    // TODO equals and hashcode

}
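
One way the trailing TODO could be discharged, sketched against the equals/hashCode implementation added in Request.java; this test does not exist in the commit, and assertNotEquals would need a matching org.junit.Assert import:

    public void testEqualsAndHashCode() {
        Request request = new Request("GET", "/");
        assertEquals(request, request);

        Request copy = new Request(request.getMethod(), request.getEndpoint());
        assertEquals(request, copy);
        assertEquals(request.hashCode(), copy.hashCode());

        Request mutant = new Request(request.getMethod(), request.getEndpoint());
        mutant.addParameter("pretty", "true"); // differing parameters break equality
        assertNotEquals(request, mutant);
    }
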
@@ -138,7 +138,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
        final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom());
        Response response;
        try {
            response = restClient.performRequest(method, "/" + statusCode);
            response = restClient.performRequest(new Request(method, "/" + statusCode));
        } catch(ResponseException responseException) {
            response = responseException.getResponse();
        }

@@ -156,7 +156,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
        final String method = RestClientTestUtil.randomHttpMethod(getRandom());
        //we don't test status codes that are subject to retries as they interfere with hosts being stopped
        final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom());
        restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() {
        restClient.performRequestAsync(new Request(method, "/" + statusCode), new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                responses.add(new TestResponse(method, statusCode, response));
@ -62,6 +62,7 @@ import java.util.HashMap;
|
|||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
|
@@ -280,13 +281,17 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON);
        for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
            for (int okStatusCode : getOkStatusCodes()) {
                Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity);
                Request request = new Request(method, "/" + okStatusCode);
                request.setEntity(entity);
                Response response = restClient.performRequest(request);
                assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
                assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
            }
            for (int errorStatusCode : getAllErrorStatusCodes()) {
                Request request = new Request(method, "/" + errorStatusCode);
                request.setEntity(entity);
                try {
                    restClient.performRequest(method, "/" + errorStatusCode, Collections.<String, String>emptyMap(), entity);
                    restClient.performRequest(request);
                    fail("request should have failed");
                } catch(ResponseException e) {
                    Response response = e.getResponse();
@@ -297,8 +302,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
            }
        }
        for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
            Request request = new Request(method, "/" + randomStatusCode(getRandom()));
            request.setEntity(entity);
            try {
                restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.<String, String>emptyMap(), entity);
                restClient.performRequest(request);
                fail("request should have failed");
            } catch(UnsupportedOperationException e) {
                assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
@@ -306,7 +313,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
    }

    public void testNullHeaders() throws IOException {
    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
     */
    @Deprecated
    public void testPerformRequestOldStyleNullHeaders() throws IOException {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
@@ -323,20 +334,24 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
    }

    public void testNullParams() throws IOException {
    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}.
     */
    @Deprecated
    public void testPerformRequestOldStyleWithNullParams() throws IOException {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
            restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("params must not be null", e.getMessage());
            assertEquals("parameters cannot be null", e.getMessage());
        }
        try {
            restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("params must not be null", e.getMessage());
            assertEquals("parameters cannot be null", e.getMessage());
        }
    }
@@ -348,9 +363,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        for (String method : getHttpMethods()) {
            final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
            final int statusCode = randomStatusCode(getRandom());
            Request request = new Request(method, "/" + statusCode);
            request.setHeaders(requestHeaders);
            Response esResponse;
            try {
                esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders);
                esResponse = restClient.performRequest(request);
            } catch(ResponseException e) {
                esResponse = e.getResponse();
            }
@@ -361,16 +378,15 @@ public class RestClientSingleHostTests extends RestClientTestCase {

    private HttpUriRequest performRandomRequest(String method) throws Exception {
        String uriAsString = "/" + randomStatusCode(getRandom());
        Request request = new Request(method, uriAsString);
        URIBuilder uriBuilder = new URIBuilder(uriAsString);
        final Map<String, String> params = new HashMap<>();
        boolean hasParams = randomBoolean();
        if (hasParams) {
        if (randomBoolean()) {
            int numParams = randomIntBetween(1, 3);
            for (int i = 0; i < numParams; i++) {
                String paramKey = "param-" + i;
                String paramValue = randomAsciiOfLengthBetween(3, 10);
                params.put(paramKey, paramValue);
                uriBuilder.addParameter(paramKey, paramValue);
                String name = "param-" + i;
                String value = randomAsciiAlphanumOfLengthBetween(3, 10);
                request.addParameter(name, value);
                uriBuilder.addParameter(name, value);
            }
        }
        if (randomBoolean()) {
@@ -379,81 +395,82 @@ public class RestClientSingleHostTests extends RestClientTestCase {
            if (randomBoolean()) {
                ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes()));
            }
            params.put("ignore", ignore);
            request.addParameter("ignore", ignore);
        }
        URI uri = uriBuilder.build();

        HttpUriRequest request;
        HttpUriRequest expectedRequest;
        switch(method) {
            case "DELETE":
                request = new HttpDeleteWithEntity(uri);
                expectedRequest = new HttpDeleteWithEntity(uri);
                break;
            case "GET":
                request = new HttpGetWithEntity(uri);
                expectedRequest = new HttpGetWithEntity(uri);
                break;
            case "HEAD":
                request = new HttpHead(uri);
                expectedRequest = new HttpHead(uri);
                break;
            case "OPTIONS":
                request = new HttpOptions(uri);
                expectedRequest = new HttpOptions(uri);
                break;
            case "PATCH":
                request = new HttpPatch(uri);
                expectedRequest = new HttpPatch(uri);
                break;
            case "POST":
                request = new HttpPost(uri);
                expectedRequest = new HttpPost(uri);
                break;
            case "PUT":
                request = new HttpPut(uri);
                expectedRequest = new HttpPut(uri);
                break;
            case "TRACE":
                request = new HttpTrace(uri);
                expectedRequest = new HttpTrace(uri);
                break;
            default:
                throw new UnsupportedOperationException("method not supported: " + method);
        }

        HttpEntity entity = null;
        boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
        if (hasBody) {
            entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON);
            ((HttpEntityEnclosingRequest) request).setEntity(entity);
        if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) {
            HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON);
            ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity);
            request.setEntity(entity);
        }

        Header[] headers = new Header[0];
        final Set<String> uniqueNames = new HashSet<>();
        if (randomBoolean()) {
            headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
            Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
            request.setHeaders(headers);
            for (Header header : headers) {
                request.addHeader(header);
                expectedRequest.addHeader(header);
                uniqueNames.add(header.getName());
            }
        }
        for (Header defaultHeader : defaultHeaders) {
            // request level headers override default headers
            if (uniqueNames.contains(defaultHeader.getName()) == false) {
                request.addHeader(defaultHeader);
                expectedRequest.addHeader(defaultHeader);
            }
        }

        try {
            if (hasParams == false && hasBody == false && randomBoolean()) {
                restClient.performRequest(method, uriAsString, headers);
            } else if (hasBody == false && randomBoolean()) {
                restClient.performRequest(method, uriAsString, params, headers);
            } else {
                restClient.performRequest(method, uriAsString, params, entity, headers);
            }
            restClient.performRequest(request);
        } catch(ResponseException e) {
            //all good
        }
        return request;
        return expectedRequest;
    }

    /**
     * @deprecated prefer {@link RestClient#performRequest(Request)}.
     */
    @Deprecated
    private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
    }

    /**
     * @deprecated prefer {@link RestClient#performRequest(Request)}.
     */
    @Deprecated
    private Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        int methodSelector;
        if (params.isEmpty()) {
@@ -52,6 +52,30 @@ public class RestClientTests extends RestClientTestCase {
    }

    public void testPerformAsyncWithUnsupportedMethod() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    fail("should have failed because of unsupported method");
                }

                @Override
                public void onFailure(Exception exception) {
                    assertThat(exception, instanceOf(UnsupportedOperationException.class));
                    assertEquals("http method not supported: unsupported", exception.getMessage());
                    latch.countDown();
                }
            });
            latch.await();
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}.
     */
    @Deprecated
    public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() {
@@ -71,7 +95,11 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    public void testPerformAsyncWithNullParams() throws Exception {
    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}.
     */
    @Deprecated
    public void testPerformOldStyleAsyncWithNullParams() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() {
@@ -83,7 +111,7 @@ public class RestClientTests extends RestClientTestCase {
                @Override
                public void onFailure(Exception exception) {
                    assertThat(exception, instanceOf(NullPointerException.class));
                    assertEquals("params must not be null", exception.getMessage());
                    assertEquals("parameters cannot be null", exception.getMessage());
                    latch.countDown();
                }
            });
@@ -91,7 +119,11 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    public void testPerformAsyncWithNullHeaders() throws Exception {
    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
     */
    @Deprecated
    public void testPerformOldStyleAsyncWithNullHeaders() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            ResponseListener listener = new ResponseListener() {
@@ -103,7 +135,7 @@ public class RestClientTests extends RestClientTestCase {
                @Override
                public void onFailure(Exception exception) {
                    assertThat(exception, instanceOf(NullPointerException.class));
                    assertEquals("request header must not be null", exception.getMessage());
                    assertEquals("header cannot be null", exception.getMessage());
                    latch.countDown();
                }
            };
@@ -113,6 +145,30 @@ public class RestClientTests extends RestClientTestCase {
    }

    public void testPerformAsyncWithWrongEndpoint() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    fail("should have failed because of wrong endpoint");
                }

                @Override
                public void onFailure(Exception exception) {
                    assertThat(exception, instanceOf(IllegalArgumentException.class));
                    assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
                    latch.countDown();
                }
            });
            latch.await();
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}.
     */
    @Deprecated
    public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient restClient = createRestClient()) {
            restClient.performRequestAsync("GET", "::http:///", new ResponseListener() {
@@ -175,6 +231,10 @@ public class RestClientTests extends RestClientTestCase {
        }
    }

    /**
     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
     */
    @Deprecated
    public void testNullPath() throws IOException {
        try (RestClient restClient = createRestClient()) {
            for (String method : getHttpMethods()) {
@@ -182,7 +242,7 @@ public class RestClientTests extends RestClientTestCase {
                    restClient.performRequest(method, null);
                    fail("path set to null should fail!");
                } catch (NullPointerException e) {
                    assertEquals("path must not be null", e.getMessage());
                    assertEquals("endpoint cannot be null", e.getMessage());
                }
            }
        }
@@ -27,7 +27,9 @@ import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.impl.nio.reactor.IOReactorConfig;

@@ -37,6 +39,7 @@ import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;
@@ -134,107 +137,61 @@ public class RestClientDocumentation {
        }

        {
            //tag::rest-client-verb-endpoint
            Response response = restClient.performRequest("GET", "/"); // <1>
            //end::rest-client-verb-endpoint
            //tag::rest-client-sync
            Request request = new Request(
                "GET", // <1>
                "/");  // <2>
            Response response = restClient.performRequest(request);
            //end::rest-client-sync
        }
        {
            //tag::rest-client-headers
            Response response = restClient.performRequest("GET", "/", new BasicHeader("header", "value"));
            //end::rest-client-headers
        }
        {
            //tag::rest-client-verb-endpoint-params
            Map<String, String> params = Collections.singletonMap("pretty", "true");
            Response response = restClient.performRequest("GET", "/", params); // <1>
            //end::rest-client-verb-endpoint-params
        }
        {
            //tag::rest-client-verb-endpoint-params-body
            Map<String, String> params = Collections.emptyMap();
            String jsonString = "{" +
                "\"user\":\"kimchy\"," +
                "\"postDate\":\"2013-01-30\"," +
                "\"message\":\"trying out Elasticsearch\"" +
                "}";
            HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
            Response response = restClient.performRequest("PUT", "/posts/doc/1", params, entity); // <1>
            //end::rest-client-verb-endpoint-params-body
        }
        {
            //tag::rest-client-response-consumer
            Map<String, String> params = Collections.emptyMap();
            HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
                new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
            Response response = restClient.performRequest("GET", "/posts/_search", params, null, consumerFactory); // <1>
            //end::rest-client-response-consumer
        }
        {
            //tag::rest-client-verb-endpoint-async
            ResponseListener responseListener = new ResponseListener() {
            //tag::rest-client-async
            Request request = new Request(
                "GET", // <1>
                "/");  // <2>
            restClient.performRequestAsync(request, new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    // <1>
                    // <3>
                }

                @Override
                public void onFailure(Exception exception) {
                    // <2>
                    // <4>
                }
            };
            restClient.performRequestAsync("GET", "/", responseListener); // <3>
            //end::rest-client-verb-endpoint-async

            //tag::rest-client-headers-async
            Header[] headers = {
                new BasicHeader("header1", "value1"),
                new BasicHeader("header2", "value2")
            };
            restClient.performRequestAsync("GET", "/", responseListener, headers);
            //end::rest-client-headers-async

            //tag::rest-client-verb-endpoint-params-async
            Map<String, String> params = Collections.singletonMap("pretty", "true");
            restClient.performRequestAsync("GET", "/", params, responseListener); // <1>
            //end::rest-client-verb-endpoint-params-async

            //tag::rest-client-verb-endpoint-params-body-async
            String jsonString = "{" +
                "\"user\":\"kimchy\"," +
                "\"postDate\":\"2013-01-30\"," +
                "\"message\":\"trying out Elasticsearch\"" +
                "}";
            HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
            restClient.performRequestAsync("PUT", "/posts/doc/1", params, entity, responseListener); // <1>
            //end::rest-client-verb-endpoint-params-body-async

            //tag::rest-client-response-consumer-async
            HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
                new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
            restClient.performRequestAsync("GET", "/posts/_search", params, null, consumerFactory, responseListener); // <1>
            //end::rest-client-response-consumer-async
            });
            //end::rest-client-async
        }
        {
            //tag::rest-client-response2
            Response response = restClient.performRequest("GET", "/");
            RequestLine requestLine = response.getRequestLine(); // <1>
            HttpHost host = response.getHost(); // <2>
            int statusCode = response.getStatusLine().getStatusCode(); // <3>
            Header[] headers = response.getHeaders(); // <4>
            String responseBody = EntityUtils.toString(response.getEntity()); // <5>
            //end::rest-client-response2
            Request request = new Request("GET", "/");
            //tag::rest-client-parameters
            request.addParameter("pretty", "true");
            //end::rest-client-parameters
            //tag::rest-client-body
            request.setEntity(new StringEntity(
                "{\"json\":\"text\"}",
                ContentType.APPLICATION_JSON));
            //end::rest-client-body
            //tag::rest-client-headers
            request.setHeaders(
                new BasicHeader("Accept", "text/plain"),
                new BasicHeader("Cache-Control", "no-cache"));
            //end::rest-client-headers
            //tag::rest-client-response-consumer
            request.setHttpAsyncResponseConsumerFactory(
                new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
            //end::rest-client-response-consumer
        }
        {
            HttpEntity[] documents = new HttpEntity[10];
            //tag::rest-client-async-example
            final CountDownLatch latch = new CountDownLatch(documents.length);
            for (int i = 0; i < documents.length; i++) {
                Request request = new Request("PUT", "/posts/doc/" + i);
                //let's assume that the documents are stored in an HttpEntity array
                request.setEntity(documents[i]);
                restClient.performRequestAsync(
                        "PUT",
                        "/posts/doc/" + i,
                        Collections.<String, String>emptyMap(),
                        //let's assume that the documents are stored in an HttpEntity array
                        documents[i],
                        request,
                        new ResponseListener() {
                            @Override
                            public void onSuccess(Response response) {
@@ -253,7 +210,16 @@ public class RestClientDocumentation {
            latch.await();
            //end::rest-client-async-example
        }

        {
            //tag::rest-client-response2
            Response response = restClient.performRequest("GET", "/");
            RequestLine requestLine = response.getRequestLine(); // <1>
            HttpHost host = response.getHost(); // <2>
            int statusCode = response.getStatusLine().getStatusCode(); // <3>
            Header[] headers = response.getHeaders(); // <4>
            String responseBody = EntityUtils.toString(response.getEntity()); // <5>
            //end::rest-client-response2
        }
    }

    @SuppressWarnings("unused")
@@ -20,7 +20,6 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'
apply plugin: 'ru.vyarus.animalsniffer'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

@@ -52,8 +51,6 @@ dependencies {
  testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
  testCompile "org.elasticsearch:securemock:${versions.securemock}"
  testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
  testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
  signature "org.codehaus.mojo.signature:java17:1.0@signature"
}

forbiddenApisMain {
@@ -60,8 +60,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;

//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
public class ElasticsearchHostsSnifferTests extends RestClientTestCase {

    private int sniffRequestTimeout;
@@ -217,6 +217,24 @@ subprojects {
  }
  check.dependsOn checkNotice

  if (project.name == 'zip' || project.name == 'tar') {
    task checkMlCppNotice {
      dependsOn buildDist, checkExtraction
      onlyIf toolExists
      doLast {
        // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines
        final List<String> expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003")
        final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt")
        final List<String> actualLines = Files.readAllLines(noticePath)
        for (final String expectedLine : expectedLines) {
          if (actualLines.contains(expectedLine) == false) {
            throw new GradleException("expected [${noticePath}] to contain [${expectedLine}] but it did not")
          }
        }
      }
    }
    check.dependsOn checkMlCppNotice
  }
}

/*****************************************************************************
@@ -1,12 +1,26 @@
[[es-release-notes]]
= {es} Release Notes

[partintro]
--
// Use these for links to issue and pulls. Note issues and pulls redirect one to
// each other on Github, so don't worry too much on using the right prefix.
// :issue: https://github.com/elastic/elasticsearch/issues/
// :pull: https://github.com/elastic/elasticsearch/pull/
:issue: https://github.com/elastic/elasticsearch/issues/
:pull: https://github.com/elastic/elasticsearch/pull/

= Elasticsearch Release Notes
This section summarizes the changes in each release.

== Elasticsearch 7.0.0
* <<release-notes-7.0.0>>
* <<release-notes-6.4.0>>

--

[[release-notes-7.0.0]]
== {es} 7.0.0

[float]
[[breaking-7.0.0]]
=== Breaking Changes

<<write-thread-pool-fallback, Removed `thread_pool.bulk.*` settings and
@@ -14,30 +28,69 @@
<<remove-suggest-metric, Removed `suggest` metric on stats APIs>> ({pull}29635[#29635])

=== Breaking Java Changes
<<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185])

=== Deprecations
Machine Learning::
* The `max_running_jobs` node property is removed in this release. Use the
`xpack.ml.max_open_jobs` setting instead. For more information, see <<ml-settings>>.

=== New Features
Monitoring::
* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1`
to disable monitoring data collection. Use `xpack.monitoring.collection.enabled`
and set it to `false` (its default), which was added in 6.3.0.

=== Enhancements
Security::
* The fields returned as part of the mappings section by get index, get
mappings, get field mappings, and field capabilities API are now only the
ones that the user is authorized to access in case field level security is enabled.

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

[float]
=== Bug Fixes

=== Regressions
Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])

=== Known Issues
//[float]
//=== Regressions

== Elasticsearch version 6.3.0
//[float]
//=== Known Issues

=== New Features
[[release-notes-6.4.0]]
== {es} 6.4.0

//[float]
//=== New Features

[float]
=== Enhancements

{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])

Added new "Request" object flavored request methods. Prefer these instead of the
multi-argument versions. ({pull}29623[#29623])
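
For illustration, a minimal before-and-after sketch of the two styles taken from the test changes in this commit (`entity` stands for any `HttpEntity` and the endpoint is a placeholder):

["source","java"]
--------------------------------------------------
// old style, now deprecated: verb, endpoint, params and entity as separate arguments
Response r1 = restClient.performRequest("PUT", "/posts/doc/1",
    Collections.<String, String>emptyMap(), entity);

// new style: build a Request object, then hand it to the client
Request request = new Request("PUT", "/posts/doc/1");
request.setEntity(entity);
Response r2 = restClient.performRequest(request);
--------------------------------------------------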

[float]
=== Bug Fixes

=== Regressions

=== Known Issues
Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])

//[float]
//=== Regressions

//[float]
//=== Known Issues
@@ -218,93 +218,74 @@ http://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/
[[java-rest-low-usage-requests]]
=== Performing requests

Once the `RestClient` has been created, requests can be sent by calling one of
the available `performRequest` or `performRequestAsync` method variants.
The `performRequest` methods are synchronous and return the `Response` directly,
meaning that the client will block and wait for a response to be returned.
The `performRequestAsync` variants return `void` and accept an extra
`ResponseListener` as an argument instead, meaning that they are executed
asynchronously. The provided listener will be notified upon request completion
or failure.
Once the `RestClient` has been created, requests can be sent by calling either
`performRequest` or `performRequestAsync`. `performRequest` is synchronous and
will block the calling thread and return the `Response` when the request is
successful or throw an exception if it fails. `performRequestAsync` is
asynchronous and accepts a `ResponseListener` argument that it calls with a
`Response` when the request is successful or with an `Exception` if it fails.

This is synchronous:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint]
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-sync]
--------------------------------------------------
<1> Send a request by providing only the verb and the endpoint, minimum set
of required arguments
<1> The HTTP method (`GET`, `POST`, `HEAD`, etc)
<2> The endpoint on the server

And this is asynchronous:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params]
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async]
--------------------------------------------------
<1> Send a request by providing the verb, the endpoint, and some querystring
parameter
<1> The HTTP method (`GET`, `POST`, `HEAD`, etc)
<2> The endpoint on the server
<3> Handle the response
<4> Handle the failure
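
Since the tagged snippets live in RestClientDocumentation.java elsewhere in this commit, here is the same flow inline as a minimal sketch (the root endpoint `/` is just a placeholder):

["source","java"]
--------------------------------------------------
Request request = new Request("GET", "/");

// synchronous: blocks until the response arrives or an exception is thrown
Response response = restClient.performRequest(request);

// asynchronous: the listener is invoked on completion or failure
restClient.performRequestAsync(request, new ResponseListener() {
    @Override
    public void onSuccess(Response response) {
        // handle the response
    }

    @Override
    public void onFailure(Exception exception) {
        // handle the failure
    }
});
--------------------------------------------------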

You can add request parameters to the request object:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body]
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-parameters]
--------------------------------------------------
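
For example, the inline equivalent of the tagged snippet (the `pretty` flag is just an illustration):

["source","java"]
--------------------------------------------------
request.addParameter("pretty", "true");
--------------------------------------------------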

You can set the body of the request to any `HttpEntity`:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body]
--------------------------------------------------
<1> Send a request by providing the verb, the endpoint, optional querystring
parameters and the request body enclosed in an `org.apache.http.HttpEntity`
object

IMPORTANT: The `ContentType` specified for the `HttpEntity` is important
because it will be used to set the `Content-Type` header so that Elasticsearch
can properly parse the content.
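
Inline, that looks like the following minimal sketch (the JSON payload is a placeholder):

["source","java"]
--------------------------------------------------
request.setEntity(new StringEntity(
    "{\"json\":\"text\"}",
    ContentType.APPLICATION_JSON));
--------------------------------------------------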

And you can set a list of headers to send with the request:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
--------------------------------------------------
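
For example (header names and values are placeholders):

["source","java"]
--------------------------------------------------
request.setHeaders(
    new BasicHeader("Accept", "text/plain"),
    new BasicHeader("Cache-Control", "no-cache"));
--------------------------------------------------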

You can also customize the response consumer used to buffer the asynchronous
responses. The default consumer will buffer up to 100MB of response on the
JVM heap. If the response is larger, the request will fail. You could,
for example, lower the maximum size which might be useful if you are running
in a heap constrained environment:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer]
--------------------------------------------------
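
Inline, the consumer factory override looks like this sketch (30 MB is the example cap used elsewhere in this commit):

["source","java"]
--------------------------------------------------
request.setHttpAsyncResponseConsumerFactory(
    new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
--------------------------------------------------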
<1> Send a request by providing the verb, the endpoint, optional querystring
parameters, optional request body and the optional factory that is used to
create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`]
callback instance per request attempt. Controls how the response body gets
streamed from a non-blocking HTTP connection on the client side. When not
provided, the default implementation is used which buffers the whole response
body in heap memory, up to 100 MB.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-async]
--------------------------------------------------
<1> Define what needs to happen when the request is successfully performed
<2> Define what needs to happen when the request fails, meaning whenever
there's a connection error or a response with error status code is returned.
<3> Send an async request by providing only the verb, the endpoint, and the
response listener to be notified once the request is completed, minimum set
of required arguments
==== Multiple parallel asynchronous actions

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-async]
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, some querystring
parameter and the response listener to be notified once the request is completed

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body-async]
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, optional
querystring parameters, the request body enclosed in an
`org.apache.http.HttpEntity` object and the response listener to be
notified once the request is completed

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer-async]
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, optional
querystring parameters, optional request body and the optional factory that is
used to create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`]
callback instance per request attempt. Controls how the response body gets
streamed from a non-blocking HTTP connection on the client side. When not
provided, the default implementation is used which buffers the whole response
body in heap memory, up to 100 MB.

The following is a basic example of how async requests can be sent:
The client is quite happy to execute many actions in parallel. The following
example indexes many documents in parallel. In a real world scenario you'd
probably want to use the `_bulk` API instead, but the example is illustrative.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
@@ -314,19 +295,6 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-examp
<2> Handle the returned exception, due to communication error or a response
with status code that indicates an error

Each of the above listed method supports sending headers along with the
request through a `Header` varargs argument as in the following examples:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
--------------------------------------------------

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers-async]
--------------------------------------------------

[[java-rest-low-usage-responses]]
=== Reading responses

@@ -396,4 +364,3 @@ still yields the same response as it did. Enable trace logging for the `tracer`
package to have such log lines printed out. Do note that this type of logging is
expensive and should not be enabled at all times in production environments,
but rather temporarily used only when needed.

@@ -19,9 +19,6 @@ the configured remote cluster alias.
`seeds`::
The configured initial seed transport addresses of the remote cluster.

`http_addresses`::
The published http addresses of all connected remote nodes.

`connected`::
True if there is at least one connection to the remote cluster.
@@ -1,13 +1,12 @@
[[cluster-reroute]]
== Cluster Reroute

The reroute command allows to explicitly execute a cluster reroute
allocation command including specific commands. For example, a shard can
be moved from one node to another explicitly, an allocation can be
canceled, or an unassigned shard can be explicitly allocated on a
specific node.
The reroute command allows for manual changes to the allocation of individual
shards in the cluster. For example, a shard can be moved from one node to
another explicitly, an allocation can be cancelled, and an unassigned shard can
be explicitly allocated to a specific node.

Here is a short example of how a simple reroute API call:
Here is a short example of a simple reroute API call:

[source,js]
--------------------------------------------------
@@ -32,59 +31,53 @@ POST /_cluster/reroute
// CONSOLE
// TEST[skip:doc tests run with only a single node]

An important aspect to remember is the fact that once when an allocation
occurs, the cluster will aim at re-balancing its state back to an even
state. For example, if the allocation includes moving a shard from
`node1` to `node2`, in an `even` state, then another shard will be moved
from `node2` to `node1` to even things out.
It is important to note that after processing any reroute commands
Elasticsearch will perform rebalancing as normal (respecting the values of
settings such as `cluster.routing.rebalance.enable`) in order to remain in a
balanced state. For example, if the requested allocation includes moving a
shard from `node1` to `node2` then this may cause a shard to be moved from
`node2` back to `node1` to even things out.

The cluster can be set to disable allocations, which means that only the
explicitly allocations will be performed. Obviously, only once all
commands has been applied, the cluster will aim to be re-balance its
state.
The cluster can be set to disable allocations using the
`cluster.routing.allocation.enable` setting. If allocations are disabled then
the only allocations that will be performed are explicit ones given using the
`reroute` command, and consequent allocations due to rebalancing.

Another option is to run the commands in `dry_run` (as a URI flag, or in
the request body). This will cause the commands to apply to the current
cluster state, and return the resulting cluster after the commands (and
re-balancing) has been applied.
It is possible to run `reroute` commands in "dry run" mode by using the
`?dry_run` URI query parameter, or by passing `"dry_run": true` in the request
body. This will calculate the result of applying the commands to the current
cluster state, and return the resulting cluster state after the commands (and
re-balancing) has been applied, but will not actually perform the requested
changes.
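
For illustration, a dry run issued through the low-level Java client's new `Request` object (index and node names are placeholders):

["source","java"]
--------------------------------------------------
Request request = new Request("POST", "/_cluster/reroute");
request.addParameter("dry_run", "true");
request.setEntity(new StringEntity(
    "{\"commands\":[{\"move\":{\"index\":\"test\",\"shard\":0,"
        + "\"from_node\":\"node1\",\"to_node\":\"node2\"}}]}",
    ContentType.APPLICATION_JSON));
Response response = restClient.performRequest(request);
--------------------------------------------------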

If the `explain` parameter is specified, a detailed explanation of why the
commands could or could not be executed is returned.
If the `?explain` URI query parameter is included then a detailed explanation
of why the commands could or could not be executed is included in the response.

The commands supported are:

`move`::
Move a started shard from one node to another node. Accepts
`index` and `shard` for index name and shard number, `from_node` for the
node to move the shard `from`, and `to_node` for the node to move the
node to move the shard from, and `to_node` for the node to move the
shard to.

`cancel`::
Cancel allocation of a shard (or recovery). Accepts `index`
and `shard` for index name and shard number, and `node` for the node to
cancel the shard allocation on. It also accepts `allow_primary` flag to
explicitly specify that it is allowed to cancel allocation for a primary
shard. This can be used to force resynchronization of existing replicas
from the primary shard by cancelling them and allowing them to be
reinitialized through the standard reallocation process.
Cancel allocation of a shard (or recovery). Accepts `index` and `shard` for
index name and shard number, and `node` for the node to cancel the shard
allocation on. This can be used to force resynchronization of existing
replicas from the primary shard by cancelling them and allowing them to be
reinitialized through the standard recovery process. By default only
replica shard allocations can be cancelled. If it is necessary to cancel
the allocation of a primary shard then the `allow_primary` flag must also
be included in the request.

`allocate_replica`::
Allocate an unassigned replica shard to a node. Accepts the
`index` and `shard` for index name and shard number, and `node` to
allocate the shard to. Takes <<modules-cluster,allocation deciders>> into account.

Two more commands are available that allow the allocation of a primary shard
to a node. These commands should however be used with extreme care, as primary
shard allocation is usually fully automatically handled by Elasticsearch.
Reasons why a primary shard cannot be automatically allocated include the following:

- A new index was created but there is no node which satisfies the allocation deciders.
- An up-to-date shard copy of the data cannot be found on the current data nodes in
the cluster. To prevent data loss, the system does not automatically promote a stale
shard copy to primary.
Allocate an unassigned replica shard to a node. Accepts `index` and `shard`
for index name and shard number, and `node` to allocate the shard to. Takes
<<modules-cluster,allocation deciders>> into account.

[float]
=== Retry failed shards
=== Retrying failed allocations

The cluster will attempt to allocate a shard a maximum of
`index.allocation.max_retries` times in a row (defaults to `5`), before giving
@@ -93,36 +86,48 @@ structural problems such as having an analyzer which refers to a stopwords
file which doesn't exist on all nodes.

Once the problem has been corrected, allocation can be manually retried by
calling the <<cluster-reroute,`reroute`>> API with `?retry_failed`, which
will attempt a single retry round for these shards.
calling the <<cluster-reroute,`reroute`>> API with the `?retry_failed` URI
query parameter, which will attempt a single retry round for these shards.
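
A minimal sketch of that call with the low-level Java client (no request body is required):

["source","java"]
--------------------------------------------------
Request request = new Request("POST", "/_cluster/reroute");
request.addParameter("retry_failed", "true");
Response response = restClient.performRequest(request);
--------------------------------------------------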

[float]
=== Forced allocation on unrecoverable errors

Two more commands are available that allow the allocation of a primary shard to
a node. These commands should however be used with extreme care, as primary
shard allocation is usually fully automatically handled by Elasticsearch.
Reasons why a primary shard cannot be automatically allocated include the
following:

- A new index was created but there is no node which satisfies the allocation
deciders.
- An up-to-date shard copy of the data cannot be found on the current data
nodes in the cluster. To prevent data loss, the system does not automatically
promote a stale shard copy to primary.

The following two commands are dangerous and may result in data loss. They are
meant to be used in cases where the original data can not be recovered and the cluster
administrator accepts the loss. If you have suffered a temporary issue that has been
fixed, please see the `retry_failed` flag described above.
meant to be used in cases where the original data can not be recovered and the
cluster administrator accepts the loss. If you have suffered a temporary issue
that can be fixed, please see the `retry_failed` flag described above. To
emphasise: if these commands are performed and then a node joins the cluster
that holds a copy of the affected shard then the copy on the newly-joined node
will be deleted or overwritten.

`allocate_stale_primary`::
Allocate a primary shard to a node that holds a stale copy. Accepts the
`index` and `shard` for index name and shard number, and `node` to
allocate the shard to. Using this command may lead to data loss
for the provided shard id. If a node which has the good copy of the
data rejoins the cluster later on, that data will be overwritten with
the data of the stale copy that was forcefully allocated with this
command. To ensure that these implications are well-understood,
this command requires the special field `accept_data_loss` to be
explicitly set to `true` for it to work.
`index` and `shard` for index name and shard number, and `node` to allocate
the shard to. Using this command may lead to data loss for the provided
shard id. If a node which has the good copy of the data rejoins the cluster
later on, that data will be deleted or overwritten with the data of the
stale copy that was forcefully allocated with this command. To ensure that
these implications are well-understood, this command requires the flag
`accept_data_loss` to be explicitly set to `true`.

`allocate_empty_primary`::
Allocate an empty primary shard to a node. Accepts the
`index` and `shard` for index name and shard number, and `node` to
allocate the shard to. Using this command leads to a complete loss
of all data that was indexed into this shard, if it was previously
started. If a node which has a copy of the
data rejoins the cluster later on, that data will be deleted!
To ensure that these implications are well-understood,
this command requires the special field `accept_data_loss` to be
explicitly set to `true` for it to work.
Allocate an empty primary shard to a node. Accepts the `index` and `shard`
for index name and shard number, and `node` to allocate the shard to. Using
this command leads to a complete loss of all data that was indexed into
this shard, if it was previously started. If a node which has a copy of the
data rejoins the cluster later on, that data will be deleted. To ensure
that these implications are well-understood, this command requires the flag
`accept_data_loss` to be explicitly set to `true`.
@@ -15,6 +15,12 @@ of the cluster state (its size when serialized for transmission over
the network), and the cluster state itself, which can be filtered to
only retrieve the parts of interest, as described below.

The cluster's `cluster_uuid` is also returned as part of the top-level
response, in addition to the `metadata` section. added[6.4.0]

NOTE: While the cluster is still forming, it is possible for the `cluster_uuid`
to be `_na_` as well as the cluster state's version to be `-1`.
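
For example, fetching the cluster state with the low-level Java client and reading the body as a string (a sketch; parsing the JSON is left to the caller):

["source","java"]
--------------------------------------------------
Request request = new Request("GET", "/_cluster/state");
Response response = restClient.performRequest(request);
String body = EntityUtils.toString(response.getEntity()); // top level contains "cluster_uuid"
--------------------------------------------------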

By default, the cluster state request is routed to the master node, to
ensure that the latest cluster state is returned.
For debugging purposes, you can retrieve the cluster state local to a
@@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`.

`failures`::

Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
Array of failures if there were any unrecoverable errors during the process. If
this is non-empty then the request aborted because of those failures.
Delete-by-query is implemented using batches and any failure causes the entire
process to abort but all failures in the current batch are collected into the
array. You can use the `conflicts` option to prevent reindex from aborting on
version conflicts.
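
For example, a delete-by-query that proceeds past version conflicts instead of aborting (the index name and query are placeholders):

["source","java"]
--------------------------------------------------
Request request = new Request("POST", "/twitter/_delete_by_query");
request.addParameter("conflicts", "proceed");
request.setEntity(new StringEntity(
    "{\"query\":{\"match_all\":{}}}",
    ContentType.APPLICATION_JSON));
Response response = restClient.performRequest(request);
--------------------------------------------------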

[float]
@@ -161,12 +161,12 @@ POST _reindex

`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `_doc` and
`post` types in the `twitter` and `blog` index. The copied documents would include the
`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
specific parameters, you can use `query`.

The Reindex API makes no effort to handle ID collisions. For such issues, the target index
will remain valid, but it's not easy to predict which document will survive because
the iteration order isn't well defined.

[source,js]
@@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`.

`failures`::

Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
Array of failures if there were any unrecoverable errors during the process. If
this is non-empty then the request aborted because of those failures. Reindex
is implemented using batches and any failure causes the entire process to abort
but all failures in the current batch are collected into the array. You can use
the `conflicts` option to prevent reindex from aborting on version conflicts.

[float]
[[docs-reindex-task-api]]
@@ -1004,7 +1006,7 @@ number for most indices. If slicing manually or otherwise tuning
automatic slicing, use these guidelines.

Query performance is most efficient when the number of `slices` is equal to the
number of shards in the index. If that number is large (e.g. 500),
choose a lower number as too many `slices` will hurt performance. Setting
`slices` higher than the number of shards generally does not improve efficiency
and adds overhead.
@@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources.
[float]
=== Reindex daily indices

You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
to reindex daily indices to apply a new template to the existing documents.

Assuming you have indices consisting of documents as follows:
@@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`.

`failures`::

Array of all indexing failures. If this is non-empty then the request aborted
because of those failures. See `conflicts` for how to prevent version conflicts
from aborting the operation.
Array of failures if there were any unrecoverable errors during the process. If
this is non-empty then the request aborted because of those failures.
Update-by-query is implemented using batches and any failure causes the entire
process to abort but all failures in the current batch are collected into the
array. You can use the `conflicts` option to prevent reindex from aborting on
version conflicts.

[float]
@@ -23,7 +23,8 @@ The merge scheduler supports the following _dynamic_ setting:

`index.merge.scheduler.max_thread_count`::

The maximum number of threads that may be merging at once. Defaults to
The maximum number of threads on a single shard that may be merging at once.
Defaults to
`Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))`
which works well for a good solid-state-disk (SSD). If your index is on
spinning platter drives instead, decrease this to 1.
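
Since the setting is dynamic, it can be updated on a live index through the settings API, for example (the index name is a placeholder):

["source","java"]
--------------------------------------------------
Request request = new Request("PUT", "/my_index/_settings");
request.setEntity(new StringEntity(
    "{\"index.merge.scheduler.max_thread_count\": 1}",
    ContentType.APPLICATION_JSON));
Response response = restClient.performRequest(request);
--------------------------------------------------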

@@ -5,4 +5,4 @@ include::testing.asciidoc[]

include::glossary.asciidoc[]

include::release-notes.asciidoc[]
include::{docdir}/../CHANGELOG.asciidoc[]
@@ -119,9 +119,15 @@ POST my_source_index/_shrink/my_target_index
segment.

NOTE: Mappings may not be specified in the `_shrink` request, and all
`index.analysis.*` and `index.similarity.*` settings will be overwritten with
the settings from the source index.
NOTE: Mappings may not be specified in the `_shrink` request.

NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
and `index.sort` settings, index settings on the source index are not copied
during a shrink operation. With the exception of non-copyable settings, settings
from the source index can be copied to the target index by adding the URL
parameter `copy_settings=true` to the request.

deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
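
For example, opting in to copying the source settings (index names are placeholders):

["source","java"]
--------------------------------------------------
Request request = new Request("POST", "/my_source_index/_shrink/my_target_index");
request.addParameter("copy_settings", "true");
Response response = restClient.performRequest(request);
--------------------------------------------------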

[float]
=== Monitoring the shrink process
@@ -175,9 +175,15 @@ POST my_source_index/_split/my_target_index
number of shards in the source index.

NOTE: Mappings may not be specified in the `_split` request, and all
`index.analysis.*` and `index.similarity.*` settings will be overwritten with
the settings from the source index.
NOTE: Mappings may not be specified in the `_split` request.

NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
and `index.sort` settings, index settings on the source index are not copied
during a split operation. With the exception of non-copyable settings, settings
from the source index can be copied to the target index by adding the URL
parameter `copy_settings=true` to the request.

deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
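
Likewise for `_split`; a sketch with illustrative index names and target shard count (`_split` requires the target's `index.number_of_shards` in the request body):

[source,js]
--------------------------------------------------
POST my_source_index/_split/my_target_index?copy_settings=true
{
  "settings": {
    "index.number_of_shards": 4
  }
}
--------------------------------------------------
// CONSOLE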

[float]
=== Monitoring the split process
@@ -1,12 +0,0 @@
[[breaking-changes-6.4]]
== Breaking changes in 6.4

[[breaking_64_api_changes]]
=== API changes

==== Field capabilities request format

In the past, `fields` could be provided either as a parameter, or as part of the request
body. Specifying `fields` in the request body is now deprecated, and instead they should
always be supplied through a request parameter. In 7.0.0, the field capabilities API will
not accept `fields` supplied in the request body.
@@ -22,6 +22,32 @@ The following parameters starting with underscore have been removed:
Instead of these removed parameters, use their non camel case equivalents without
starting underscore, e.g. use `version_type` instead of `_version_type` or `versionType`.

==== Thread pool info

In previous versions of Elasticsearch, the thread pool info returned in the
<<cluster-nodes-info,nodes info API>> returned `min` and `max` values reflecting
the configured minimum and maximum number of threads that could be in each
thread pool. The trouble with this representation is that it does not align with
the configuration parameters used to configure thread pools. For
<<modules-threadpool,scaling thread pools>>, the minimum number of threads is
configured by a parameter called `core` and the maximum number of threads is
configured by a parameter called `max`. For <<modules-threadpool,fixed thread
pools>>, there is only one configuration parameter along these lines and that
parameter is called `size`, reflecting the fixed number of threads in the
pool. This discrepancy between the API and the configuration parameters has been
rectified. Now, the API will report `core` and `max` for scaling thread pools,
and `size` for fixed thread pools.

Similarly, in the cat thread pool API the existing `size` output has been
renamed to `pool_size` which reflects the number of threads currently in the
pool; the shortcut for this value has been changed from `s` to `psz`. The `min`
output has been renamed to `core` with a shortcut of `cr`, the shortcut for
`max` has been changed to `mx`, and the `size` output with a shortcut of `sz`
has been reused to report the configured number of threads in the pool. This
aligns the output of the API with the configuration values for thread
pools. Note that `core` and `max` will be populated for scaling thread pools,
and `size` will be populated for fixed thread pools.
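
As a sketch, the renamed columns can be requested explicitly with the `h` parameter (assuming the column names described above):

[source,js]
--------------------------------------------------
GET /_cat/thread_pool?v&h=name,core,max,size,pool_size
--------------------------------------------------
// CONSOLE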

==== The parameter `fields` deprecated in 6.x has been removed from Bulk request
and Update request. The Update API returns `400 - Bad request` if the request contains
unknown parameters (instead of ignoring them, as in previous versions).
@@ -33,3 +59,9 @@ Previously, `suggest` stats were folded into `search` stats. Support for the
`suggest` metric on the indices stats and nodes stats APIs remained for
backwards compatibility. Backwards support for the `suggest` metric was
deprecated in 6.3.0 and now removed in 7.0.0.

[[remove-field-caps-body]]

In the past, `fields` could be provided either as a parameter, or as part of the request
body. Specifying `fields` in the request body as opposed to a parameter was deprecated
in 6.4.0, and is now unsupported in 7.0.0.
@@ -1,9 +1,9 @@
[[disk-allocator]]
=== Disk-based Shard Allocation

Elasticsearch factors in the available disk space on a node before deciding
whether to allocate new shards to that node or to actively relocate shards
away from that node.
Elasticsearch considers the available disk space on a node before deciding
whether to allocate new shards to that node or to actively relocate shards away
from that node.

Below are the settings that can be configured in the `elasticsearch.yml` config
file or updated dynamically on a live cluster with the
@@ -15,29 +15,33 @@ file or updated dynamically on a live cluster with the

`cluster.routing.allocation.disk.watermark.low`::

Controls the low watermark for disk usage. It defaults to 85%, meaning ES will
not allocate new shards to nodes once they have more than 85% disk used. It
can also be set to an absolute byte value (like 500mb) to prevent ES from
allocating shards if less than the configured amount of space is available.
Controls the low watermark for disk usage. It defaults to `85%`, meaning
that Elasticsearch will not allocate shards to nodes that have more than
85% disk used. It can also be set to an absolute byte value (like `500mb`)
to prevent Elasticsearch from allocating shards if less than the specified
amount of space is available. This setting has no effect on the primary
shards of newly-created indices or, specifically, any shards that have
never previously been allocated.

`cluster.routing.allocation.disk.watermark.high`::

Controls the high watermark. It defaults to 90%, meaning ES will attempt to
relocate shards to another node if the node disk usage rises above 90%. It can
also be set to an absolute byte value (similar to the low watermark) to
relocate shards once less than the configured amount of space is available on
the node.
Controls the high watermark. It defaults to `90%`, meaning that
Elasticsearch will attempt to relocate shards away from a node whose disk
usage is above 90%. It can also be set to an absolute byte value (similarly
to the low watermark) to relocate shards away from a node if it has less
than the specified amount of free space. This setting affects the
allocation of all shards, whether previously allocated or not.

`cluster.routing.allocation.disk.watermark.flood_stage`::
+
--
Controls the flood stage watermark. It defaults to 95%, meaning ES enforces
a read-only index block (`index.blocks.read_only_allow_delete`) on every
index that has one or more shards allocated on the node that has at least
one disk exceeding the flood stage. This is a last resort to prevent nodes
from running out of disk space. The index block must be released manually
once there is enough disk space available to allow indexing operations to
continue.
Controls the flood stage watermark. It defaults to 95%, meaning that
Elasticsearch enforces a read-only index block
(`index.blocks.read_only_allow_delete`) on every index that has one or more
shards allocated on the node that has at least one disk exceeding the flood
stage. This is a last resort to prevent nodes from running out of disk space.
The index block must be released manually once there is enough disk space
available to allow indexing operations to continue.
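
For instance, a minimal sketch that raises all three watermarks together on a live cluster (the values are illustrative, and per the note below percentage and byte values must not be mixed):

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.disk.watermark.low": "90%",
    "cluster.routing.allocation.disk.watermark.high": "95%",
    "cluster.routing.allocation.disk.watermark.flood_stage": "97%"
  }
}
--------------------------------------------------
// CONSOLE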

NOTE: You can not mix the usage of percentage values and byte values within
these settings. Either all are set to percentage values, or all are set to byte
@@ -67,12 +71,12 @@ PUT /twitter/_settings

`cluster.routing.allocation.disk.include_relocations`::

Defaults to +true+, which means that Elasticsearch will take into account
shards that are currently being relocated to the target node when computing a
node's disk usage. Taking relocating shards' sizes into account may, however,
mean that the disk usage for a node is incorrectly estimated on the high side,
since the relocation could be 90% complete and a recently retrieved disk usage
would include the total size of the relocating shard as well as the space
already used by the running relocation.
shards that are currently being relocated to the target node when computing
a node's disk usage. Taking relocating shards' sizes into account may,
however, mean that the disk usage for a node is incorrectly estimated on
the high side, since the relocation could be 90% complete and a recently
retrieved disk usage would include the total size of the relocating shard
as well as the space already used by the running relocation.

NOTE: Percentage values refer to used disk space, while byte values refer to
@@ -44,12 +44,12 @@ If you register same snapshot repository with multiple clusters, only
one cluster should have write access to the repository. All other clusters
connected to that repository should set the repository to `readonly` mode.

NOTE: The snapshot format can change across major versions, so if you have
clusters on different major versions trying to write the same repository,
new snapshots written by one version will not be visible to the other. While
setting the repository to `readonly` on all but one of the clusters should work
with multiple clusters differing by one major version, it is not a supported
configuration.
IMPORTANT: The snapshot format can change across major versions, so if you have
clusters on different versions trying to write the same repository, snapshots
written by one version may not be visible to the other and the repository could
be corrupted. While setting the repository to `readonly` on all but one of the
clusters should work with multiple clusters differing by one major version, it
is not a supported configuration.
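
For example, a minimal sketch of registering a shared filesystem repository in `readonly` mode on a secondary cluster (the repository name and location are illustrative):

[source,js]
-----------------------------------
PUT _snapshot/my_readonly_backup
{
  "type": "fs",
  "settings": {
    "location": "my_backup_location",
    "readonly": true
  }
}
-----------------------------------
// CONSOLE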

[source,js]
-----------------------------------
@@ -186,8 +186,7 @@ process. It does not support field name prefixes, wildcard characters,
or other "advanced" features. For this reason, chances of it failing are
very small / non existent, and it provides an excellent behavior when it
comes to just analyze and run that text as a query behavior (which is
usually what a text search box does). Also, the `phrase_prefix` type can
provide a great "as you type" behavior to automatically load search
results.
usually what a text search box does). Also, the <<query-dsl-match-query-phrase-prefix,`match_phrase_prefix`>>
type can provide a great "as you type" behavior to automatically load search results.
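
For instance, a minimal sketch of such an as-you-type query (the field and text are illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "match_phrase_prefix": {
      "message": "quick brown f"
    }
  }
}
--------------------------------------------------
// CONSOLE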

**************************************************
@@ -10,5 +10,9 @@ The changes listed below have been released for the first time in Elasticsearch
Core::
* Tribe node has been removed in favor of Cross-Cluster-Search

Cross-Cluster-Search::
* `http_addresses` has been removed from the <<cluster-remote-info>> API
  because it is expensive to fetch and no longer needed by Kibana.

Rest API::
* The Clear Cache API only supports `POST` as HTTP method
@@ -20,20 +20,6 @@ GET twitter/_field_caps?fields=rating
// CONSOLE
// TEST[setup:twitter]

Alternatively the `fields` option can also be defined in the request body. deprecated[6.4.0, Please use a request parameter instead.]

[source,js]
--------------------------------------------------
POST _field_caps
{
  "fields" : ["rating"]
}
--------------------------------------------------
// CONSOLE
// TEST[warning:Specifying a request body is deprecated -- the [fields] request parameter should be used instead.]

This is equivalent to the previous request.

Supported request options:

[horizontal]
@@ -23,7 +23,7 @@ POST twitter/_search
  },
  "suggest" : {
    "my-suggestion" : {
      "text" : "trying out Elasticsearch",
      "text" : "tring out Elasticsearch",
      "term" : {
        "field" : "message"
      }
@@ -91,25 +91,20 @@ already have local shard copies.
+
--
When all nodes have joined the cluster and recovered their primary shards,
reenable allocation.
reenable allocation by restoring `cluster.routing.allocation.enable` to its
default:

[source,js]
------------------------------------------------------
PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.enable": "all"
  "persistent": {
    "cluster.routing.allocation.enable": null
  }
}
------------------------------------------------------
// CONSOLE

NOTE: Because <<_precedence_of_settings, transient
settings take precedence over persistent settings>>, this overrides the
persistent setting used to disable shard allocation in the first step. If you
don't explicitly reenable shard allocation after a full cluster restart, the
persistent setting is used and shard allocation remains disabled.

Once allocation is reenabled, the cluster starts allocating replica shards to
the data nodes. At this point it is safe to resume indexing and searching,
but your cluster will recover more quickly if you can wait until all primary
@@ -72,21 +72,15 @@ GET _cat/nodes
+
--

NOTE: Because <<_precedence_of_settings, transient
settings take precedence over persistent settings>>, this overrides the
persistent setting used to disable shard allocation in the first step. If you
don't explicitly reenable shard allocation after a full cluster restart, the
persistent setting is used and shard allocation remains disabled.

Once the node has joined the cluster, reenable shard allocation to start using
the node:
Once the node has joined the cluster, remove the `cluster.routing.allocation.enable`
setting to enable shard allocation and start using the node:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "transient": {
    "cluster.routing.allocation.enable": "all"
  "persistent": {
    "cluster.routing.allocation.enable": null
  }
}
--------------------------------------------------
@@ -64,7 +64,7 @@ case "`uname`" in
        ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
CLASSPATH=$APP_HOME/.gradle-wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
@@ -63,7 +63,7 @@ set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
set CLASSPATH=%APP_HOME%\.gradle-wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
@@ -107,7 +107,7 @@ public final class Booleans {
    }

    /**
     * Returns <code>false</code> if text is in <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>; else, true
     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
     *
     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead.
     */
@@ -119,9 +119,7 @@ public final class Booleans {
        return parseBooleanLenient(value, false);
    }
    /**
     * Returns <code>true</code> iff the value is neither of the following:
     * <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>
     * otherwise <code>false</code>
     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
     *
     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead.
     */
@@ -134,21 +132,21 @@ public final class Booleans {
    }

    /**
     * @return <code>true</code> iff the value is <tt>false</tt>, otherwise <code>false</code>.
     * @return {@code true} iff the value is "false", otherwise {@code false}.
     */
    public static boolean isFalse(String value) {
        return "false".equals(value);
    }

    /**
     * @return <code>true</code> iff the value is <tt>true</tt>, otherwise <code>false</code>
     * @return {@code true} iff the value is "true", otherwise {@code false}.
     */
    public static boolean isTrue(String value) {
        return "true".equals(value);
    }

    /**
     * Returns <code>false</code> if text is in <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>; else, true
     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
     *
     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead
     */
@@ -37,11 +37,11 @@ import java.util.Map;
public final class IOUtils {

    private IOUtils() {

        // Static utils methods
    }

    /**
     * Closes all given <tt>Closeable</tt>s. Some of the <tt>Closeable</tt>s may be null; they are
     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
     * ignored. After everything is closed, the method either throws the first exception it hit
     * while closing with other exceptions added as suppressed, or completes normally if there were
     * no exceptions.
@@ -53,7 +53,7 @@ public final class IOUtils {
    }

    /**
     * Closes all given <tt>Closeable</tt>s. Some of the <tt>Closeable</tt>s may be null; they are
     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
     * ignored. After everything is closed, the method adds any exceptions as suppressed to the
     * original exception, or throws the first exception it hit if {@code Exception} is null. If
     * no exceptions are encountered and the passed in exception is null, it completes normally.
@@ -65,7 +65,7 @@ public final class IOUtils {
    }

    /**
     * Closes all given <tt>Closeable</tt>s. Some of the <tt>Closeable</tt>s may be null; they are
     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
     * ignored. After everything is closed, the method either throws the first exception it hit
     * while closing with other exceptions added as suppressed, or completes normally if there were
     * no exceptions.
@@ -3,6 +3,8 @@ setup:
      indices.create:
        index: test
        body:
          settings:
            number_of_shards: 1
          mappings:
            test:
              properties:
@@ -55,7 +55,7 @@ import java.util.Map;
import java.util.Objects;

/**
 * A query builder for <tt>has_child</tt> query.
 * A query builder for {@code has_child} query.
 */
public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuilder> {
    public static final String NAME = "has_child";
@@ -41,7 +41,7 @@ import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRati

/**
 * Metric implementing Discounted Cumulative Gain.
 * The `normalize` parameter can be set to calculate the normalized NDCG (set to <tt>false</tt> by default).<br>
 * The `normalize` parameter can be set to calculate the normalized NDCG (set to {@code false} by default).<br>
 * The optional `unknown_doc_rating` parameter can be used to specify a default rating for unlabeled documents.
 * @see <a href="https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Discounted_Cumulative_Gain">Discounted Cumulative Gain</a><br>
 */
@@ -192,7 +192,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
        return Collections.unmodifiableMap(this.params);
    }

    /** return the parameters if this request uses a template, otherwise this will be <tt>null</tt>. */
    /** return the parameters if this request uses a template, otherwise this will be {@code null}. */
    public String getTemplateId() {
        return this.templateId;
    }
@@ -17,6 +17,10 @@
 * under the License.
 */

import org.apache.tools.ant.taskdefs.condition.Os

import static org.elasticsearch.gradle.BuildPlugin.getJavaHome

apply plugin: 'elasticsearch.test-with-dependencies'

esplugin {
@@ -60,3 +64,64 @@ thirdPartyAudit.excludes = [
  'org.apache.log.Hierarchy',
  'org.apache.log.Logger',
]

// Support for testing reindex-from-remote against old Elasticsearch versions
configurations {
  oldesFixture
  es2
  es1
  es090
}

dependencies {
  oldesFixture project(':test:fixtures:old-elasticsearch')
  /* Right now we just test against the latest version of each major we expect
   * reindex-from-remote to work against. We could randomize the versions but
   * that doesn't seem worth it at this point. */
  es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip'
  es1 'org.elasticsearch:elasticsearch:1.7.6@zip'
  es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
}

if (Os.isFamily(Os.FAMILY_WINDOWS)) {
  logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows")
  integTestRunner.systemProperty "tests.fromOld", "false"
} else if (rootProject.rootDir.toString().contains(" ")) {
  logger.warn("Disabling reindex-from-old tests because Elasticsearch 1.7 won't start with spaces in the path")
  integTestRunner.systemProperty "tests.fromOld", "false"
} else {
  integTestRunner.systemProperty "tests.fromOld", "true"
  /* Set up tasks to unzip and run the old versions of ES before running the
   * integration tests. */
  for (String version : ['2', '1', '090']) {
    Task unzip = task("unzipEs${version}", type: Sync) {
      Configuration oldEsDependency = configurations['es' + version]
      dependsOn oldEsDependency
      /* Use a closure here to delay resolution of the dependency until we need
       * it */
      from {
        oldEsDependency.collect { zipTree(it) }
      }
      into temporaryDir
    }
    Task fixture = task("oldEs${version}Fixture",
        type: org.elasticsearch.gradle.test.AntFixture) {
      dependsOn project.configurations.oldesFixture
      dependsOn unzip
      executable = new File(project.runtimeJavaHome, 'bin/java')
      env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }"
      env 'JAVA_HOME', getJavaHome(it, 7)
      args 'oldes.OldElasticsearch',
           baseDir,
           unzip.temporaryDir,
           version == '090'
    }
    integTest.dependsOn fixture
    integTestRunner {
      /* Use a closure on the string to delay evaluation until right before we
       * run the integration tests so that we can be sure that the file is
       * ready. */
      systemProperty "es${version}.port", "${ -> fixture.addressAndPort }"
    }
  }
}
@@ -79,7 +79,11 @@ final class RemoteRequestBuilders {
        }
        params.put("size", Integer.toString(searchRequest.source().size()));
        if (searchRequest.source().version() == null || searchRequest.source().version() == true) {
            // false is the only value that makes it false. Null defaults to true....
            /*
             * Passing `null` here just adds the `version` request parameter
             * without any value. This way of requesting the version works
             * for all supported versions of Elasticsearch.
             */
            params.put("version", null);
        }
        if (searchRequest.source().sorts() != null) {
@@ -17,7 +17,7 @@
 * under the License.
 */

package org.elasticsearch.smoketest;
package org.elasticsearch.index.reindex.remote;

import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
@@ -27,6 +27,7 @@ import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.test.rest.ESRestTestCase;

import java.io.IOException;
@@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.containsString;

public class ReindexFromOldRemoteIT extends ESRestTestCase {
    private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException {
        boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld"));
        assumeTrue("test is disabled, probably because this is windows", enabled);

        int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName));
        try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) {
            try {
@@ -36,12 +36,12 @@ import com.ibm.icu.util.ULocale;

/**
 * An ICU based collation token filter. There are two ways to configure collation:
 * <p>The first is simply specifying the locale (defaults to the default locale). The <tt>language</tt>
 * parameter is the lowercase two-letter ISO-639 code. An additional <tt>country</tt> and <tt>variant</tt>
 * <p>The first is simply specifying the locale (defaults to the default locale). The {@code language}
 * parameter is the lowercase two-letter ISO-639 code. An additional {@code country} and {@code variant}
 * can be provided.
 * <p>The second option is to specify collation rules as defined in the <a href="http://www.icu-project.org/userguide/Collate_Customization.html">
 * Collation customization</a> chapter in icu docs. The <tt>rules</tt> parameter can either embed the rules definition
 * in the settings or refer to an external location (preferably located under the <tt>config</tt> location, relative to it).
 * Collation customization</a> chapter in icu docs. The {@code rules} parameter can either embed the rules definition
 * in the settings or refer to an external location (preferably located under the {@code config} location, relative to it).
 */
public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
@@ -35,7 +35,7 @@ import org.elasticsearch.index.IndexSettings;
 * Can be filtered to handle certain characters in a specified way (see http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html)
 * E.g national chars that should be retained (filter : "[^åäöÅÄÖ]").
 *
 * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.
 * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.
 *
 * @author kimchy (shay.banon)
 */
@@ -32,9 +32,9 @@ import java.io.Reader;

/**
 * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize characters.
 * <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
 * <p>The <tt>mode</tt> can be used to provide 'compose' or 'decompose'. Default is compose.</p>
 * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.</p>
 * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
 * <p>The {@code mode} can be used to provide 'compose' or 'decompose'. Default is compose.</p>
 * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
 */
public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {
@@ -31,10 +31,8 @@ import org.elasticsearch.index.IndexSettings;

/**
 * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens.
 * <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
 * <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide the UniCodeSet for filtering.</p>
 *
 *
 * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
 * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
 */
public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {
@@ -1,8 +1,3 @@
import org.elasticsearch.gradle.test.AntFixture

import java.security.KeyPair
import java.security.KeyPairGenerator

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -58,50 +53,7 @@ thirdPartyAudit.excludes = [
  'org.apache.log.Logger',
]

forbiddenApisTest {
  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
}

/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/
task googleCloudStorageFixture(type: AntFixture) {
  dependsOn compileTestJava
  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
  executable = new File(project.runtimeJavaHome, 'bin/java')
  args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test'
}

/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/
File serviceAccountFile = new File(project.buildDir, "generated-resources/service_account_test.json")
task createServiceAccountFile() {
  dependsOn googleCloudStorageFixture
  doLast {
    KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA")
    keyPairGenerator.initialize(1024)
    KeyPair keyPair = keyPairGenerator.generateKeyPair()
    String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded())

    serviceAccountFile.parentFile.mkdirs()
    serviceAccountFile.setText("{\n" +
        ' "type": "service_account",\n' +
        ' "project_id": "integration_test",\n' +
        ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' +
        ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' +
        ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' +
        ' "client_id": "123456789101112130594",\n' +
        " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" +
        " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" +
        ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' +
        ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' +
        '}', 'UTF-8')
  }
}

integTestCluster {
  dependsOn createServiceAccountFile, googleCloudStorageFixture
  keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"

  /* Use a closure on the string to delay evaluation until tests are executed */
  setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }"
}
check {
  // also execute the QA tests when testing the plugin
  dependsOn 'qa:google-cloud-storage:check'
}
@@ -0,0 +1,115 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.AntFixture

import java.security.KeyPair
import java.security.KeyPairGenerator

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

dependencies {
  testCompile project(path: ':plugins:repository-gcs', configuration: 'runtime')
}

integTestCluster {
  plugin ':plugins:repository-gcs'
}

forbiddenApisTest {
  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
}

boolean useFixture = false

String gcsServiceAccount = System.getenv("google_storage_service_account")
String gcsBucket = System.getenv("google_storage_bucket")
String gcsBasePath = System.getenv("google_storage_base_path")

File serviceAccountFile = null
if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) {
  serviceAccountFile = new File(project.buildDir, 'generated-resources/service_account_test.json')
  gcsBucket = 'bucket_test'
  gcsBasePath = 'integration_test'
  useFixture = true
} else {
  serviceAccountFile = new File(gcsServiceAccount)
  if (serviceAccountFile.exists() == false || serviceAccountFile.canRead() == false) {
    // FileNotFoundException only takes a single message argument, so fold the path into the message
    throw new FileNotFoundException("Google Storage service account file " + gcsServiceAccount + " does not exist or is not readable")
  }
}

/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/
task googleCloudStorageFixture(type: AntFixture) {
  dependsOn compileTestJava
  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
  executable = new File(project.runtimeJavaHome, 'bin/java')
  args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test'
}

/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/
task createServiceAccountFile() {
  dependsOn googleCloudStorageFixture
  doLast {
    KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA")
    keyPairGenerator.initialize(1024)
    KeyPair keyPair = keyPairGenerator.generateKeyPair()
    String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded())

    serviceAccountFile.parentFile.mkdirs()
    serviceAccountFile.setText("{\n" +
        ' "type": "service_account",\n' +
        ' "project_id": "integration_test",\n' +
        ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' +
        ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' +
        ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' +
        ' "client_id": "123456789101112130594",\n' +
        " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" +
        " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" +
        ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' +
        ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' +
        '}', 'UTF-8')
  }
}

Map<String, Object> expansions = [
  'bucket': gcsBucket,
  'base_path': gcsBasePath
]

processTestResources {
  inputs.properties(expansions)
  MavenFilteringHack.filter(it, expansions)
}

integTestCluster {
  keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"

  if (useFixture) {
    dependsOn createServiceAccountFile, googleCloudStorageFixture
    /* Use a closure on the string to delay evaluation until tests are executed */
    setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }"
  } else {
    println "Using an external service to test the repository-gcs plugin"
  }
}
@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.gcs;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class GoogleCloudStorageRepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public GoogleCloudStorageRepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return createParameters();
    }
}
@@ -0,0 +1,177 @@
# Integration tests for repository-gcs
---
"Snapshot/Restore with repository-gcs":

  # Register repository
  - do:
      snapshot.create_repository:
        repository: repository
        body:
          type: gcs
          settings:
            bucket: ${bucket}
            client: "integration_test"
            base_path: ${base_path}

  - match: { acknowledged: true }

  # Get repository
  - do:
      snapshot.get_repository:
        repository: repository

  - match: { repository.settings.bucket : ${bucket} }
  - match: { repository.settings.client : "integration_test" }
  - match: { repository.settings.base_path : ${base_path} }

  # Index documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 1
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 2
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 3
          - snapshot: one

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Create a first snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-one }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.include_global_state: true }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.status:
        repository: repository
        snapshot: snapshot-one

  - is_true: snapshots
  - match: { snapshots.0.snapshot: snapshot-one }
  - match: { snapshots.0.state : SUCCESS }

  # Index more documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 4
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 5
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 6
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 7
          - snapshot: two

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Create a second snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-two }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.get:
        repository: repository
        snapshot: snapshot-one,snapshot-two

  - is_true: snapshots
  - match: { snapshots.0.state : SUCCESS }
  - match: { snapshots.1.state : SUCCESS }

  # Delete the index
  - do:
      indices.delete:
        index: docs

  # Restore the second snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Delete the index again
  - do:
      indices.delete:
        index: docs

  # Restore the first snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Remove the snapshots
  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-two

  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-one

  # Remove our repository
  - do:
      snapshot.delete_repository:
        repository: repository
@@ -11,177 +11,3 @@
      nodes.info: {}

  - match: { nodes.$master.plugins.0.name: repository-gcs }
---
"Snapshot/Restore with repository-gcs":

  # Register repository
  - do:
      snapshot.create_repository:
        repository: repository
        body:
          type: gcs
          settings:
            bucket: "bucket_test"
            client: "integration_test"

  - match: { acknowledged: true }

  # Get repository
  - do:
      snapshot.get_repository:
        repository: repository

  - match: {repository.settings.bucket : "bucket_test"}
  - match: {repository.settings.client : "integration_test"}

  # Index documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 1
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 2
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 3
          - snapshot: one

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Create a first snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-one }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.include_global_state: true }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.status:
        repository: repository
        snapshot: snapshot-one

  - is_true: snapshots
  - match: { snapshots.0.snapshot: snapshot-one }
  - match: { snapshots.0.state : SUCCESS }

  # Index more documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 4
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 5
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 6
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 7
          - snapshot: two

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Create a second snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-two }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.get:
        repository: repository
        snapshot: snapshot-one,snapshot-two

  - is_true: snapshots
  - match: { snapshots.0.state : SUCCESS }
  - match: { snapshots.1.state : SUCCESS }

  # Delete the index
  - do:
      indices.delete:
        index: docs

  # Restore the second snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Delete the index again
  - do:
      indices.delete:
        index: docs

  # Restore the first snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Remove the snapshots
  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-two

  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-one

  # Remove our repository
  - do:
      snapshot.delete_repository:
        repository: repository
@@ -1,5 +1,3 @@
import org.elasticsearch.gradle.test.AntFixture

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -66,28 +64,14 @@ test {
  exclude '**/*CredentialsTests.class'
}

forbiddenApisTest {
  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
}

/** A task to start the AmazonS3Fixture which emulates a S3 service **/
task s3Fixture(type: AntFixture) {
  dependsOn compileTestJava
  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
  executable = new File(project.runtimeJavaHome, 'bin/java')
  args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test'
check {
  // also execute the QA tests when testing the plugin
  dependsOn 'qa:amazon-s3:check'
}

integTestCluster {
  dependsOn s3Fixture

  keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key"
  keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key"

  /* Use a closure on the string to delay evaluation until tests are executed */
  setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }"
}

thirdPartyAudit.excludes = [
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.AntFixture

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

dependencies {
  testCompile project(path: ':plugins:repository-s3', configuration: 'runtime')
}

integTestCluster {
  plugin ':plugins:repository-s3'
}

forbiddenApisTest {
  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
  bundledSignatures -= 'jdk-non-portable'
  bundledSignatures += 'jdk-internal'
}

boolean useFixture = false

String s3AccessKey = System.getenv("amazon_s3_access_key")
String s3SecretKey = System.getenv("amazon_s3_secret_key")
String s3Bucket = System.getenv("amazon_s3_bucket")
String s3BasePath = System.getenv("amazon_s3_base_path")

if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) {
  s3AccessKey = 's3_integration_test_access_key'
  s3SecretKey = 's3_integration_test_secret_key'
  s3Bucket = 'bucket_test'
  s3BasePath = 'integration_test'
  useFixture = true
}

/** A task to start the AmazonS3Fixture which emulates a S3 service **/
task s3Fixture(type: AntFixture) {
  dependsOn compileTestJava
  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
  executable = new File(project.runtimeJavaHome, 'bin/java')
  args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket
}

Map<String, Object> expansions = [
  'bucket': s3Bucket,
  'base_path': s3BasePath
]
processTestResources {
  inputs.properties(expansions)
  MavenFilteringHack.filter(it, expansions)
}

integTestCluster {
  keystoreSetting 's3.client.integration_test.access_key', s3AccessKey
  keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey

  if (useFixture) {
    dependsOn s3Fixture
    /* Use a closure on the string to delay evaluation until tests are executed */
    setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}"
  } else {
    println "Using an external service to test the repository-s3 plugin"
  }
}
@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return ESClientYamlSuiteTestCase.createParameters();
    }
}
@ -0,0 +1,183 @@
# Integration tests for repository-s3
---
"Snapshot/Restore with repository-s3":

  # Register repository
  - do:
      snapshot.create_repository:
        repository: repository
        body:
          type: s3
          settings:
            bucket: ${bucket}
            client: integration_test
            base_path: ${base_path}
            canned_acl: private
            storage_class: standard

  - match: { acknowledged: true }

  # Get repository
  - do:
      snapshot.get_repository:
        repository: repository

  - match: { repository.settings.bucket : ${bucket} }
  - match: { repository.settings.client : "integration_test" }
  - match: { repository.settings.base_path : ${base_path} }
  - match: { repository.settings.canned_acl : "private" }
  - match: { repository.settings.storage_class : "standard" }
  - is_false: repository.settings.access_key
  - is_false: repository.settings.secret_key

  # Index documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 1
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 2
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 3
          - snapshot: one

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Create a first snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-one }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.include_global_state: true }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.status:
        repository: repository
        snapshot: snapshot-one

  - is_true: snapshots
  - match: { snapshots.0.snapshot: snapshot-one }
  - match: { snapshots.0.state : SUCCESS }

  # Index more documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 4
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 5
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 6
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 7
          - snapshot: two

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Create a second snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-two }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.get:
        repository: repository
        snapshot: snapshot-one,snapshot-two

  - is_true: snapshots
  - match: { snapshots.0.state : SUCCESS }
  - match: { snapshots.1.state : SUCCESS }

  # Delete the index
  - do:
      indices.delete:
        index: docs

  # Restore the second snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Delete the index again
  - do:
      indices.delete:
        index: docs

  # Restore the first snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Remove the snapshots
  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-two

  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-one

  # Remove our repository
  - do:
      snapshot.delete_repository:
        repository: repository
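For readers following the test above from Java rather than the YAML DSL, a hedged sketch of the same repository registration step. `client` is assumed to be an org.elasticsearch.client.Client, and the literal setting values are placeholders standing in for the ${bucket}/${base_path} substitutions used by the test:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class RegisterS3RepositoryExample {
    // Registers an "s3" repository equivalent to the snapshot.create_repository step above.
    static void register(Client client) {
        client.admin().cluster().preparePutRepository("repository")
            .setType("s3")
            .setSettings(Settings.builder()
                .put("bucket", "my-bucket")          // placeholder for ${bucket}
                .put("client", "integration_test")
                .put("base_path", "my/base/path")    // placeholder for ${base_path}
                .put("canned_acl", "private")
                .put("storage_class", "standard"))
            .get();
    }
}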
@ -156,7 +156,7 @@ class S3Repository extends BlobStoreRepository {

        String bucket = BUCKET_SETTING.get(metadata.settings());
        if (bucket == null) {
            throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
            throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
        }

        boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
@ -11,183 +11,3 @@
      nodes.info: {}

  - match: { nodes.$master.plugins.0.name: repository-s3 }
---
"Snapshot/Restore with repository-s3":

  # Register repository
  - do:
      snapshot.create_repository:
        repository: repository
        body:
          type: s3
          settings:
            bucket: "bucket_test"
            client: "integration_test"
            canned_acl: "public-read"
            storage_class: "standard"

  - match: { acknowledged: true }

  # Get repository
  - do:
      snapshot.get_repository:
        repository: repository

  - match: {repository.settings.bucket : "bucket_test"}
  - match: {repository.settings.client : "integration_test"}
  - match: {repository.settings.canned_acl : "public-read"}
  - match: {repository.settings.storage_class : "standard"}
  - is_false: repository.settings.access_key
  - is_false: repository.settings.secret_key

  # Index documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 1
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 2
          - snapshot: one
          - index:
              _index: docs
              _type: doc
              _id: 3
          - snapshot: one

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Create a first snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-one }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.include_global_state: true }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.status:
        repository: repository
        snapshot: snapshot-one

  - is_true: snapshots
  - match: { snapshots.0.snapshot: snapshot-one }
  - match: { snapshots.0.state : SUCCESS }

  # Index more documents
  - do:
      bulk:
        refresh: true
        body:
          - index:
              _index: docs
              _type: doc
              _id: 4
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 5
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 6
          - snapshot: two
          - index:
              _index: docs
              _type: doc
              _id: 7
          - snapshot: two

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Create a second snapshot
  - do:
      snapshot.create:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - match: { snapshot.snapshot: snapshot-two }
  - match: { snapshot.state : SUCCESS }
  - match: { snapshot.shards.failed : 0 }

  - do:
      snapshot.get:
        repository: repository
        snapshot: snapshot-one,snapshot-two

  - is_true: snapshots
  - match: { snapshots.0.state : SUCCESS }
  - match: { snapshots.1.state : SUCCESS }

  # Delete the index
  - do:
      indices.delete:
        index: docs

  # Restore the second snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-two
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 7}

  # Delete the index again
  - do:
      indices.delete:
        index: docs

  # Restore the first snapshot
  - do:
      snapshot.restore:
        repository: repository
        snapshot: snapshot-one
        wait_for_completion: true

  - do:
      count:
        index: docs

  - match: {count: 3}

  # Remove the snapshots
  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-two

  - do:
      snapshot.delete:
        repository: repository
        snapshot: snapshot-one

  # Remove our repository
  - do:
      snapshot.delete_repository:
        repository: repository
@ -7,7 +7,6 @@
  - match: { my_remote_cluster.num_nodes_connected: 1}
  - match: { my_remote_cluster.max_connections_per_cluster: 1}
  - match: { my_remote_cluster.initial_connect_timeout: "30s" }
  - is_true: my_remote_cluster.http_addresses.0

---
"Add transient remote cluster based on the preset cluster and check remote info":

@ -38,9 +37,6 @@

  - do:
      cluster.remote_info: {}
  - set: { my_remote_cluster.http_addresses.0: remote_http }
  - match: { test_remote_cluster.http_addresses.0: $remote_http }

  - match: { test_remote_cluster.connected: true }
  - match: { my_remote_cluster.connected: true }

@ -132,4 +128,3 @@
        transient:
          search.remote.remote1.seeds: null
          search.remote.remote1.skip_unavailable: null
@ -1,94 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

description = """\
Tests reindex-from-remote against some specific versions of
Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0
should be able to use the standard launching mechanism which
is more flexible and reliable.
"""

import org.apache.tools.ant.taskdefs.condition.Os

import static org.elasticsearch.gradle.BuildPlugin.getJavaHome

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

integTestCluster {
  // Whitelist reindexing from the local node so we can test it.
  setting 'reindex.remote.whitelist', '127.0.0.1:*'
}

configurations {
  oldesFixture
  es2
  es1
  es090
}

dependencies {
  oldesFixture project(':test:fixtures:old-elasticsearch')
  /* Right now we just test against the latest version of each major we expect
   * reindex-from-remote to work against. We could randomize the versions but
   * that doesn't seem worth it at this point. */
  es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip'
  es1 'org.elasticsearch:elasticsearch:1.7.6@zip'
  es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
}

if (Os.isFamily(Os.FAMILY_WINDOWS)) {
  // we can't get the pid files in windows so we skip that
  integTest.enabled = false
} else {
  /* Set up tasks to unzip and run the old versions of ES before running the
   * integration tests. */
  for (String version : ['2', '1', '090']) {
    Task unzip = task("unzipEs${version}", type: Sync) {
      Configuration oldEsDependency = configurations['es' + version]
      dependsOn oldEsDependency
      /* Use a closure here to delay resolution of the dependency until we need
       * it */
      from {
        oldEsDependency.collect { zipTree(it) }
      }
      into temporaryDir
    }
    Task fixture = task("oldEs${version}Fixture",
        type: org.elasticsearch.gradle.test.AntFixture) {
      dependsOn project.configurations.oldesFixture
      dependsOn unzip
      executable = new File(project.runtimeJavaHome, 'bin/java')
      env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }"
      env 'JAVA_HOME', getJavaHome(it, 7)
      args 'oldes.OldElasticsearch',
           baseDir,
           unzip.temporaryDir,
           version == '090'
    }
    integTest.dependsOn fixture
    integTestRunner {
      /* Use a closure on the string to delay evaluation until right before we
       * run the integration tests so that we can be sure that the file is
       * ready. */
      systemProperty "es${version}.port", "${ -> fixture.addressAndPort }"
    }
  }
}
@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

ext.pluginsCount = 0
project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
project(':plugins').getChildProjects().each { pluginName, pluginProject ->
  integTestCluster {
    plugin subproj.path
    plugin pluginProject.path
  }
  pluginsCount += 1
}
@ -1,3 +1,5 @@
import org.elasticsearch.gradle.precommit.PrecommitTasks

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with

@ -17,12 +19,27 @@
 * under the License.
 */

apply plugin: 'elasticsearch.vagrantsupport'
apply plugin: 'elasticsearch.vagrant'
plugins {
  id 'java'
  id 'elasticsearch.build'
  id 'elasticsearch.vagrantsupport'
  id 'elasticsearch.vagrant'
}

dependencies {
  compile "junit:junit:${versions.junit}"
  compile "org.hamcrest:hamcrest-core:${versions.hamcrest}"

  // needs to be on the classpath for JarHell
  testRuntime project(':libs:elasticsearch-core')

  // pulls in the jar built by this project and its dependencies
  packagingTest project(path: project.path, configuration: 'runtime')
}

List<String> plugins = []
for (Project subproj : project.rootProject.subprojects) {
  if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) {
  if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) {
    // add plugin as a dep
    dependencies {
      packaging project(path: "${subproj.path}", configuration: 'zip')

@ -39,3 +56,20 @@ setupPackagingTest {
    expectedPlugins.setText(plugins.join('\n'), 'UTF-8')
  }
}

esvagrant {
  testClass 'org.elasticsearch.packaging.PackagingTests'
}

forbiddenApisMain {
  signaturesURLs = [
    PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')
  ]
}

// we don't have additional tests for the tests themselves
tasks.test.enabled = false

// this project doesn't get published
tasks.dependencyLicenses.enabled = false
tasks.dependenciesInfo.enabled = false
@ -0,0 +1,31 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.packaging;

import org.junit.Test;

/**
 * This class doesn't have any tests yet
 */
public class PackagingTests {

    @Test
    public void testDummy() {}
}
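Since PackagingTests is only a placeholder wired into the esvagrant testClass above, nothing substantive is asserted yet. A hypothetical illustration of what a first real test might look like; the class name and the environment variable it checks are assumptions for illustration, not part of this change:

import org.junit.Test;

import static org.junit.Assert.assertNotNull;

public class PackagingEnvironmentTests {

    @Test
    public void testEnvironmentIsWiredUp() {
        // hypothetical: assert the harness exported the paths the tests need
        assertNotNull(System.getenv("PACKAGING_ARCHIVES"));
    }
}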
@ -35,9 +35,6 @@
        }
      }
    },
    "body": {
      "description": "Field json objects containing an array of field names",
      "required": false
    }
    "body": null
  }
}
@ -18,6 +18,10 @@
      }
    },
    "params": {
      "copy_settings": {
        "type" : "boolean",
        "description" : "whether or not to copy settings from the source index (defaults to false)"
      },
      "timeout": {
        "type" : "time",
        "description" : "Explicit operation timeout"

@ -18,6 +18,10 @@
      }
    },
    "params": {
      "copy_settings": {
        "type" : "boolean",
        "description" : "whether or not to copy settings from the source index (defaults to false)"
      },
      "timeout": {
        "type" : "time",
        "description" : "Explicit operation timeout"
@ -2,7 +2,7 @@
"get cluster state":
  - do:
      cluster.state: {}

  - is_true: master_node

---

@ -18,3 +18,18 @@
  - is_true: master_node
  - gte: { compressed_size_in_bytes: 50 }
  - is_true: compressed_size

---
"get cluster state returns cluster_uuid at the top level":
  - skip:
      version: " - 6.3.99"
      reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"

  - do:
      cluster.state:
        human: true

  - is_true: cluster_uuid
  - is_true: master_node
  - gte: { compressed_size_in_bytes: 50 }
  - is_true: compressed_size

@ -156,3 +156,19 @@ setup:
  - is_true: routing_table.indices.index1
  - is_true: metadata.indices.index2
  - is_true: routing_table.indices.index2

---
"Filtering the cluster state returns cluster_uuid at the top level regardless of metric filters":
  - skip:
      version: " - 6.3.99"
      reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher"

  - do:
      cluster.state:
        metric: [ master_node, version, metadata ]

  - is_true: cluster_uuid
  - is_true: master_node
  - is_true: version
  - is_true: state_uuid
  - is_true: metadata
@ -0,0 +1,94 @@
|
|||
---
|
||||
"Copy settings during shrink index":
|
||||
- skip:
|
||||
version: " - 6.3.99"
|
||||
reason: copy_settings did not exist prior to 6.4.0
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
cluster.state: {}
|
||||
|
||||
# get master node id
|
||||
- set: { master_node: master }
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: source
|
||||
wait_for_active_shards: 1
|
||||
body:
|
||||
settings:
|
||||
# ensure everything is allocated on the master node
|
||||
index.routing.allocation.include._id: $master
|
||||
index.number_of_replicas: 0
|
||||
index.merge.scheduler.max_merge_count: 4
|
||||
|
||||
# make it read-only
|
||||
- do:
|
||||
indices.put_settings:
|
||||
index: source
|
||||
body:
|
||||
index.blocks.write: true
|
||||
index.number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
index: source
|
||||
|
||||
# now we do a actual shrink and copy settings
|
||||
- do:
|
||||
indices.shrink:
|
||||
index: "source"
|
||||
target: "copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: true
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [true]"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
- do:
|
||||
indices.get_settings:
|
||||
index: "copy-settings-target"
|
||||
|
||||
# settings should be copied
|
||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
||||
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
||||
|
||||
# now we do a actual shrink and do not copy settings
|
||||
- do:
|
||||
indices.shrink:
|
||||
index: "source"
|
||||
target: "no-copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: false
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [false]"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
- do:
|
||||
indices.get_settings:
|
||||
index: "no-copy-settings-target"
|
||||
|
||||
# only the request setting should be copied
|
||||
- is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
|
||||
- match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||
- is_false: no-copy-settings-target.settings.index.blocks.write
|
||||
- is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
|
|
@ -0,0 +1,98 @@
---
"Copy settings during split index":
  - skip:
      version: " - 6.3.99"
      reason: copy_settings did not exist prior to 6.4.0
      features: "warnings"

  - do:
      cluster.state: {}

  # get master node id
  - set: { master_node: master }

  - do:
      indices.create:
        index: source
        wait_for_active_shards: 1
        body:
          settings:
            # ensure everything is allocated on the master node
            index.routing.allocation.include._id: $master
            index.number_of_replicas: 0
            index.number_of_shards: 1
            index.number_of_routing_shards: 4
            index.merge.scheduler.max_merge_count: 4

  # make it read-only
  - do:
      indices.put_settings:
        index: source
        body:
          index.blocks.write: true
          index.number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green
        index: source

  # now we do an actual split and copy settings
  - do:
      indices.split:
        index: "source"
        target: "copy-settings-target"
        wait_for_active_shards: 1
        master_timeout: 10s
        copy_settings: true
        body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 2
            index.merge.scheduler.max_thread_count: 2
      warnings:
        - "parameter [copy_settings] is deprecated but was [true]"

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.get_settings:
        index: "copy-settings-target"

  # settings should be copied
  - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
  - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
  - match: { copy-settings-target.settings.index.blocks.write: "true" }
  - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }

  # now we do an actual split and do not copy settings
  - do:
      indices.split:
        index: "source"
        target: "no-copy-settings-target"
        wait_for_active_shards: 1
        master_timeout: 10s
        copy_settings: false
        body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 2
            index.merge.scheduler.max_thread_count: 2
      warnings:
        - "parameter [copy_settings] is deprecated but was [false]"

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.get_settings:
        index: "no-copy-settings-target"

  # only the request setting should be copied
  - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
  - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
  - is_false: no-copy-settings-target.settings.index.blocks.write
  - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

@ -635,8 +636,25 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
    public static ElasticsearchException[] guessRootCauses(Throwable t) {
        Throwable ex = ExceptionsHelper.unwrapCause(t);
        if (ex instanceof ElasticsearchException) {
            // ElasticsearchException knows how to guess its own root cause
            return ((ElasticsearchException) ex).guessRootCauses();
        }
        if (ex instanceof XContentParseException) {
            /*
             * We'd like to unwrap parsing exceptions to the inner-most
             * parsing exception because that is generally the most interesting
             * exception to return to the user. If that exception is caused by
             * an ElasticsearchException we'd like to keep unwrapping because
             * ElasticsearchExceptions tend to contain useful information for
             * the user.
             */
            Throwable cause = ex.getCause();
            if (cause != null) {
                if (cause instanceof XContentParseException || cause instanceof ElasticsearchException) {
                    return guessRootCauses(ex.getCause());
                }
            }
        }
        return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
            @Override
            protected String getExceptionName() {
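The branch added above recurses toward the inner-most parse error. A minimal self-contained sketch of just that unwrapping rule, with stand-in exception types in place of the real XContentParseException and ElasticsearchException:

public class RootCauseSketch {
    static class ParseLikeException extends RuntimeException {
        ParseLikeException(String message, Throwable cause) { super(message, cause); }
    }
    static class DomainException extends RuntimeException {
        DomainException(String message) { super(message); }
    }

    // Keep unwrapping while the cause is still a parse or domain exception;
    // the inner-most one is the most useful to report to the user.
    static Throwable unwrap(Throwable ex) {
        if (ex instanceof ParseLikeException) {
            Throwable cause = ex.getCause();
            if (cause instanceof ParseLikeException || cause instanceof DomainException) {
                return unwrap(cause);
            }
        }
        return ex;
    }

    public static void main(String[] args) {
        Throwable inner = new DomainException("unknown field [foo]");
        Throwable outer = new ParseLikeException("failed to parse request", inner);
        System.out.println(unwrap(outer).getMessage()); // prints: unknown field [foo]
    }
}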
@ -19,7 +19,11 @@

package org.elasticsearch;

/**
 * An exception that is meant to be "unwrapped" when sent back to the user
 * as an error, because its {@link #getCause() cause}, if non-null, is
 * <strong>always</strong> more useful to the user than the exception itself.
 */
public interface ElasticsearchWrapperException {

    Throwable getCause();
}
@ -241,6 +241,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction;
import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction;
import org.elasticsearch.rest.action.admin.indices.RestResizeHandler;
import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction;
import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction;
import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction;

@ -270,8 +271,6 @@ import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction;
import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction;
import org.elasticsearch.rest.action.admin.indices.RestRefreshAction;
import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction;
import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction;
import org.elasticsearch.rest.action.admin.indices.RestSplitIndexAction;
import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction;
import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction;
import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction;

@ -569,8 +568,8 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestIndexPutAliasAction(settings, restController));
        registerHandler.accept(new RestIndicesAliasesAction(settings, restController));
        registerHandler.accept(new RestCreateIndexAction(settings, restController));
        registerHandler.accept(new RestShrinkIndexAction(settings, restController));
        registerHandler.accept(new RestSplitIndexAction(settings, restController));
        registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController));
        registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController));
        registerHandler.accept(new RestRolloverIndexAction(settings, restController));
        registerHandler.accept(new RestDeleteIndexAction(settings, restController));
        registerHandler.accept(new RestCloseIndexAction(settings, restController));
@ -75,7 +75,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr

        Result(int op) {
            this.op = (byte) op;
            this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
            this.lowercase = this.name().toLowerCase(Locale.ROOT);
        }

        public byte getOp() {
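The change above swaps toString() for name() (name() is final, so the cached lowercase form stays stable even if toString() were ever overridden) and uses Locale.ROOT, the conventional locale-neutral choice for identifiers. A small standalone illustration of why an explicit neutral locale matters at all; the printed values are standard JDK behavior, shown here only for context:

import java.util.Locale;

public class LocaleLowercaseDemo {
    public static void main(String[] args) {
        // Locale-sensitive lowercasing can surprise: Turkish maps 'I' to a
        // dotless 'ı', while Locale.ROOT applies the locale-neutral mapping.
        System.out.println("INDEX".toLowerCase(new Locale("tr"))); // prints "ındex"
        System.out.println("INDEX".toLowerCase(Locale.ROOT));      // prints "index"
    }
}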
@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import static java.util.stream.Collectors.toList;

public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> {

    private final RemoteClusterService remoteClusterService;

@ -45,7 +47,6 @@ public final class TransportRemoteInfoAction extends HandledTransportAction<Remo

    @Override
    protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener<RemoteInfoResponse> listener) {
        remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos
            -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure));
        listener.onResponse(new RemoteInfoResponse(remoteClusterService.getRemoteConnectionInfos().collect(toList())));
    }
}
@ -230,9 +230,9 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
            SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
            List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
            if (snapshotInfo.state().completed()) {
                Map<ShardId, IndexShardSnapshotStatus> shardStatues =
                    snapshotsService.snapshotShards(request.repository(), snapshotInfo);
                for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
                Map<ShardId, IndexShardSnapshotStatus> shardStatuses =
                    snapshotsService.snapshotShards(repositoryName, repositoryData, snapshotInfo);
                for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) {
                    IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy();
                    shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus));
                }
@ -45,6 +45,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
    private final String providedName;
    private Index recoverFrom;
    private ResizeType resizeType;
    private boolean copySettings;

    private IndexMetaData.State state = IndexMetaData.State.OPEN;

@ -112,6 +113,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
        return this;
    }

    public CreateIndexClusterStateUpdateRequest copySettings(final boolean copySettings) {
        this.copySettings = copySettings;
        return this;
    }

    public TransportMessage originalMessage() {
        return originalMessage;
    }

@ -170,4 +176,9 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
    public ResizeType resizeType() {
        return resizeType;
    }

    public boolean copySettings() {
        return copySettings;
    }

}
@ -18,6 +18,7 @@
 */
package org.elasticsearch.action.admin.indices.shrink;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;

@ -55,6 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
    private CreateIndexRequest targetIndexRequest;
    private String sourceIndex;
    private ResizeType type = ResizeType.SHRINK;
    private boolean copySettings = false;

    ResizeRequest() {}

@ -96,6 +98,11 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        } else {
            type = ResizeType.SHRINK; // BWC this used to be shrink only
        }
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
            copySettings = in.readBoolean();
        } else {
            copySettings = false;
        }
    }

    @Override

@ -106,6 +113,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
            out.writeEnum(type);
        }
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeBoolean(copySettings);
        }
    }

    @Override

@ -177,6 +187,14 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        return type;
    }

    public void setCopySettings(final boolean copySettings) {
        this.copySettings = copySettings;
    }

    public boolean getCopySettings() {
        return copySettings;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
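The read/write pair above is the standard wire-compatibility gate: the new copySettings flag only travels between nodes that are both on 6.4.0 or later, and older senders fall back to the default. A minimal self-contained sketch of the same pattern; the Wire interfaces are illustrative stand-ins, not Elasticsearch's StreamInput/StreamOutput:

public class VersionGatedFieldSketch {
    static final int V_6_4_0 = 6_04_00; // stand-in version constant

    interface WireIn  { int version(); boolean readBoolean(); }
    interface WireOut { int version(); void writeBoolean(boolean value); }

    private boolean copySettings = false;

    void readFrom(WireIn in) {
        // senders older than 6.4.0 never wrote the flag, so keep the default
        copySettings = in.version() >= V_6_4_0 && in.readBoolean();
    }

    void writeTo(WireOut out) {
        // only emit the flag to receivers that know how to read it
        if (out.version() >= V_6_4_0) {
            out.writeBoolean(copySettings);
        }
    }
}

Callers then opt in through the setter added in the last hunk, e.g. resizeRequest.setCopySettings(true) before submitting the shrink or split request.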
Some files were not shown because too many files have changed in this diff.