Merge remote-tracking branch 'es/master' into ccr
* es/master: (38 commits)
  Build: Add pom generation to meta plugins (#28321)
  Add 6.3 version constant to master
  Minor improvements to translog docs (#28237)
  [Docs] Remove typo in painless-getting-started.asciidoc
  Build: Fix meta plugin usage in integ test clusters (#28307)
  Painless: Add spi jar that will be published for extending whitelists (#28302)
  mistyping in one of the highlighting examples comment -> content (#28139)
  Documents applicability of term query to range type (#28166)
  Build: Omit dependency licenses check for elasticsearch deps (#28304)
  Clean up commits when global checkpoint advanced (#28140)
  Implement socket and server ChannelContexts (#28275)
  Plugins: Fix meta plugins to install bundled plugins with their real name (#28285)
  Build: Fix meta plugin integ test installation (#28286)
  Modify Abstract transport tests to use impls (#28270)
  Fork Groovy compiler onto compile Java home
  [Docs] Update tophits-aggregation.asciidoc (#28273)
  Docs: match between snippet to its description (#28296)
  [TEST] fix RequestTests#testSearch in case search source is not set
  REST high-level client: remove index suffix from indices client method names (#28263)
  Fix simple_query_string on invalid input (#28219)
  ...
commit 2f17f91680
@@ -126,8 +126,8 @@ Alternatively, `idea.no.launcher=true` can be set in the
 [`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
 file which can be accessed under Help > Edit Custom Properties (this will require a
 restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
-`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from
-`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
+`Run->Edit Configurations->...->Defaults->JUnit` and verify that the `Shorten command line` setting is set to
+`user-local default: none`. You may also need to [remove `ant-javafx.jar` from your
 classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
 reported as a source of jar hell.
@@ -43,6 +43,7 @@ import org.gradle.api.publish.maven.MavenPublication
 import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
 import org.gradle.api.publish.maven.tasks.GenerateMavenPom
 import org.gradle.api.tasks.bundling.Jar
+import org.gradle.api.tasks.compile.GroovyCompile
 import org.gradle.api.tasks.compile.JavaCompile
 import org.gradle.api.tasks.javadoc.Javadoc
 import org.gradle.internal.jvm.Jvm
@@ -455,6 +456,13 @@ class BuildPlugin implements Plugin<Project> {
             // TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510)
             options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion
         }
+        // also apply release flag to groovy, which is used in build-tools
+        project.tasks.withType(GroovyCompile) {
+            final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility)
+            options.fork = true
+            options.forkOptions.javaHome = new File(project.compilerJavaHome)
+            options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion
+        }
     }
 }
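This hunk is the "Fork Groovy compiler onto compile Java home" commit from the merged list: GroovyCompile tasks are forked onto the configured compile JDK so the `--release` flag takes effect. A minimal standalone sketch of the same idea, assuming a plain Gradle build where a `compilerJavaHome` project property names the JDK to compile with:

import org.gradle.api.JavaVersion
import org.gradle.api.tasks.compile.GroovyCompile

// fork each Groovy compilation onto the JDK named by the (assumed)
// 'compilerJavaHome' property, targeting the task's targetCompatibility
tasks.withType(GroovyCompile) {
    options.fork = true
    options.forkOptions.javaHome = new File(project.property('compilerJavaHome').toString())
    options.compilerArgs << '--release' <<
            JavaVersion.toVersion(targetCompatibility).majorVersion
}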
@@ -651,7 +659,10 @@ class BuildPlugin implements Plugin<Project> {
         Task precommit = PrecommitTasks.create(project, true)
         project.check.dependsOn(precommit)
         project.test.mustRunAfter(precommit)
-        project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided
+        // only require dependency licenses for non-elasticsearch deps
+        project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection {
+            it.group.startsWith('org.elasticsearch') == false
+        } - project.configurations.provided
     }

     private static configureDependenciesInfo(Project project) {
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.plugin
+
+import org.elasticsearch.gradle.BuildPlugin
+import org.elasticsearch.gradle.test.RestTestPlugin
+import org.elasticsearch.gradle.test.RunTask
+import org.elasticsearch.gradle.test.StandaloneRestTestPlugin
+import org.gradle.api.Plugin
+import org.gradle.api.Project
+import org.gradle.api.file.FileCopyDetails
+import org.gradle.api.file.RelativePath
+import org.gradle.api.tasks.bundling.Zip
+
+class MetaPluginBuildPlugin implements Plugin<Project> {
+
+    @Override
+    void apply(Project project) {
+        project.plugins.apply(StandaloneRestTestPlugin)
+        project.plugins.apply(RestTestPlugin)
+
+        createBundleTask(project)
+
+        project.integTestCluster {
+            dependsOn(project.bundlePlugin)
+            plugin(project.path)
+        }
+        BuildPlugin.configurePomGeneration(project)
+        project.afterEvaluate {
+            PluginBuildPlugin.addZipPomGeneration(project)
+        }
+
+        RunTask run = project.tasks.create('run', RunTask)
+        run.dependsOn(project.bundlePlugin)
+        run.clusterConfig.plugin(project.path)
+    }
+
+    private static void createBundleTask(Project project) {
+
+        MetaPluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', MetaPluginPropertiesTask.class)
+
+        // create the actual bundle task, which zips up all the files for the plugin
+        Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) {
+            into('elasticsearch') {
+                from(buildProperties.descriptorOutput.parentFile) {
+                    // plugin properties file
+                    include(buildProperties.descriptorOutput.name)
+                }
+            }
+            // due to how the renames work for each bundled plugin, we must exclude empty dirs or every subdir
+            // within bundled plugin zips will show up at the root as an empty dir
+            includeEmptyDirs = false
+
+        }
+        project.assemble.dependsOn(bundle)
+
+        // also make the zip available as a configuration (used when depending on this project)
+        project.configurations.create('zip')
+        project.artifacts.add('zip', bundle)
+
+        // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration
+        // to add itself back to this meta plugin zip
+        project.afterEvaluate {
+            buildProperties.extension.plugins.each { String bundledPluginProjectName ->
+                Project bundledPluginProject = project.project(bundledPluginProjectName)
+                bundledPluginProject.afterEvaluate {
+                    bundle.configure {
+                        dependsOn bundledPluginProject.bundlePlugin
+                        from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) {
+                            eachFile { FileCopyDetails details ->
+                                // paths in the individual plugins begin with elasticsearch, and we want to add in the
+                                // bundled plugin name between that and each filename
+                                details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName,
+                                        details.relativePath.toString().replace('elasticsearch/', ''))
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
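To make the `eachFile` rename concrete: an entry stored as `elasticsearch/config/example.yml` inside a bundled plugin's zip ends up nested under the bundled plugin's own directory in the meta plugin zip. A hedged sketch with made-up names:

import org.gradle.api.file.RelativePath

String entry = 'elasticsearch/config/example.yml'  // hypothetical zip entry
String bundledPluginProjectName = 'plugin-a'        // hypothetical bundled plugin

// same construction as the hunk above: strip the leading 'elasticsearch/'
// and re-root the entry under the bundled plugin's directory
RelativePath renamed = new RelativePath(true, 'elasticsearch', bundledPluginProjectName,
        entry.replace('elasticsearch/', ''))
assert renamed.toString() == 'elasticsearch/plugin-a/config/example.yml'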
@@ -17,21 +17,30 @@
  * under the License.
  */

-package org.elasticsearch.nio;
+package org.elasticsearch.gradle.plugin

-import java.io.IOException;
-import java.util.function.BiConsumer;
+import org.gradle.api.Project
+import org.gradle.api.tasks.Input

-public interface WriteContext {
-
-    void sendMessage(Object message, BiConsumer<Void, Throwable> listener);
-
-    void queueWriteOperations(WriteOperation writeOperation);
-
-    void flushChannel() throws IOException;
-
-    boolean hasQueuedWriteOps();
-
-    void clearQueuedWriteOps(Exception e);
-
-}
+/**
+ * A container for meta plugin properties that will be written to the meta plugin descriptor, for easy
+ * manipulation in the gradle DSL.
+ */
+class MetaPluginPropertiesExtension {
+    @Input
+    String name
+
+    @Input
+    String description
+
+    /**
+     * The plugins this meta plugin wraps.
+     * Note this is not written to the plugin descriptor, but used to setup the final zip file task.
+     */
+    @Input
+    List<String> plugins
+
+    MetaPluginPropertiesExtension(Project project) {
+        name = project.name
+    }
+}
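Together with the `MetaPluginPropertiesTask` added in the next hunk (which registers this extension under the name `es_meta_plugin`), this gives meta plugin builds a small DSL. A hedged sketch of how a meta plugin's build script might use it; the plugin id and project names here are assumptions, not taken from this diff:

// build.gradle of a hypothetical meta plugin project
apply plugin: 'elasticsearch.es-meta-plugin'       // assumed id for MetaPluginBuildPlugin

es_meta_plugin {
    name = 'my-meta-plugin'                        // defaults to project.name; must not end up null
    description = 'bundles plugin-a and plugin-b'  // required, or the build fails
    plugins = ['plugin-a', 'plugin-b']             // names of the bundled plugin projects
}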
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.plugin
+
+import org.gradle.api.InvalidUserDataException
+import org.gradle.api.Task
+import org.gradle.api.tasks.Copy
+import org.gradle.api.tasks.OutputFile
+
+class MetaPluginPropertiesTask extends Copy {
+
+    MetaPluginPropertiesExtension extension
+
+    @OutputFile
+    File descriptorOutput = new File(project.buildDir, 'generated-resources/meta-plugin-descriptor.properties')
+
+    MetaPluginPropertiesTask() {
+        File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}")
+        Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') {
+            doLast {
+                InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}")
+                templateFile.parentFile.mkdirs()
+                templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8')
+            }
+        }
+
+        dependsOn(copyPluginPropertiesTemplate)
+        extension = project.extensions.create('es_meta_plugin', MetaPluginPropertiesExtension, project)
+        project.afterEvaluate {
+            // check required properties are set
+            if (extension.name == null) {
+                throw new InvalidUserDataException('name is a required setting for es_meta_plugin')
+            }
+            if (extension.description == null) {
+                throw new InvalidUserDataException('description is a required setting for es_meta_plugin')
+            }
+            // configure property substitution
+            from(templateFile.parentFile).include(descriptorOutput.name)
+            into(descriptorOutput.parentFile)
+            Map<String, String> properties = generateSubstitutions()
+            expand(properties)
+            inputs.properties(properties)
+        }
+    }
+
+    Map<String, String> generateSubstitutions() {
+        return ['name': extension.name,
+                'description': extension.description
+        ]
+    }
+}
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.plugin

+import nebula.plugin.info.scm.ScmInfoPlugin
 import org.elasticsearch.gradle.BuildPlugin
 import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.test.RestIntegTestTask
@@ -220,7 +221,8 @@ public class PluginBuildPlugin extends BuildPlugin {
     }

     /** Adds a task to generate a pom file for the zip distribution. */
-    protected void addZipPomGeneration(Project project) {
+    public static void addZipPomGeneration(Project project) {
+        project.plugins.apply(ScmInfoPlugin.class)
         project.plugins.apply(MavenPublishPlugin.class)

         project.publishing {
@@ -23,6 +23,8 @@ import org.apache.tools.ant.taskdefs.condition.Os
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension
 import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 import org.elasticsearch.gradle.plugin.PluginPropertiesExtension
 import org.gradle.api.AntBuilder
@@ -138,8 +140,8 @@ class ClusterFormationTasks {
     /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
     static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) {
         verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject)
-        PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin');
-        project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${extension.name}:${elasticsearchVersion}@zip")
+        final String pluginName = findPluginName(pluginProject)
+        project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
     }

     /**
@@ -449,7 +451,7 @@ class ClusterFormationTasks {
             configuration = project.configurations.create(configurationName)
         }

-        final String depName = pluginProject.extensions.findByName('esplugin').name
+        final String depName = findPluginName(pluginProject)

         Dependency dep = bwcPlugins.dependencies.find {
             it.name == depName
@@ -753,9 +755,19 @@ class ClusterFormationTasks {
     }

     static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) {
-        if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
+        if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) {
             throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " +
-                    "[${project.path}] dependencies: the plugin is not an esplugin")
+                    "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin")
         }
     }
+
+    /** Find the plugin name in the given project, whether a regular plugin or meta plugin. */
+    static String findPluginName(Project pluginProject) {
+        PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin')
+        if (extension != null) {
+            return extension.name
+        } else {
+            return pluginProject.extensions.findByName('es_meta_plugin').name
+        }
+    }
 }
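The new `findPluginName` helper lets cluster formation treat regular plugins and meta plugins uniformly when building the BWC dependency notation. A small illustration with made-up values:

// hypothetical values; findPluginName(pluginProject) would return the name
// from either the 'esplugin' or the 'es_meta_plugin' extension
String pluginName = 'my-meta-plugin'
String elasticsearchVersion = '6.2.0'
String notation = "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip"
assert notation == 'org.elasticsearch.plugin:my-meta-plugin:6.2.0@zip'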
@@ -0,0 +1,20 @@
+#
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+implementation-class=org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
@@ -21,6 +21,8 @@ package org.elasticsearch.client;

 import org.apache.http.Header;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
@@ -49,9 +51,9 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
      * Delete Index API on elastic.co</a>
      */
-    public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
+    public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
         return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-                Collections.emptySet(), headers);
+            Collections.emptySet(), headers);
     }

     /**
@@ -60,10 +62,9 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html">
      * Delete Index API on elastic.co</a>
      */
-    public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener,
-                                 Header... headers) {
+    public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
         restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-                listener, Collections.emptySet(), headers);
+            listener, Collections.emptySet(), headers);
     }

     /**
|
@ -72,7 +73,7 @@ public final class IndicesClient {
|
|||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
|
||||
* Create Index API on elastic.co</a>
|
||||
*/
|
||||
public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
|
||||
public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
}
|
||||
|
@@ -83,10 +84,9 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html">
      * Create Index API on elastic.co</a>
      */
-    public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener,
-                                 Header... headers) {
+    public void createAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener, Header... headers) {
         restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
-                listener, Collections.emptySet(), headers);
+            listener, Collections.emptySet(), headers);
     }

     /**
||||
/**
|
||||
|
@ -95,7 +95,7 @@ public final class IndicesClient {
|
|||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
|
||||
* Open Index API on elastic.co</a>
|
||||
*/
|
||||
public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
|
||||
public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
|
||||
Collections.emptySet(), headers);
|
||||
}
|
||||
|
@@ -106,9 +106,30 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
      * Open Index API on elastic.co</a>
      */
-    public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
+    public void openAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
         restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
             listener, Collections.emptySet(), headers);
     }
+
+    /**
+     * Closes an index using the Close Index API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
+     * Close Index API on elastic.co</a>
+     */
+    public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
+            Collections.emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously closes an index using the Close Index API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
+     * Close Index API on elastic.co</a>
+     */
+    public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener<CloseIndexResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
+            listener, Collections.emptySet(), headers);
+    }
 }
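For orientation, a hedged sketch of calling the new close methods from client code, written here as a Groovy script; the host and index name are made up, and the client setup follows the usual high-level client pattern:

import org.apache.http.HttpHost
import org.elasticsearch.action.ActionListener
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse
import org.elasticsearch.client.RestClient
import org.elasticsearch.client.RestHighLevelClient

// assumed: a node listening on localhost:9200 and an existing index 'posts'
RestHighLevelClient client = new RestHighLevelClient(
        RestClient.builder(new HttpHost('localhost', 9200)))

CloseIndexRequest request = new CloseIndexRequest('posts')

// synchronous: blocks until the response is parsed
CloseIndexResponse response = client.indices().close(request)
assert response.acknowledged

// asynchronous: the listener is invoked on completion or failure
client.indices().closeAsync(request, new ActionListener<CloseIndexResponse>() {
    @Override
    void onResponse(CloseIndexResponse closeIndexResponse) {
        // handle acknowledgement
    }

    @Override
    void onFailure(Exception e) {
        // handle failure
    }
})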
@@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
@@ -153,6 +154,18 @@ public final class Request {
         return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
     }

+    static Request closeIndex(CloseIndexRequest closeIndexRequest) {
+        String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close");
+
+        Params parameters = Params.builder();
+
+        parameters.withTimeout(closeIndexRequest.timeout());
+        parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout());
+        parameters.withIndicesOptions(closeIndexRequest.indicesOptions());
+
+        return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
+    }
+
     static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
         String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");

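Concretely, the hunk above turns a close request for `index1` and `index2` into `POST /index1,index2/_close`, with the timeout, master timeout, and indices options carried as query parameters. A simplified stand-in for the endpoint construction; the real `endpoint` helper in `Request` also handles types and empty index lists:

// simplified sketch of Request.endpoint(indices, types, suffix) for this case
static String closeEndpoint(String[] indices) {
    return '/' + String.join(',', indices) + '/_close'
}

assert closeEndpoint(['index1', 'index2'] as String[]) == '/index1,index2/_close'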
@@ -26,8 +26,6 @@ import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
-import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -21,6 +21,8 @@ package org.elasticsearch.client;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
@@ -28,21 +30,18 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.rest.RestStatus;
-
-import java.io.IOException;
-import java.util.Locale;
-
-import static org.hamcrest.Matchers.equalTo;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
+import java.util.Map;

+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.hamcrest.Matchers.equalTo;

 public class IndicesClientIT extends ESRestHighLevelClientTestCase {
@@ -56,7 +55,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
             CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);

             CreateIndexResponse createIndexResponse =
-                    execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
+                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
             assertTrue(createIndexResponse.isAcknowledged());

             assertTrue(indexExists(indexName));
@@ -84,7 +83,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
             createIndexRequest.mapping("type_name", mappingBuilder);

             CreateIndexResponse createIndexResponse =
-                    execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync);
+                    execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync);
             assertTrue(createIndexResponse.isAcknowledged());

             Map<String, Object> indexMetaData = getIndexMetadata(indexName);
@@ -117,7 +116,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {

             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName);
             DeleteIndexResponse deleteIndexResponse =
-                    execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync);
+                    execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync);
             assertTrue(deleteIndexResponse.isAcknowledged());

             assertFalse(indexExists(indexName));
@@ -130,63 +129,74 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex);

             ElasticsearchException exception = expectThrows(ElasticsearchException.class,
-                    () -> execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync));
+                    () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync));
             assertEquals(RestStatus.NOT_FOUND, exception.status());
         }
     }

     public void testOpenExistingIndex() throws IOException {
-        String[] indices = randomIndices(1, 5);
-        for (String index : indices) {
-            createIndex(index);
-            closeIndex(index);
-            ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
-            assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
-            assertThat(exception.getMessage().contains(index), equalTo(true));
-        }
+        String index = "index";
+        createIndex(index);
+        closeIndex(index);
+        ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
+        assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
+        assertThat(exception.getMessage().contains(index), equalTo(true));

-        OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices);
-        OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex,
-                highLevelClient().indices()::openIndexAsync);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(index);
+        OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open,
+                highLevelClient().indices()::openAsync);
         assertTrue(openIndexResponse.isAcknowledged());

-        for (String index : indices) {
-            Response response = client().performRequest("GET", index + "/_search");
-            assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
-        }
+        Response response = client().performRequest("GET", index + "/_search");
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
     }

     public void testOpenNonExistentIndex() throws IOException {
-        String[] nonExistentIndices = randomIndices(1, 5);
-        for (String nonExistentIndex : nonExistentIndices) {
-            assertFalse(indexExists(nonExistentIndex));
-        }
+        String nonExistentIndex = "non_existent_index";
+        assertFalse(indexExists(nonExistentIndex));

-        OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex);
         ElasticsearchException exception = expectThrows(ElasticsearchException.class,
-                () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
+                () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
         assertEquals(RestStatus.NOT_FOUND, exception.status());

-        OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
+        OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex);
         lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
-        OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex,
-                highLevelClient().indices()::openIndexAsync);
+        OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open,
+                highLevelClient().indices()::openAsync);
         assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true));

-        OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
+        OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex);
         strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen());
         ElasticsearchException strictException = expectThrows(ElasticsearchException.class,
-                () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
+                () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync));
         assertEquals(RestStatus.NOT_FOUND, strictException.status());
     }

-    private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) {
-        int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum);
-        String[] indices = new String[numIndices];
-        for (int i = 0; i < numIndices; i++) {
-            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT);
-        }
-        return indices;
+    public void testCloseExistingIndex() throws IOException {
+        String index = "index";
+        createIndex(index);
+        Response response = client().performRequest("GET", index + "/_search");
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
+
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index);
+        CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close,
+                highLevelClient().indices()::closeAsync);
+        assertTrue(closeIndexResponse.isAcknowledged());
+
+        ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
+        assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
+        assertThat(exception.getMessage().contains(index), equalTo(true));
+    }
+
+    public void testCloseNonExistentIndex() throws IOException {
+        String nonExistentIndex = "non_existent_index";
+        assertFalse(indexExists(nonExistentIndex));
+
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex);
+        ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+                () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync));
+        assertEquals(RestStatus.NOT_FOUND, exception.status());
     }

     private static void createIndex(String index) throws IOException {
@@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.util.EntityUtils;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
@@ -325,17 +326,10 @@ public class RequestTests extends ESTestCase {
     }

     public void testDeleteIndex() {
-        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();
-
-        int numIndices = randomIntBetween(0, 5);
-        String[] indices = new String[numIndices];
-        for (int i = 0; i < numIndices; i++) {
-            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
-        }
-        deleteIndexRequest.indices(indices);
+        String[] indices = randomIndicesNames(0, 5);
+        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices);

         Map<String, String> expectedParams = new HashMap<>();

         setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomMasterTimeout(deleteIndexRequest, expectedParams);
@@ -349,12 +343,8 @@ public class RequestTests extends ESTestCase {
     }

     public void testOpenIndex() {
-        OpenIndexRequest openIndexRequest = new OpenIndexRequest();
-        int numIndices = randomIntBetween(1, 5);
-        String[] indices = new String[numIndices];
-        for (int i = 0; i < numIndices; i++) {
-            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
-        }
+        String[] indices = randomIndicesNames(1, 5);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices);
         openIndexRequest.indices(indices);

         Map<String, String> expectedParams = new HashMap<>();
@@ -371,6 +361,23 @@ public class RequestTests extends ESTestCase {
         assertThat(request.getEntity(), nullValue());
     }

+    public void testCloseIndex() {
+        String[] indices = randomIndicesNames(1, 5);
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices);
+
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        setRandomMasterTimeout(closeIndexRequest, expectedParams);
+        setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams);
+
+        Request request = Request.closeIndex(closeIndexRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close");
+        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        assertThat(expectedParams, equalTo(request.getParameters()));
+        assertThat(request.getMethod(), equalTo("POST"));
+        assertThat(request.getEntity(), nullValue());
+    }
+
     public void testIndex() throws IOException {
         String index = randomAlphaOfLengthBetween(3, 10);
         String type = randomAlphaOfLengthBetween(3, 10);
@@ -748,13 +755,9 @@ public class RequestTests extends ESTestCase {
     }

     public void testSearch() throws Exception {
-        SearchRequest searchRequest = new SearchRequest();
-        int numIndices = randomIntBetween(0, 5);
-        String[] indices = new String[numIndices];
-        for (int i = 0; i < numIndices; i++) {
-            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
-        }
-        searchRequest.indices(indices);
+        String[] indices = randomIndicesNames(0, 5);
+        SearchRequest searchRequest = new SearchRequest(indices);

         int numTypes = randomIntBetween(0, 5);
         String[] types = new String[numTypes];
         for (int i = 0; i < numTypes; i++) {
@@ -791,44 +794,47 @@ public class RequestTests extends ESTestCase {

         setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams);

-        SearchSourceBuilder searchSourceBuilder = null;
+        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+        //rarely skip setting the search source completely
         if (frequently()) {
-            searchSourceBuilder = new SearchSourceBuilder();
-            if (randomBoolean()) {
-                searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.minScore(randomFloat());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.explain(randomBoolean());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.profile(randomBoolean());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING)
-                        .field(randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10),
-                        new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10))));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.addRescorer(new QueryRescorerBuilder(
-                        new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10)));
+            //frequently set the search source to have some content, otherwise leave it empty but still set it
+            if (frequently()) {
+                if (randomBoolean()) {
+                    searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.minScore(randomFloat());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.explain(randomBoolean());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.profile(randomBoolean());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING)
+                            .field(randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10),
+                            new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10))));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.addRescorer(new QueryRescorerBuilder(
+                            new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10)));
+                }
             }
             searchRequest.source(searchSourceBuilder);
         }
@@ -846,11 +852,7 @@ public class RequestTests extends ESTestCase {
         endpoint.add("_search");
         assertEquals(endpoint.toString(), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
-        if (searchSourceBuilder == null) {
-            assertNull(request.getEntity());
-        } else {
-            assertToXContentBody(searchSourceBuilder, request.getEntity());
-        }
+        assertToXContentBody(searchSourceBuilder, request.getEntity());
     }

     public void testMultiSearch() throws IOException {
@@ -1130,4 +1132,13 @@ public class RequestTests extends ESTestCase {
         }
         return excludesParam.toString();
     }
+
+    private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) {
+        int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum);
+        String[] indices = new String[numIndices];
+        for (int i = 0; i < numIndices; i++) {
+            indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT);
+        }
+        return indices;
+    }
 }
@@ -22,10 +22,14 @@ package org.elasticsearch.client.documentation;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
@@ -58,7 +62,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         RestHighLevelClient client = highLevelClient();

         {
-            CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts"));
+            CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"));
             assertTrue(createIndexResponse.isAcknowledged());
         }

@@ -80,7 +84,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         // end::delete-index-request-indicesOptions

         // tag::delete-index-execute
-        DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request);
+        DeleteIndexResponse deleteIndexResponse = client.indices().delete(request);
         // end::delete-index-execute

         // tag::delete-index-response
@@ -93,7 +97,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         // tag::delete-index-notfound
         try {
             DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist");
-            client.indices().deleteIndex(request);
+            client.indices().delete(request);
         } catch (ElasticsearchException exception) {
             if (exception.status() == RestStatus.NOT_FOUND) {
                 // <1>
@@ -107,7 +111,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         final RestHighLevelClient client = highLevelClient();

         {
-            CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts"));
+            CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"));
             assertTrue(createIndexResponse.isAcknowledged());
         }

@@ -115,7 +119,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         DeleteIndexRequest request = new DeleteIndexRequest("posts");

         // tag::delete-index-execute-async
-        client.indices().deleteIndexAsync(request, new ActionListener<DeleteIndexResponse>() {
+        client.indices().deleteAsync(request, new ActionListener<DeleteIndexResponse>() {
             @Override
             public void onResponse(DeleteIndexResponse deleteIndexResponse) {
                 // <1>
@@ -185,7 +189,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         // end::create-index-request-waitForActiveShards

         // tag::create-index-execute
-        CreateIndexResponse createIndexResponse = client.indices().createIndex(request);
+        CreateIndexResponse createIndexResponse = client.indices().create(request);
         // end::create-index-execute

         // tag::create-index-response
@@ -203,7 +207,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         {
             CreateIndexRequest request = new CreateIndexRequest("twitter");
             // tag::create-index-execute-async
-            client.indices().createIndexAsync(request, new ActionListener<CreateIndexResponse>() {
+            client.indices().createAsync(request, new ActionListener<CreateIndexResponse>() {
                 @Override
                 public void onResponse(CreateIndexResponse createIndexResponse) {
                     // <1>
@@ -224,4 +228,138 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         }
     }

+    public void testOpenIndex() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+
+        {
+            CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"));
+            assertTrue(createIndexResponse.isAcknowledged());
+        }
+
+        {
+            // tag::open-index-request
+            OpenIndexRequest request = new OpenIndexRequest("index"); // <1>
+            // end::open-index-request
+
+            // tag::open-index-request-timeout
+            request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+            request.timeout("2m"); // <2>
+            // end::open-index-request-timeout
+            // tag::open-index-request-masterTimeout
+            request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+            request.masterNodeTimeout("1m"); // <2>
+            // end::open-index-request-masterTimeout
+            // tag::open-index-request-waitForActiveShards
+            request.waitForActiveShards(2); // <1>
+            request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2>
+            // end::open-index-request-waitForActiveShards
+
+            // tag::open-index-request-indicesOptions
+            request.indicesOptions(IndicesOptions.strictExpandOpen()); // <1>
+            // end::open-index-request-indicesOptions
+
+            // tag::open-index-execute
+            OpenIndexResponse openIndexResponse = client.indices().open(request);
+            // end::open-index-execute
+
+            // tag::open-index-response
+            boolean acknowledged = openIndexResponse.isAcknowledged(); // <1>
+            boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // <2>
+            // end::open-index-response
+            assertTrue(acknowledged);
+            assertTrue(shardsAcked);
+
+            // tag::open-index-execute-async
+            client.indices().openAsync(request, new ActionListener<OpenIndexResponse>() {
+                @Override
+                public void onResponse(OpenIndexResponse openIndexResponse) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            });
+            // end::open-index-execute-async
+        }
+
+        {
+            // tag::open-index-notfound
+            try {
+                OpenIndexRequest request = new OpenIndexRequest("does_not_exist");
+                client.indices().open(request);
+            } catch (ElasticsearchException exception) {
+                if (exception.status() == RestStatus.BAD_REQUEST) {
+                    // <1>
+                }
+            }
+            // end::open-index-notfound
+        }
+    }
+
+    public void testCloseIndex() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+
+        {
+            CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"));
+            assertTrue(createIndexResponse.isAcknowledged());
+        }
+
+        {
+            // tag::close-index-request
+            CloseIndexRequest request = new CloseIndexRequest("index"); // <1>
+            // end::close-index-request
+
+            // tag::close-index-request-timeout
+            request.timeout(TimeValue.timeValueMinutes(2)); // <1>
+            request.timeout("2m"); // <2>
+            // end::close-index-request-timeout
+            // tag::close-index-request-masterTimeout
+            request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+            request.masterNodeTimeout("1m"); // <2>
+            // end::close-index-request-masterTimeout
+
+            // tag::close-index-request-indicesOptions
+            request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
+            // end::close-index-request-indicesOptions
+
+            // tag::close-index-execute
+            CloseIndexResponse closeIndexResponse = client.indices().close(request);
+            // end::close-index-execute
+
+            // tag::close-index-response
+            boolean acknowledged = closeIndexResponse.isAcknowledged(); // <1>
+            // end::close-index-response
+            assertTrue(acknowledged);
+
+            // tag::close-index-execute-async
+            client.indices().closeAsync(request, new ActionListener<CloseIndexResponse>() {
+                @Override
+                public void onResponse(CloseIndexResponse closeIndexResponse) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            });
+            // end::close-index-execute-async
+        }
+
+        {
+            // tag::close-index-notfound
+            try {
+                CloseIndexRequest request = new CloseIndexRequest("does_not_exist");
+                client.indices().close(request);
+            } catch (ElasticsearchException exception) {
+                if (exception.status() == RestStatus.BAD_REQUEST) {
+                    // <1>
+                }
+            }
+            // end::close-index-notfound
+        }
+    }
+}
@@ -83,7 +83,7 @@ ext.restTestExpansions = [
 // we create the buildModules task above so the distribution subprojects can
 // depend on it, but we don't actually configure it until here so we can do a single
 // loop over modules to also setup cross task dependencies and increment our modules counter
-project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each { Project module ->
+project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module ->
   buildFullNotice {
     def defaultLicensesDir = new File(module.projectDir, 'licenses')
     if (defaultLicensesDir.exists()) {
@@ -646,9 +646,11 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
                                    Environment env, List<Path> deleteOnFailure) throws Exception {
         final MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(tmpRoot);
         verifyPluginName(env.pluginsFile(), metaInfo.getName(), tmpRoot);
+
+        final Path destination = env.pluginsFile().resolve(metaInfo.getName());
+        deleteOnFailure.add(destination);
         terminal.println(VERBOSE, metaInfo.toString());

         final List<Path> pluginPaths = new ArrayList<>();
         try (DirectoryStream<Path> paths = Files.newDirectoryStream(tmpRoot)) {
             // Extract bundled plugins path and validate plugin names
|
|||
for (Path plugin : pluginPaths) {
|
||||
final PluginInfo info = verify(terminal, plugin, isBatch, env);
|
||||
pluginInfos.add(info);
|
||||
Path tmpBinDir = plugin.resolve("bin");
|
||||
if (Files.exists(tmpBinDir)) {
|
||||
Path destBinDir = env.binFile().resolve(metaInfo.getName());
|
||||
deleteOnFailure.add(destBinDir);
|
||||
installBin(info, tmpBinDir, destBinDir);
|
||||
}
|
||||
|
||||
Path tmpConfigDir = plugin.resolve("config");
|
||||
if (Files.exists(tmpConfigDir)) {
|
||||
// some files may already exist, and we don't remove plugin config files on plugin removal,
|
||||
// so any installed config files are left on failure too
|
||||
Path destConfigDir = env.configFile().resolve(metaInfo.getName());
|
||||
installConfig(info, tmpConfigDir, destConfigDir);
|
||||
installPluginSupportFiles(info, plugin, env.binFile().resolve(metaInfo.getName()),
|
||||
env.configFile().resolve(metaInfo.getName()), deleteOnFailure);
|
||||
// ensure the plugin dir within the tmpRoot has the correct name
|
||||
if (plugin.getFileName().toString().equals(info.getName()) == false) {
|
||||
Files.move(plugin, plugin.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE);
|
||||
}
|
||||
}
|
||||
movePlugin(tmpRoot, destination);
|
||||
|
@@ -693,7 +687,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {

     /**
      * Installs the plugin from {@code tmpRoot} into the plugins dir.
-     * If the plugin has a bin dir and/or a config dir, those are copied.
+     * If the plugin has a bin dir and/or a config dir, those are moved.
      */
     private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot,
                                Environment env, List<Path> deleteOnFailure) throws Exception {
@@ -701,9 +695,20 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
final Path destination = env.pluginsFile().resolve(info.getName());
deleteOnFailure.add(destination);

installPluginSupportFiles(info, tmpRoot, env.binFile().resolve(info.getName()),
env.configFile().resolve(info.getName()), deleteOnFailure);
movePlugin(tmpRoot, destination);
if (info.requiresKeystore()) {
createKeystoreIfNeeded(terminal, env, info);
}
terminal.println("-> Installed " + info.getName());
}

/** Moves bin and config directories from the plugin if they exist */
private void installPluginSupportFiles(PluginInfo info, Path tmpRoot,
Path destBinDir, Path destConfigDir, List<Path> deleteOnFailure) throws Exception {
Path tmpBinDir = tmpRoot.resolve("bin");
if (Files.exists(tmpBinDir)) {
Path destBinDir = env.binFile().resolve(info.getName());
deleteOnFailure.add(destBinDir);
installBin(info, tmpBinDir, destBinDir);
}

@@ -712,14 +717,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
if (Files.exists(tmpConfigDir)) {
// some files may already exist, and we don't remove plugin config files on plugin removal,
// so any installed config files are left on failure too
Path destConfigDir = env.configFile().resolve(info.getName());
installConfig(info, tmpConfigDir, destConfigDir);
}
movePlugin(tmpRoot, destination);
if (info.requiresKeystore()) {
createKeystoreIfNeeded(terminal, env, info);
}
terminal.println("-> Installed " + info.getName());
}

/** Moves the plugin directory into its final destination. **/

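The refactored installer above funnels bin/config handling through `installPluginSupportFiles` and then relies on an atomic rename for the final move. A minimal standalone sketch of the same `Files.move` pattern, with hypothetical paths rather than the installer's own layout:

["source","java"]
--------------------------------------------------
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicMoveDemo {
    public static void main(String[] args) throws IOException {
        Path staging = Files.createTempDirectory("plugin-staging");
        Path destination = staging.resolveSibling("plugin-final");
        // ATOMIC_MOVE either fully succeeds or leaves the source untouched, so a
        // half-installed directory is never visible at the destination. It can throw
        // AtomicMoveNotSupportedException when crossing filesystems.
        Files.move(staging, destination, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("moved to " + destination);
        Files.delete(destination);
    }
}
--------------------------------------------------
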
@@ -0,0 +1,70 @@
[[java-rest-high-close-index]]
=== Close Index API

[[java-rest-high-close-index-request]]
==== Close Index Request

A `CloseIndexRequest` requires an `index` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request]
--------------------------------------------------
<1> The index to close

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index is closed
as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the index is closed
as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

[[java-rest-high-close-index-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute]
--------------------------------------------------

[[java-rest-high-close-index-async]]
==== Asynchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-async]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

[[java-rest-high-close-index-response]]
==== Close Index Response

The returned `CloseIndexResponse` allows one to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
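The include-tagged snippets above are only rendered in the built docs. As a quick reference, here is the same flow as one fragment, in the style of the other snippets; it assumes the 6.x high-level client API used throughout this commit (`client.indices().close(...)`), and the index name and timeout values are illustrative:

["source","java"]
--------------------------------------------------
CloseIndexRequest request = new CloseIndexRequest("twitter");   // the index to close
request.timeout(TimeValue.timeValueMinutes(2));                 // wait up to 2m for acknowledgements
request.masterNodeTimeout("1m");                                // wait up to 1m to reach the master node

CloseIndexResponse closeIndexResponse = client.indices().close(request);
boolean acknowledged = closeIndexResponse.isAcknowledged();     // true if all nodes acknowledged
--------------------------------------------------
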
@@ -48,7 +48,7 @@ The following arguments can optionally be provided:
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index creation as a `TimeValue`
<2> Timeout to wait for the all the nodes to acknowledge the index creatiom as a `String`
<2> Timeout to wait for all the nodes to acknowledge the index creation as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------

@@ -61,8 +61,10 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before proceeding with the operation, as an `int`.
<2> The number of active shard copies to wait for before proceeding with the operation, as an `ActiveShardCount`.
<1> The number of active shard copies to wait for before the create index API returns a
response, as an `int`.
<2> The number of active shard copies to wait for before the create index API returns a
response, as an `ActiveShardCount`.

[[java-rest-high-create-index-sync]]
==== Synchronous Execution

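As a concrete illustration of the two overloads called out above, a short fragment in the same style (values are illustrative; it assumes the 6.x `CreateIndexRequest` API):

["source","java"]
--------------------------------------------------
CreateIndexRequest request = new CreateIndexRequest("twitter");
request.waitForActiveShards(2);                            // wait for 2 active copies per shard
request.waitForActiveShards(ActiveShardCount.DEFAULT);     // or defer to the index default
--------------------------------------------------
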
@@ -1,10 +1,23 @@
include::createindex.asciidoc[]

include::deleteindex.asciidoc[]

include::open_index.asciidoc[]

include::close_index.asciidoc[]

include::_index.asciidoc[]

include::get.asciidoc[]

include::delete.asciidoc[]

include::update.asciidoc[]

include::bulk.asciidoc[]

include::search.asciidoc[]

include::scroll.asciidoc[]

include::main.asciidoc[]

@@ -0,0 +1,81 @@
[[java-rest-high-open-index]]
=== Open Index API

[[java-rest-high-open-index-request]]
==== Open Index Request

An `OpenIndexRequest` requires an `index` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request]
--------------------------------------------------
<1> The index to open

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index is opened
as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the index is opened
as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before the open index API
returns a response, as an `int`.
<2> The number of active shard copies to wait for before the open index API
returns a response, as an `ActiveShardCount`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

[[java-rest-high-open-index-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute]
--------------------------------------------------

[[java-rest-high-open-index-async]]
==== Asynchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-async]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

[[java-rest-high-open-index-response]]
==== Open Index Response

The returned `OpenIndexResponse` allows one to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
<2> Indicates whether the requisite number of shard copies were started for
each shard in the index before timing out
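As with the close index example, here is a compact fragment tying the optional arguments together; a sketch assuming the same 6.x high-level client API, with illustrative values:

["source","java"]
--------------------------------------------------
OpenIndexRequest request = new OpenIndexRequest("twitter");     // the index to open
request.timeout("2m");                                          // wait up to 2m for acknowledgements
request.waitForActiveShards(ActiveShardCount.ONE);              // wait for one active copy per shard

OpenIndexResponse openIndexResponse = client.indices().open(request);
boolean acknowledged = openIndexResponse.isAcknowledged();      // true if all nodes acknowledged
--------------------------------------------------
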
@@ -6,6 +6,8 @@ The Java High Level REST Client supports the following APIs:
Indices APIs::
* <<java-rest-high-create-index>>
* <<java-rest-high-delete-index>>
* <<java-rest-high-open-index>>
* <<java-rest-high-close-index>>

Single document APIs::
* <<java-rest-high-document-index>>

@@ -320,7 +320,7 @@ POST hockey/player/_update_by_query

Note: all of the `_update_by_query` examples above could really do with a
`query` to limit the data that they pull back. While you *could* use a
See {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient
{ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient
as using any other query because script queries aren't able to use the inverted
index to limit the documents that they have to check.

@@ -1,5 +1,5 @@
[[search-aggregations-metrics-top-hits-aggregation]]
=== Top hits Aggregation
=== Top Hits Aggregation

A `top_hits` metric aggregator keeps track of the most relevant document being aggregated. This aggregator is intended
to be used as a sub aggregator, so that the top matching documents can be aggregated per bucket.

@@ -41,7 +41,7 @@ for more details) |Required |
details)|Optional |`skip`
|===

The following snippet only retains buckets where the total sales for the month is more than 400:
The following snippet only retains buckets where the total sales for the month is more than 200:

[source,js]
--------------------------------------------------

@@ -171,7 +171,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:

[source,js]
----------------------------------------------------
PUT /armenian_example
PUT /basque_example
{
"settings": {
"analysis": {

@@ -536,7 +536,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:

[source,js]
----------------------------------------------------
PUT /detch_example
PUT /dutch_example
{
"settings": {
"analysis": {

@@ -1554,7 +1554,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:

[source,js]
----------------------------------------------------
PUT /swidish_example
PUT /swedish_example
{
"settings": {
"analysis": {

@@ -1,41 +1,44 @@
[[index-modules-translog]]
== Translog

Changes to Lucene are only persisted to disk during a Lucene commit,
which is a relatively heavy operation and so cannot be performed after every
index or delete operation. Changes that happen after one commit and before another
will be lost in the event of process exit or HW failure.
Changes to Lucene are only persisted to disk during a Lucene commit, which is a
relatively expensive operation and so cannot be performed after every index or
delete operation. Changes that happen after one commit and before another will
be removed from the index by Lucene in the event of process exit or hardware
failure.

To prevent this data loss, each shard has a _transaction log_ or write ahead
log associated with it. Any index or delete operation is written to the
translog after being processed by the internal Lucene index.

In the event of a crash, recent transactions can be replayed from the
transaction log when the shard recovers.
Because Lucene commits are too expensive to perform on every individual change,
each shard copy also has a _transaction log_ known as its _translog_ associated
with it. All index and delete operations are written to the translog after
being processed by the internal Lucene index but before they are acknowledged.
In the event of a crash, recent transactions that have been acknowledged but
not yet included in the last Lucene commit can instead be recovered from the
translog when the shard recovers.

An Elasticsearch flush is the process of performing a Lucene commit and
starting a new translog. It is done automatically in the background in order
to make sure the transaction log doesn't grow too large, which would make
starting a new translog. Flushes are performed automatically in the background
in order to make sure the translog doesn't grow too large, which would make
replaying its operations take a considerable amount of time during recovery.
It is also exposed through an API, though its rarely needed to be performed
manually.
The ability to perform a flush manually is also exposed through an API,
although this is rarely needed.

[float]
=== Translog settings

The data in the transaction log is only persisted to disk when the translog is
The data in the translog is only persisted to disk when the translog is
++fsync++ed and committed. In the event of hardware failure, any data written
since the previous translog commit will be lost.

By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds if `index.translog.durability` is set
to `async` or if set to `request` (default) at the end of every <<docs-index_,index>>, <<docs-delete,delete>>,
<<docs-update,update>>, or <<docs-bulk,bulk>> request. In fact, Elasticsearch
will only report success of an index, delete, update, or bulk request to the
client after the transaction log has been successfully ++fsync++ed and committed
on the primary and on every allocated replica.
By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds
if `index.translog.durability` is set to `async` or if set to `request`
(default) at the end of every <<docs-index_,index>>, <<docs-delete,delete>>,
<<docs-update,update>>, or <<docs-bulk,bulk>> request. More precisely, if set
to `request`, Elasticsearch will only report success of an index, delete,
update, or bulk request to the client after the translog has been successfully
++fsync++ed and committed on the primary and on every allocated replica.

The following <<indices-update-settings,dynamically updatable>> per-index settings
control the behaviour of the transaction log:
The following <<indices-update-settings,dynamically updatable>> per-index
settings control the behaviour of the translog:

`index.translog.sync_interval`::

@@ -64,17 +67,20 @@ update, or bulk request. This setting accepts the following parameters:

`index.translog.flush_threshold_size`::

The translog stores all operations that are not yet safely persisted in Lucene (i.e., are
not part of a lucene commit point). Although these operations are available for reads, they will
need to be reindexed if the shard was to shutdown and has to be recovered. This settings controls
the maximum total size of these operations, to prevent recoveries from taking too long. Once the
maximum size has been reached a flush will happen, generating a new Lucene commit. Defaults to `512mb`.
The translog stores all operations that are not yet safely persisted in Lucene
(i.e., are not part of a Lucene commit point). Although these operations are
available for reads, they will need to be reindexed if the shard was to
shutdown and has to be recovered. This setting controls the maximum total size
of these operations, to prevent recoveries from taking too long. Once the
maximum size has been reached a flush will happen, generating a new Lucene
commit point. Defaults to `512mb`.

`index.translog.retention.size`::

The total size of translog files to keep. Keeping more translog files increases the chance of performing
an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery
will fall back to a file based sync. Defaults to `512mb`
The total size of translog files to keep. Keeping more translog files increases
the chance of performing an operation based sync when recovering replicas. If
the translog files are not sufficient, replica recovery will fall back to a
file based sync. Defaults to `512mb`


`index.translog.retention.age`::

@@ -86,10 +92,14 @@ The maximum duration for which translog files will be kept. Defaults to `12h`.

[[corrupt-translog-truncation]]
=== What to do if the translog becomes corrupted?

In some cases (a bad drive, user error) the translog can become corrupted. When
this corruption is detected by Elasticsearch due to mismatching checksums,
Elasticsearch will fail the shard and refuse to allocate that copy of the data
to the node, recovering from a replica if available.
In some cases (a bad drive, user error) the translog on a shard copy can become
corrupted. When this corruption is detected by Elasticsearch due to mismatching
checksums, Elasticsearch will fail that shard copy and refuse to use that copy
of the data. If there are other copies of the shard available then
Elasticsearch will automatically recover from one of them using the normal
shard allocation and recovery mechanism. In particular, if the corrupt shard
copy was the primary when the corruption was detected then one of its replicas
will be promoted in its place.

If there is no copy of the data from which Elasticsearch can recover
successfully, a user may want to recover the data that is part of the shard at

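To make the `index.translog.durability` trade-off described above concrete, here is a minimal standalone sketch that switches an index to `async` durability over the REST API using only the JDK; the host, port, and index name are illustrative:

["source","java"]
--------------------------------------------------
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class TranslogDurabilityDemo {
    public static void main(String[] args) throws Exception {
        // index.translog.durability is dynamically updatable per index.
        byte[] body = "{\"index.translog.durability\": \"async\"}".getBytes(StandardCharsets.UTF_8);
        HttpURLConnection conn = (HttpURLConnection)
            new URL("http://localhost:9200/my-index/_settings").openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            out.write(body);
        }
        System.out.println("HTTP " + conn.getResponseCode());
    }
}
--------------------------------------------------
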
@@ -47,18 +47,16 @@ PUT range_index/_doc/1
--------------------------------------------------
//CONSOLE

The following is an example of a `date_range` query over the `date_range` field named "time_frame".
The following is an example of a <<query-dsl-term-query, term query>> on the `integer_range` field named "expected_attendees".

[source,js]
--------------------------------------------------
POST range_index/_search
GET range_index/_search
{
"query" : {
"range" : {
"time_frame" : { <5>
"gte" : "2015-10-31",
"lte" : "2015-11-01",
"relation" : "within" <6>
"term" : {
"expected_attendees" : {
"value": 12
}
}
}

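For readers following the Java high-level client docs elsewhere in this commit, the same term query over a range field can also be expressed in Java; a sketch assuming the 6.x client API, reusing the index and field names from the example above:

["source","java"]
--------------------------------------------------
SearchRequest searchRequest = new SearchRequest("range_index");
searchRequest.source(new SearchSourceBuilder()
    .query(QueryBuilders.termQuery("expected_attendees", 12)));  // matches ranges containing 12
SearchResponse searchResponse = client.search(searchRequest);
--------------------------------------------------
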
@@ -104,6 +102,27 @@ The result produced by the above query.
--------------------------------------------------
// TESTRESPONSE[s/"took": 13/"took" : $body.took/]


The following is an example of a `date_range` query over the `date_range` field named "time_frame".

[source,js]
--------------------------------------------------
GET range_index/_search
{
"query" : {
"range" : {
"time_frame" : { <5>
"gte" : "2015-10-31",
"lte" : "2015-11-01",
"relation" : "within" <6>
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[setup:range_index]

<1> `date_range` types accept the same field parameters defined by the <<date, `date`>> type.
<2> Example indexing a meeting with 10 to 20 attendees.
<3> Date ranges accept the same format as described in <<ranges-on-dates, date range queries>>.

@@ -112,6 +131,44 @@ The result produced by the above query.
<6> Range queries over range <<mapping-types, fields>> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`,
`INTERSECTS` (default).

This query produces a similar result:

[source,js]
--------------------------------------------------
{
"took": 13,
"timed_out": false,
"_shards" : {
"total": 2,
"successful": 2,
"skipped" : 0,
"failed": 0
},
"hits" : {
"total" : 1,
"max_score" : 1.0,
"hits" : [
{
"_index" : "range_index",
"_type" : "_doc",
"_id" : "1",
"_score" : 1.0,
"_source" : {
"expected_attendees" : {
"gte" : 10, "lte" : 20
},
"time_frame" : {
"gte" : "2015-10-31 12:00:00", "lte" : "2015-11-01"
}
}
}
]
}
}
--------------------------------------------------
// TESTRESPONSE[s/"took": 13/"took" : $body.took/]


[[range-params]]
==== Parameters for range fields

@@ -51,6 +51,8 @@ GET _search
as the query clause for `normal`.
<2> The `normal` clause has the default neutral boost of `1.0`.

A `term` query can also match against <<range, range data types>>.

.Why doesn't the `term` query match my document?
**************************************************

@@ -24,7 +24,7 @@ GET /_search
},
"highlight" : {
"fields" : {
"comment" : {}
"content" : {}
}
}
}

@@ -15,7 +15,8 @@ Currently the rescore API has only one implementation: the query
rescorer, which uses a query to tweak the scoring. In the future,
alternative rescorers may be made available, for example, a pair-wise rescorer.

NOTE: the `rescore` phase is not executed when <<search-request-sort,`sort`>> is used.
NOTE: An error will be thrown if an explicit <<search-request-sort,`sort`>> (other than `_score`)
is provided with a `rescore` query.

NOTE: when exposing pagination to your users, you should not change
`window_size` as you step through each page (by passing different

@@ -26,7 +26,6 @@ import java.nio.channels.NetworkChannel;
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;

/**

@@ -48,9 +47,6 @@ import java.util.function.BiConsumer;
public abstract class AbstractNioChannel<S extends SelectableChannel & NetworkChannel> implements NioChannel {

final S socketChannel;
// This indicates if the channel has been scheduled to be closed. Read the closeFuture to determine if
// the channel close process has completed.
final AtomicBoolean isClosing = new AtomicBoolean(false);

private final InetSocketAddress localAddress;
private final CompletableFuture<Void> closeContext = new CompletableFuture<>();

|
@ -73,21 +69,6 @@ public abstract class AbstractNioChannel<S extends SelectableChannel & NetworkCh
|
|||
return localAddress;
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedules a channel to be closed by the selector event loop with which it is registered.
|
||||
* <p>
|
||||
* If the channel is open and the state can be transitioned to closed, the close operation will
|
||||
* be scheduled with the event loop.
|
||||
* <p>
|
||||
* If the channel is already set to closed, it is assumed that it is already scheduled to be closed.
|
||||
*/
|
||||
@Override
|
||||
public void close() {
|
||||
if (isClosing.compareAndSet(false, true)) {
|
||||
selector.queueChannelClose(this);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the channel synchronously. This method should only be called from the selector thread.
|
||||
* <p>
|
||||
|
@@ -95,11 +76,10 @@ public abstract class AbstractNioChannel<S extends SelectableChannel & NetworkCh
*/
@Override
public void closeFromSelector() throws IOException {
assert selector.isOnCurrentThread() : "Should only call from selector thread";
isClosing.set(true);
selector.assertOnSelectorThread();
if (closeContext.isDone() == false) {
try {
closeRawChannel();
socketChannel.close();
closeContext.complete(null);
} catch (IOException e) {
closeContext.completeExceptionally(e);

@@ -139,13 +119,13 @@ public abstract class AbstractNioChannel<S extends SelectableChannel & NetworkCh
closeContext.whenComplete(listener);
}

@Override
public void close() {
getContext().closeChannel();
}

// Package visibility for testing
void setSelectionKey(SelectionKey selectionKey) {
this.selectionKey = selectionKey;
}
// Package visibility for testing

void closeRawChannel() throws IOException {
socketChannel.close();
}
}

@@ -67,7 +67,7 @@ public class AcceptorEventHandler extends EventHandler {
ChannelFactory<?, ?> channelFactory = nioServerChannel.getChannelFactory();
SocketSelector selector = selectorSupplier.get();
NioSocketChannel nioSocketChannel = channelFactory.acceptNioChannel(nioServerChannel, selector);
nioServerChannel.getAcceptContext().accept(nioSocketChannel);
nioServerChannel.getContext().acceptChannel(nioSocketChannel);
}

/**

@@ -0,0 +1,166 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.LinkedList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;

public class BytesChannelContext extends SocketChannelContext {

private final ReadConsumer readConsumer;
private final InboundChannelBuffer channelBuffer;
private final LinkedList<BytesWriteOperation> queued = new LinkedList<>();
private final AtomicBoolean isClosing = new AtomicBoolean(false);

public BytesChannelContext(NioSocketChannel channel, BiConsumer<NioSocketChannel, Exception> exceptionHandler,
ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) {
super(channel, exceptionHandler);
this.readConsumer = readConsumer;
this.channelBuffer = channelBuffer;
}

@Override
public int read() throws IOException {
if (channelBuffer.getRemaining() == 0) {
// Requiring one additional byte will ensure that a new page is allocated.
channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1);
}

int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex()));

if (bytesRead == 0) {
return 0;
}

channelBuffer.incrementIndex(bytesRead);

int bytesConsumed = Integer.MAX_VALUE;
while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) {
bytesConsumed = readConsumer.consumeReads(channelBuffer);
channelBuffer.release(bytesConsumed);
}

return bytesRead;
}

@Override
public void sendMessage(ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener) {
if (isClosing.get()) {
listener.accept(null, new ClosedChannelException());
return;
}

BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);
SocketSelector selector = channel.getSelector();
if (selector.isOnCurrentThread() == false) {
selector.queueWrite(writeOperation);
return;
}

selector.queueWriteInChannelBuffer(writeOperation);
}

@Override
public void queueWriteOperation(WriteOperation writeOperation) {
channel.getSelector().assertOnSelectorThread();
queued.add((BytesWriteOperation) writeOperation);
}

@Override
public void flushChannel() throws IOException {
channel.getSelector().assertOnSelectorThread();
int ops = queued.size();
if (ops == 1) {
singleFlush(queued.pop());
} else if (ops > 1) {
multiFlush();
}
}

@Override
public boolean hasQueuedWriteOps() {
channel.getSelector().assertOnSelectorThread();
return queued.isEmpty() == false;
}

@Override
public void closeChannel() {
if (isClosing.compareAndSet(false, true)) {
channel.getSelector().queueChannelClose(channel);
}
}

@Override
public boolean selectorShouldClose() {
return isPeerClosed() || hasIOException() || isClosing.get();
}

@Override
public void closeFromSelector() throws IOException {
channel.getSelector().assertOnSelectorThread();
if (channel.isOpen()) {
IOException channelCloseException = null;
try {
channel.closeFromSelector();
} catch (IOException e) {
channelCloseException = e;
}
// Set to true in order to reject new writes before queuing with selector
isClosing.set(true);
channelBuffer.close();
for (BytesWriteOperation op : queued) {
channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException());
}
queued.clear();
if (channelCloseException != null) {
throw channelCloseException;
}
}
}

private void singleFlush(BytesWriteOperation headOp) throws IOException {
try {
int written = flushToChannel(headOp.getBuffersToWrite());
headOp.incrementIndex(written);
} catch (IOException e) {
channel.getSelector().executeFailedListener(headOp.getListener(), e);
throw e;
}

if (headOp.isFullyFlushed()) {
channel.getSelector().executeListener(headOp.getListener(), null);
} else {
queued.push(headOp);
}
}

private void multiFlush() throws IOException {
boolean lastOpCompleted = true;
while (lastOpCompleted && queued.isEmpty() == false) {
BytesWriteOperation op = queued.pop();
singleFlush(op);
lastOpCompleted = op.isFullyFlushed();
}
}
}

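The `read()` method above keeps calling `consumeReads` until the consumer stops making progress. A minimal standalone sketch of that drain pattern, using a hypothetical integer stand-in for `InboundChannelBuffer` and a consumer that only eats complete 4-byte frames:

["source","java"]
--------------------------------------------------
public class DrainLoopDemo {
    // Hypothetical consumer: consumes complete 4-byte frames, returns bytes consumed (0 = no progress).
    static int consumeReads(int available) {
        return (available / 4) * 4;
    }

    public static void main(String[] args) {
        int buffered = 10; // pretend 10 bytes arrived from the channel
        int consumed = Integer.MAX_VALUE;
        // Same shape as BytesChannelContext.read(): loop while progress is made and data remains.
        while (consumed > 0 && buffered > 0) {
            consumed = consumeReads(buffered);
            buffered -= consumed; // release() in the real code
        }
        System.out.println("leftover partial frame bytes: " + buffered); // prints 2
    }
}
--------------------------------------------------
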
@@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import java.io.IOException;

public class BytesReadContext implements ReadContext {

private final NioSocketChannel channel;
private final ReadConsumer readConsumer;
private final InboundChannelBuffer channelBuffer;

public BytesReadContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) {
this.channel = channel;
this.channelBuffer = channelBuffer;
this.readConsumer = readConsumer;
}

@Override
public int read() throws IOException {
if (channelBuffer.getRemaining() == 0) {
// Requiring one additional byte will ensure that a new page is allocated.
channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1);
}

int bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex()));

if (bytesRead == -1) {
return bytesRead;
}

channelBuffer.incrementIndex(bytesRead);

int bytesConsumed = Integer.MAX_VALUE;
while (bytesConsumed > 0) {
bytesConsumed = readConsumer.consumeReads(channelBuffer);
channelBuffer.release(bytesConsumed);
}

return bytesRead;
}

@Override
public void close() {
channelBuffer.close();
}
}

@@ -1,111 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.LinkedList;
import java.util.function.BiConsumer;

public class BytesWriteContext implements WriteContext {

private final NioSocketChannel channel;
private final LinkedList<WriteOperation> queued = new LinkedList<>();

public BytesWriteContext(NioSocketChannel channel) {
this.channel = channel;
}

@Override
public void sendMessage(Object message, BiConsumer<Void, Throwable> listener) {
ByteBuffer[] buffers = (ByteBuffer[]) message;
if (channel.isWritable() == false) {
listener.accept(null, new ClosedChannelException());
return;
}

WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
SocketSelector selector = channel.getSelector();
if (selector.isOnCurrentThread() == false) {
selector.queueWrite(writeOperation);
return;
}

// TODO: Eval if we will allow writes from sendMessage
selector.queueWriteInChannelBuffer(writeOperation);
}

@Override
public void queueWriteOperations(WriteOperation writeOperation) {
assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to queue writes";
queued.add(writeOperation);
}

@Override
public void flushChannel() throws IOException {
assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to flush writes";
int ops = queued.size();
if (ops == 1) {
singleFlush(queued.pop());
} else if (ops > 1) {
multiFlush();
}
}

@Override
public boolean hasQueuedWriteOps() {
assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to access queued writes";
return queued.isEmpty() == false;
}

@Override
public void clearQueuedWriteOps(Exception e) {
assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to clear queued writes";
for (WriteOperation op : queued) {
channel.getSelector().executeFailedListener(op.getListener(), e);
}
queued.clear();
}

private void singleFlush(WriteOperation headOp) throws IOException {
try {
headOp.flush();
} catch (IOException e) {
channel.getSelector().executeFailedListener(headOp.getListener(), e);
throw e;
}

if (headOp.isFullyFlushed()) {
channel.getSelector().executeListener(headOp.getListener(), null);
} else {
queued.push(headOp);
}
}

private void multiFlush() throws IOException {
boolean lastOpCompleted = true;
while (lastOpCompleted && queued.isEmpty() == false) {
WriteOperation op = queued.pop();
singleFlush(op);
lastOpCompleted = op.isFullyFlushed();
}
}
}

@@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.function.BiConsumer;

public class BytesWriteOperation implements WriteOperation {

private final NioSocketChannel channel;
private final BiConsumer<Void, Throwable> listener;
private final ByteBuffer[] buffers;
private final int[] offsets;
private final int length;
private int internalIndex;

public BytesWriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener) {
this.channel = channel;
this.listener = listener;
this.buffers = buffers;
this.offsets = new int[buffers.length];
int offset = 0;
for (int i = 0; i < buffers.length; i++) {
ByteBuffer buffer = buffers[i];
offsets[i] = offset;
offset += buffer.remaining();
}
length = offset;
}

@Override
public BiConsumer<Void, Throwable> getListener() {
return listener;
}

@Override
public NioSocketChannel getChannel() {
return channel;
}

public boolean isFullyFlushed() {
assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index="
+ internalIndex + "]";
return internalIndex == length;
}

public void incrementIndex(int delta) {
internalIndex += delta;
assert length >= internalIndex : "Should never increment index past length [length=" + length + ", post-increment index="
+ internalIndex + ", delta=" + delta + "]";
}

public ByteBuffer[] getBuffersToWrite() {
final int index = Arrays.binarySearch(offsets, internalIndex);
int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;

ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex];

ByteBuffer firstBuffer = buffers[offsetIndex].duplicate();
firstBuffer.position(internalIndex - offsets[offsetIndex]);
postIndexBuffers[0] = firstBuffer;
int j = 1;
for (int i = (offsetIndex + 1); i < buffers.length; ++i) {
postIndexBuffers[j++] = buffers[i].duplicate();
}

return postIndexBuffers;
}

}

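The `getBuffersToWrite` arithmetic above is compact; a tiny standalone sketch (plain JDK, no Elasticsearch classes) that replays the same offset bookkeeping on illustrative sizes may help:

["source","java"]
--------------------------------------------------
import java.util.Arrays;

public class OffsetDemo {
    public static void main(String[] args) {
        // Two buffers of 3 and 5 bytes, so cumulative offsets are [0, 3] and total length is 8.
        int[] offsets = {0, 3};
        int internalIndex = 4; // 4 bytes already flushed

        // Same lookup as getBuffersToWrite: find the buffer containing internalIndex.
        int index = Arrays.binarySearch(offsets, internalIndex);
        int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;

        System.out.println("resume in buffer " + offsetIndex
            + " at position " + (internalIndex - offsets[offsetIndex])); // buffer 1, position 1
    }
}
--------------------------------------------------
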
@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.nio;

import java.io.IOException;

public interface ChannelContext {
/**
* This method cleans up any context resources that need to be released when a channel is closed. It
* should only be called by the selector thread.
*
* @throws IOException during channel / context close
*/
void closeFromSelector() throws IOException;

/**
* Schedules a channel to be closed by the selector event loop with which it is registered.
*
* If the channel is open and the state can be transitioned to closed, the close operation will
* be scheduled with the event loop.
*
* Depending on the underlying protocol of the channel, a close operation might simply close the socket
* channel or may involve reading and writing messages.
*/
void closeChannel();

void handleException(Exception e);
}

@@ -88,9 +88,7 @@ public abstract class ChannelFactory<ServerSocket extends NioServerSocketChannel
private Socket internalCreateChannel(SocketSelector selector, SocketChannel rawChannel) throws IOException {
try {
Socket channel = createChannel(selector, rawChannel);
assert channel.getReadContext() != null : "read context should have been set on channel";
assert channel.getWriteContext() != null : "write context should have been set on channel";
assert channel.getExceptionContext() != null : "exception handler should have been set on channel";
assert channel.getContext() != null : "channel context should have been set on channel";
return channel;
} catch (Exception e) {
closeRawChannel(rawChannel, e);

@@ -163,6 +163,11 @@ public abstract class ESSelector implements Closeable {
return Thread.currentThread() == thread;
}

public void assertOnSelectorThread() {
assert isOnCurrentThread() : "Must be on selector thread to perform this operation. Currently on thread ["
+ Thread.currentThread().getName() + "].";
}

void wakeup() {
// TODO: Do we need the wakeup optimizations that some other libraries use?
selector.wakeup();

@@ -69,7 +69,7 @@ public abstract class EventHandler {
*/
protected void handleClose(NioChannel channel) {
try {
channel.closeFromSelector();
channel.getContext().closeFromSelector();
} catch (IOException e) {
closeException(channel, e);
}

@@ -59,6 +59,10 @@ public final class InboundChannelBuffer implements AutoCloseable {
ensureCapacity(PAGE_SIZE);
}

public static InboundChannelBuffer allocatingInstance() {
return new InboundChannelBuffer(() -> new Page(ByteBuffer.allocate(PAGE_SIZE), () -> {}));
}

@Override
public void close() {
if (isClosed.compareAndSet(false, true)) {

@@ -44,6 +44,8 @@ public interface NioChannel {

NetworkChannel getRawChannel();

ChannelContext getContext();

/**
* Adds a close listener to the channel. Multiple close listeners can be added. There is no guarantee
* about the order in which close listeners will be executed. If the channel is already closed, the

@@ -21,12 +21,13 @@ package org.elasticsearch.nio;

import java.io.IOException;
import java.nio.channels.ServerSocketChannel;
import java.util.function.Consumer;
import java.util.concurrent.atomic.AtomicBoolean;

public class NioServerSocketChannel extends AbstractNioChannel<ServerSocketChannel> {

private final ChannelFactory<?, ?> channelFactory;
private Consumer<NioSocketChannel> acceptContext;
private ServerChannelContext context;
private final AtomicBoolean contextSet = new AtomicBoolean(false);

public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory<?, ?> channelFactory, AcceptingSelector selector)
throws IOException {

@@ -39,17 +40,22 @@ public class NioServerSocketChannel extends AbstractNioChannel<ServerSocketChann
}

/**
* This method sets the accept context for a server socket channel. The accept context is called when a
* new channel is accepted. The parameter passed to the context is the new channel.
* This method sets the context for a server socket channel. The context is called when a new channel is
* accepted, an exception occurs, or it is time to close the channel.
*
* @param acceptContext to call
* @param context to call
*/
public void setAcceptContext(Consumer<NioSocketChannel> acceptContext) {
this.acceptContext = acceptContext;
public void setContext(ServerChannelContext context) {
if (contextSet.compareAndSet(false, true)) {
this.context = context;
} else {
throw new IllegalStateException("The context on this channel was already set. It should only be set once.");
}
}

public Consumer<NioSocketChannel> getAcceptContext() {
return acceptContext;
@Override
public ServerChannelContext getContext() {
return context;
}

@Override

@@ -22,7 +22,6 @@ package org.elasticsearch.nio;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.SocketChannel;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

@@ -33,10 +32,8 @@ public class NioSocketChannel extends AbstractNioChannel<SocketChannel> {
private final InetSocketAddress remoteAddress;
private final CompletableFuture<Void> connectContext = new CompletableFuture<>();
private final SocketSelector socketSelector;
private final AtomicBoolean contextsSet = new AtomicBoolean(false);
private WriteContext writeContext;
private ReadContext readContext;
private BiConsumer<NioSocketChannel, Exception> exceptionContext;
private final AtomicBoolean contextSet = new AtomicBoolean(false);
private SocketChannelContext context;
private Exception connectException;

public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) throws IOException {

@@ -45,23 +42,15 @@ public class NioSocketChannel extends AbstractNioChannel<SocketChannel> {
this.socketSelector = selector;
}

@Override
public void closeFromSelector() throws IOException {
assert socketSelector.isOnCurrentThread() : "Should only call from selector thread";
// Even if the channel has already been closed we will clear any pending write operations just in case
if (writeContext.hasQueuedWriteOps()) {
writeContext.clearQueuedWriteOps(new ClosedChannelException());
}
readContext.close();

super.closeFromSelector();
}

@Override
public SocketSelector getSelector() {
return socketSelector;
}

public int write(ByteBuffer buffer) throws IOException {
return socketChannel.write(buffer);
}

public int write(ByteBuffer[] buffers) throws IOException {
if (buffers.length == 1) {
return socketChannel.write(buffers[0]);

@@ -82,37 +71,17 @@ public class NioSocketChannel extends AbstractNioChannel<SocketChannel> {
}
}

public int read(InboundChannelBuffer buffer) throws IOException {
int bytesRead = (int) socketChannel.read(buffer.sliceBuffersFrom(buffer.getIndex()));

if (bytesRead == -1) {
return bytesRead;
}

buffer.incrementIndex(bytesRead);
return bytesRead;
}

public void setContexts(ReadContext readContext, WriteContext writeContext, BiConsumer<NioSocketChannel, Exception> exceptionContext) {
if (contextsSet.compareAndSet(false, true)) {
this.readContext = readContext;
this.writeContext = writeContext;
this.exceptionContext = exceptionContext;
public void setContext(SocketChannelContext context) {
if (contextSet.compareAndSet(false, true)) {
this.context = context;
} else {
throw new IllegalStateException("Contexts on this channel were already set. They should only be once.");
throw new IllegalStateException("The context on this channel was already set. It should only be set once.");
}
}

public WriteContext getWriteContext() {
return writeContext;
}

public ReadContext getReadContext() {
return readContext;
}

public BiConsumer<NioSocketChannel, Exception> getExceptionContext() {
return exceptionContext;
@Override
public SocketChannelContext getContext() {
return context;
}

public InetSocketAddress getRemoteAddress() {

@@ -123,14 +92,6 @@ public class NioSocketChannel extends AbstractNioChannel<SocketChannel> {
return isConnectComplete0();
}

public boolean isWritable() {
return isClosing.get() == false;
}

public boolean isReadable() {
return isClosing.get() == false;
}

/**
* This method will attempt to complete the connection process for this channel. It should be called for
* new channels or for a channel that has produced a OP_CONNECT event. If this method returns true then

@ -26,28 +26,81 @@ public final class SelectionKeyUtils {

    private SelectionKeyUtils() {}

    /**
     * Adds an interest in writes for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void setWriteInterested(NioChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_WRITE);
    }

    /**
     * Removes an interest in writes for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void removeWriteInterested(NioChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_WRITE);
    }

    /**
     * Adds an interest in connects and reads for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void setConnectAndReadInterested(NioChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ);
    }

    /**
     * Adds an interest in connects, reads, and writes for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void setConnectReadAndWriteInterested(NioChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ | SelectionKey.OP_WRITE);
    }

    /**
     * Removes an interest in connects for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void removeConnectInterested(NioChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_CONNECT);
    }

    public static void setAcceptInterested(NioServerSocketChannel channel) {
    /**
     * Adds an interest in accepts for this channel while maintaining other interests.
     *
     * @param channel the channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static void setAcceptInterested(NioServerSocketChannel channel) throws CancelledKeyException {
        SelectionKey selectionKey = channel.getSelectionKey();
        selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_ACCEPT);
    }

    /**
     * Checks for an interest in writes for this channel.
     *
     * @param channel the channel
     * @return a boolean indicating if we are currently interested in writes for this channel
     * @throws CancelledKeyException if the key was already cancelled
     */
    public static boolean isWriteInterested(NioSocketChannel channel) throws CancelledKeyException {
        return (channel.getSelectionKey().interestOps() & SelectionKey.OP_WRITE) != 0;
    }
}
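
Each helper above is a single bitwise update on the key's interest set: OR to add a flag, AND-NOT to clear it, AND to test it. A standalone sketch of that arithmetic using the real java.nio.channels.SelectionKey constants (the demo class itself is hypothetical):

import java.nio.channels.SelectionKey;

public class InterestOpsDemo {
    public static void main(String[] args) {
        // register for connect + read, as setConnectAndReadInterested does
        int ops = SelectionKey.OP_CONNECT | SelectionKey.OP_READ;
        // setWriteInterested: OR the write bit in, preserving the others
        ops |= SelectionKey.OP_WRITE;
        System.out.println((ops & SelectionKey.OP_WRITE) != 0);  // true, isWriteInterested
        // removeWriteInterested: AND with the complement clears only the write bit
        ops &= ~SelectionKey.OP_WRITE;
        System.out.println((ops & SelectionKey.OP_WRITE) != 0);  // false
        System.out.println((ops & SelectionKey.OP_READ) != 0);   // true, read interest survived
    }
}
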
@ -0,0 +1,62 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.nio;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

public class ServerChannelContext implements ChannelContext {

    private final NioServerSocketChannel channel;
    private final Consumer<NioSocketChannel> acceptor;
    private final BiConsumer<NioServerSocketChannel, Exception> exceptionHandler;
    private final AtomicBoolean isClosing = new AtomicBoolean(false);

    public ServerChannelContext(NioServerSocketChannel channel, Consumer<NioSocketChannel> acceptor,
                                BiConsumer<NioServerSocketChannel, Exception> exceptionHandler) {
        this.channel = channel;
        this.acceptor = acceptor;
        this.exceptionHandler = exceptionHandler;
    }

    public void acceptChannel(NioSocketChannel acceptedChannel) {
        acceptor.accept(acceptedChannel);
    }

    @Override
    public void closeFromSelector() throws IOException {
        channel.closeFromSelector();
    }

    @Override
    public void closeChannel() {
        if (isClosing.compareAndSet(false, true)) {
            channel.getSelector().queueChannelClose(channel);
        }
    }

    @Override
    public void handleException(Exception e) {
        exceptionHandler.accept(channel, e);
    }
}
@ -0,0 +1,129 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.nio;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.function.BiConsumer;

/**
 * This context should implement the specific logic for a channel. When a channel receives a notification
 * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will
 * be called. This context will need to implement all protocol related logic. Additionally, if any special
 * close behavior is required, it should be implemented in this context.
 *
 * The only methods of the context that should ever be called from a non-selector thread are
 * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}.
 */
public abstract class SocketChannelContext implements ChannelContext {

    protected final NioSocketChannel channel;
    private final BiConsumer<NioSocketChannel, Exception> exceptionHandler;
    private boolean ioException;
    private boolean peerClosed;

    protected SocketChannelContext(NioSocketChannel channel, BiConsumer<NioSocketChannel, Exception> exceptionHandler) {
        this.channel = channel;
        this.exceptionHandler = exceptionHandler;
    }

    @Override
    public void handleException(Exception e) {
        exceptionHandler.accept(channel, e);
    }

    public void channelRegistered() throws IOException {}

    public abstract int read() throws IOException;

    public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener);

    public abstract void queueWriteOperation(WriteOperation writeOperation);

    public abstract void flushChannel() throws IOException;

    public abstract boolean hasQueuedWriteOps();

    /**
     * This method indicates if a selector should close this channel.
     *
     * @return a boolean indicating if the selector should close
     */
    public abstract boolean selectorShouldClose();

    protected boolean hasIOException() {
        return ioException;
    }

    protected boolean isPeerClosed() {
        return peerClosed;
    }

    protected int readFromChannel(ByteBuffer buffer) throws IOException {
        try {
            int bytesRead = channel.read(buffer);
            if (bytesRead < 0) {
                peerClosed = true;
                bytesRead = 0;
            }
            return bytesRead;
        } catch (IOException e) {
            ioException = true;
            throw e;
        }
    }

    protected int readFromChannel(ByteBuffer[] buffers) throws IOException {
        try {
            int bytesRead = channel.read(buffers);
            if (bytesRead < 0) {
                peerClosed = true;
                bytesRead = 0;
            }
            return bytesRead;
        } catch (IOException e) {
            ioException = true;
            throw e;
        }
    }

    protected int flushToChannel(ByteBuffer buffer) throws IOException {
        try {
            return channel.write(buffer);
        } catch (IOException e) {
            ioException = true;
            throw e;
        }
    }

    protected int flushToChannel(ByteBuffer[] buffers) throws IOException {
        try {
            return channel.write(buffers);
        } catch (IOException e) {
            ioException = true;
            throw e;
        }
    }

    @FunctionalInterface
    public interface ReadConsumer {
        int consumeReads(InboundChannelBuffer channelBuffer) throws IOException;
    }
}
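
The readFromChannel/flushToChannel helpers fold two failure signals into plain booleans: a negative read marks the peer as closed, and any IOException is remembered so selectorShouldClose() can later report the channel as dead. A standalone sketch of that bookkeeping against a raw java.nio socket (class and method names hypothetical; the shouldClose composition is an assumption, not the commit's exact logic):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

class ChannelIoState {
    private boolean ioException;
    private boolean peerClosed;

    int readFromChannel(SocketChannel channel, ByteBuffer buffer) throws IOException {
        try {
            int bytesRead = channel.read(buffer);
            if (bytesRead < 0) {
                peerClosed = true;   // -1 means the remote side shut down its output
                bytesRead = 0;       // callers only ever see non-negative byte counts
            }
            return bytesRead;
        } catch (IOException e) {
            ioException = true;      // remembered so the selector can close the channel
            throw e;
        }
    }

    boolean shouldClose() {
        return peerClosed || ioException;
    }
}
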
@ -43,8 +43,14 @@ public class SocketEventHandler extends EventHandler {
     *
     * @param channel that was registered
     */
    protected void handleRegistration(NioSocketChannel channel) {
        SelectionKeyUtils.setConnectAndReadInterested(channel);
    protected void handleRegistration(NioSocketChannel channel) throws IOException {
        SocketChannelContext context = channel.getContext();
        context.channelRegistered();
        if (context.hasQueuedWriteOps()) {
            SelectionKeyUtils.setConnectReadAndWriteInterested(channel);
        } else {
            SelectionKeyUtils.setConnectAndReadInterested(channel);
        }
    }

    /**

@ -55,7 +61,7 @@ public class SocketEventHandler extends EventHandler {
     */
    protected void registrationException(NioSocketChannel channel, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", channel), exception);
        exceptionCaught(channel, exception);
        channel.getContext().handleException(exception);
    }

    /**

@ -76,7 +82,7 @@ public class SocketEventHandler extends EventHandler {
     */
    protected void connectException(NioSocketChannel channel, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", channel), exception);
        exceptionCaught(channel, exception);
        channel.getContext().handleException(exception);
    }

    /**

@ -86,10 +92,7 @@ public class SocketEventHandler extends EventHandler {
     * @param channel that can be read
     */
    protected void handleRead(NioSocketChannel channel) throws IOException {
        int bytesRead = channel.getReadContext().read();
        if (bytesRead == -1) {
            handleClose(channel);
        }
        channel.getContext().read();
    }

    /**

@ -100,23 +103,18 @@ public class SocketEventHandler extends EventHandler {
     */
    protected void readException(NioSocketChannel channel, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", channel), exception);
        exceptionCaught(channel, exception);
        channel.getContext().handleException(exception);
    }

    /**
     * This method is called when a channel signals it is ready to receive writes. All of the write logic
     * should occur in this call.
     *
     * @param channel that can be read
     * @param channel that can be written to
     */
    protected void handleWrite(NioSocketChannel channel) throws IOException {
        WriteContext channelContext = channel.getWriteContext();
        SocketChannelContext channelContext = channel.getContext();
        channelContext.flushChannel();
        if (channelContext.hasQueuedWriteOps()) {
            SelectionKeyUtils.setWriteInterested(channel);
        } else {
            SelectionKeyUtils.removeWriteInterested(channel);
        }
    }

    /**

@ -127,20 +125,7 @@ public class SocketEventHandler extends EventHandler {
     */
    protected void writeException(NioSocketChannel channel, Exception exception) {
        logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", channel), exception);
        exceptionCaught(channel, exception);
    }

    /**
     * This method is called when handling an event from a channel fails due to an unexpected exception.
     * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw
     * {@link java.nio.channels.CancelledKeyException}.
     *
     * @param channel that caused the exception
     * @param exception that was thrown
     */
    protected void genericChannelException(NioChannel channel, Exception exception) {
        super.genericChannelException(channel, exception);
        exceptionCaught((NioSocketChannel) channel, exception);
        channel.getContext().handleException(exception);
    }

    /**

@ -153,7 +138,20 @@ public class SocketEventHandler extends EventHandler {
        logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception);
    }

    private void exceptionCaught(NioSocketChannel channel, Exception e) {
        channel.getExceptionContext().accept(channel, e);
    /**
     * @param channel that was handled
     */
    protected void postHandling(NioSocketChannel channel) {
        if (channel.getContext().selectorShouldClose()) {
            handleClose(channel);
        } else {
            boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(channel);
            boolean pendingWrites = channel.getContext().hasQueuedWriteOps();
            if (currentlyWriteInterested == false && pendingWrites) {
                SelectionKeyUtils.setWriteInterested(channel);
            } else if (currentlyWriteInterested && pendingWrites == false) {
                SelectionKeyUtils.removeWriteInterested(channel);
            }
        }
    }
}
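
postHandling runs after every read/write pass and reconciles the OP_WRITE interest flag with the actual queue state, so the selector neither misses freshly queued writes nor keeps waking up on a writable-but-idle channel. The decision reduced to a pure function (a sketch, names hypothetical):

public class WriteInterestReconcile {
    // returns the desired write-interest state given the current flag and queue state
    static boolean reconcile(boolean writeInterested, boolean pendingWrites) {
        if (writeInterested == false && pendingWrites) {
            return true;   // writes arrived since the last pass: start watching OP_WRITE
        } else if (writeInterested && pendingWrites == false) {
            return false;  // queue drained: stop watching OP_WRITE to avoid busy wake-ups
        }
        return writeInterested;  // already consistent, leave the key untouched
    }

    public static void main(String[] args) {
        System.out.println(reconcile(false, true));   // true
        System.out.println(reconcile(true, false));   // false
        System.out.println(reconcile(true, true));    // true (no change)
    }
}
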
@ -64,6 +64,8 @@ public class SocketSelector extends ESSelector {
                handleRead(nioSocketChannel);
            }
        }

        eventHandler.postHandling(nioSocketChannel);
    }

    @Override

@ -118,12 +120,12 @@ public class SocketSelector extends ESSelector {
     * @param writeOperation to be queued in a channel's buffer
     */
    public void queueWriteInChannelBuffer(WriteOperation writeOperation) {
        assert isOnCurrentThread() : "Must be on selector thread";
        assertOnSelectorThread();
        NioSocketChannel channel = writeOperation.getChannel();
        WriteContext context = channel.getWriteContext();
        SocketChannelContext context = channel.getContext();
        try {
            SelectionKeyUtils.setWriteInterested(channel);
            context.queueWriteOperations(writeOperation);
            context.queueWriteOperation(writeOperation);
        } catch (Exception e) {
            executeFailedListener(writeOperation.getListener(), e);
        }

@ -137,7 +139,7 @@ public class SocketSelector extends ESSelector {
     * @param value to provide to listener
     */
    public <V> void executeListener(BiConsumer<V, Throwable> listener, V value) {
        assert isOnCurrentThread() : "Must be on selector thread";
        assertOnSelectorThread();
        try {
            listener.accept(value, null);
        } catch (Exception e) {

@ -153,7 +155,7 @@ public class SocketSelector extends ESSelector {
     * @param exception to provide to listener
     */
    public <V> void executeFailedListener(BiConsumer<V, Throwable> listener, Exception exception) {
        assert isOnCurrentThread() : "Must be on selector thread";
        assertOnSelectorThread();
        try {
            listener.accept(null, exception);
        } catch (Exception e) {

@ -180,7 +182,7 @@ public class SocketSelector extends ESSelector {
    private void handleQueuedWrites() {
        WriteOperation writeOperation;
        while ((writeOperation = queuedWrites.poll()) != null) {
            if (writeOperation.getChannel().isWritable()) {
            if (writeOperation.getChannel().isOpen()) {
                queueWriteInChannelBuffer(writeOperation);
            } else {
                executeFailedListener(writeOperation.getListener(), new ClosedChannelException());
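
executeListener and executeFailedListener encode the completion contract used throughout this change: a BiConsumer<V, Throwable> is invoked exactly once, with either a value and a null error, or a null value and the exception. A runnable illustration of both paths (the listener body is hypothetical):

import java.io.IOException;
import java.util.function.BiConsumer;

public class ListenerContractDemo {
    public static void main(String[] args) {
        BiConsumer<Void, Throwable> listener = (value, error) -> {
            if (error != null) {
                System.out.println("write failed: " + error);   // executeFailedListener path
            } else {
                System.out.println("write flushed");            // executeListener path
            }
        };
        listener.accept(null, null);                            // success; value is null for Void
        listener.accept(null, new IOException("broken pipe"));  // failure
    }
}
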
@ -19,74 +19,16 @@

package org.elasticsearch.nio;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.function.BiConsumer;

public class WriteOperation {
/**
 * This is a basic write operation that can be queued with a channel. The only requirement of a write
 * operation is that it has a listener and a reference to its channel. The actual conversion of the write
 * operation implementation to bytes will be performed by the {@link SocketChannelContext}.
 */
public interface WriteOperation {

    private final NioSocketChannel channel;
    private final BiConsumer<Void, Throwable> listener;
    private final ByteBuffer[] buffers;
    private final int[] offsets;
    private final int length;
    private int internalIndex;
    BiConsumer<Void, Throwable> getListener();

    public WriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener) {
        this.channel = channel;
        this.listener = listener;
        this.buffers = buffers;
        this.offsets = new int[buffers.length];
        int offset = 0;
        for (int i = 0; i < buffers.length; i++) {
            ByteBuffer buffer = buffers[i];
            offsets[i] = offset;
            offset += buffer.remaining();
        }
        length = offset;
    }

    public ByteBuffer[] getByteBuffers() {
        return buffers;
    }

    public BiConsumer<Void, Throwable> getListener() {
        return listener;
    }

    public NioSocketChannel getChannel() {
        return channel;
    }

    public boolean isFullyFlushed() {
        return internalIndex == length;
    }

    public int flush() throws IOException {
        int written = channel.write(getBuffersToWrite());
        internalIndex += written;
        return written;
    }

    private ByteBuffer[] getBuffersToWrite() {
        int offsetIndex = getOffsetIndex(internalIndex);

        ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex];

        ByteBuffer firstBuffer = buffers[offsetIndex].duplicate();
        firstBuffer.position(internalIndex - offsets[offsetIndex]);
        postIndexBuffers[0] = firstBuffer;
        int j = 1;
        for (int i = (offsetIndex + 1); i < buffers.length; ++i) {
            postIndexBuffers[j++] = buffers[i].duplicate();
        }

        return postIndexBuffers;
    }

    private int getOffsetIndex(int offset) {
        final int i = Arrays.binarySearch(offsets, offset);
        return i < 0 ? (-(i + 1)) - 1 : i;
    }
    NioSocketChannel getChannel();
}
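
The old class's partial-flush resume hinges on getOffsetIndex: offsets[i] is the absolute position where buffer i begins, so a binary search over offsets maps the running flushed-byte count back to the buffer holding the next unwritten byte. A standalone check of that arithmetic (demo class hypothetical):

import java.util.Arrays;

public class OffsetIndexDemo {
    static int getOffsetIndex(int[] offsets, int offset) {
        final int i = Arrays.binarySearch(offsets, offset);
        // miss: binarySearch returns -(insertionPoint) - 1; the byte lives in the
        // buffer just before the insertion point, hence the extra -1
        return i < 0 ? (-(i + 1)) - 1 : i;
    }

    public static void main(String[] args) {
        int[] offsets = {0, 10, 25};  // three buffers starting at these absolute positions
        System.out.println(getOffsetIndex(offsets, 0));   // 0: exact hit, first buffer
        System.out.println(getOffsetIndex(offsets, 12));  // 1: byte 12 falls inside the second buffer
        System.out.println(getOffsetIndex(offsets, 25));  // 2: exact hit, third buffer
    }
}
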
@ -27,8 +27,6 @@ import java.nio.channels.SelectionKey;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

import static org.mockito.Matchers.same;
import static org.mockito.Mockito.mock;

@ -41,21 +39,21 @@ public class AcceptorEventHandlerTests extends ESTestCase {
    private SocketSelector socketSelector;
    private ChannelFactory<NioServerSocketChannel, NioSocketChannel> channelFactory;
    private NioServerSocketChannel channel;
    private Consumer<NioSocketChannel> acceptedChannelCallback;
    private ServerChannelContext context;

    @Before
    @SuppressWarnings("unchecked")
    public void setUpHandler() throws IOException {
        channelFactory = mock(ChannelFactory.class);
        socketSelector = mock(SocketSelector.class);
        acceptedChannelCallback = mock(Consumer.class);
        context = mock(ServerChannelContext.class);
        ArrayList<SocketSelector> selectors = new ArrayList<>();
        selectors.add(socketSelector);
        handler = new AcceptorEventHandler(logger, new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()])));

        AcceptingSelector selector = mock(AcceptingSelector.class);
        channel = new DoNotRegisterServerChannel(mock(ServerSocketChannel.class), channelFactory, selector);
        channel.setAcceptContext(acceptedChannelCallback);
        channel.setContext(context);
        channel.register();
    }

@ -80,11 +78,11 @@ public class AcceptorEventHandlerTests extends ESTestCase {
    @SuppressWarnings("unchecked")
    public void testHandleAcceptCallsServerAcceptCallback() throws IOException {
        NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class), socketSelector);
        childChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class));
        childChannel.setContext(mock(SocketChannelContext.class));
        when(channelFactory.acceptNioChannel(same(channel), same(socketSelector))).thenReturn(childChannel);

        handler.acceptChannel(channel);

        verify(acceptedChannelCallback).accept(childChannel);
        verify(context).acceptChannel(childChannel);
    }
}
@ -0,0 +1,337 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.nio;

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.ArgumentCaptor;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.isNull;
import static org.mockito.Matchers.same;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class BytesChannelContextTests extends ESTestCase {

    private SocketChannelContext.ReadConsumer readConsumer;
    private NioSocketChannel channel;
    private BytesChannelContext context;
    private InboundChannelBuffer channelBuffer;
    private SocketSelector selector;
    private BiConsumer<Void, Throwable> listener;
    private int messageLength;

    @Before
    @SuppressWarnings("unchecked")
    public void init() {
        readConsumer = mock(SocketChannelContext.ReadConsumer.class);

        messageLength = randomInt(96) + 20;
        selector = mock(SocketSelector.class);
        listener = mock(BiConsumer.class);
        channel = mock(NioSocketChannel.class);
        channelBuffer = InboundChannelBuffer.allocatingInstance();
        context = new BytesChannelContext(channel, null, readConsumer, channelBuffer);

        when(channel.getSelector()).thenReturn(selector);
        when(selector.isOnCurrentThread()).thenReturn(true);
    }

    public void testSuccessfulRead() throws IOException {
        byte[] bytes = createMessage(messageLength);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0);

        assertEquals(messageLength, context.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
        verify(readConsumer, times(1)).consumeReads(channelBuffer);
    }

    public void testMultipleReadsConsumed() throws IOException {
        byte[] bytes = createMessage(messageLength * 2);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0);

        assertEquals(bytes.length, context.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
        verify(readConsumer, times(2)).consumeReads(channelBuffer);
    }

    public void testPartialRead() throws IOException {
        byte[] bytes = createMessage(messageLength);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(0);

        assertEquals(messageLength, context.read());

        assertEquals(bytes.length, channelBuffer.getIndex());
        verify(readConsumer, times(1)).consumeReads(channelBuffer);

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0);

        assertEquals(messageLength, context.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
        verify(readConsumer, times(2)).consumeReads(channelBuffer);
    }

    public void testReadThrowsIOException() throws IOException {
        IOException ioException = new IOException();
        when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException);

        IOException ex = expectThrows(IOException.class, () -> context.read());
        assertSame(ioException, ex);
    }

    public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException {
        when(channel.read(any(ByteBuffer[].class))).thenThrow(new IOException());

        assertFalse(context.selectorShouldClose());
        expectThrows(IOException.class, () -> context.read());
        assertTrue(context.selectorShouldClose());
    }

    public void testReadLessThanZeroMeansReadyForClose() throws IOException {
        when(channel.read(any(ByteBuffer[].class))).thenReturn(-1);

        assertEquals(0, context.read());

        assertTrue(context.selectorShouldClose());
    }

    public void testCloseClosesChannelBuffer() throws IOException {
        when(channel.isOpen()).thenReturn(true);
        Runnable closer = mock(Runnable.class);
        Supplier<InboundChannelBuffer.Page> pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer);
        InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier);
        buffer.ensureCapacity(1);
        BytesChannelContext context = new BytesChannelContext(channel, null, readConsumer, buffer);
        context.closeFromSelector();
        verify(closer).run();
    }

    public void testWriteFailsIfClosing() {
        context.closeChannel();

        ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))};
        context.sendMessage(buffers, listener);

        verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class));
    }

    public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception {
        ArgumentCaptor<BytesWriteOperation> writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class);

        when(selector.isOnCurrentThread()).thenReturn(false);

        ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))};
        context.sendMessage(buffers, listener);

        verify(selector).queueWrite(writeOpCaptor.capture());
        BytesWriteOperation writeOp = writeOpCaptor.getValue();

        assertSame(listener, writeOp.getListener());
        assertSame(channel, writeOp.getChannel());
        assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]);
    }

    public void testSendMessageFromSameThreadIsQueuedInChannel() {
        ArgumentCaptor<BytesWriteOperation> writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class);

        ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))};
        context.sendMessage(buffers, listener);

        verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture());
        BytesWriteOperation writeOp = writeOpCaptor.getValue();

        assertSame(listener, writeOp.getListener());
        assertSame(channel, writeOp.getChannel());
        assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]);
    }

    public void testWriteIsQueuedInChannel() {
        assertFalse(context.hasQueuedWriteOps());

        ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
        context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener));

        assertTrue(context.hasQueuedWriteOps());
    }

    public void testWriteOpsClearedOnClose() throws Exception {
        assertFalse(context.hasQueuedWriteOps());

        ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
        context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener));

        assertTrue(context.hasQueuedWriteOps());

        when(channel.isOpen()).thenReturn(true);
        context.closeFromSelector();

        verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class));

        assertFalse(context.hasQueuedWriteOps());
    }

    public void testQueuedWriteIsFlushedInFlushCall() throws Exception {
        assertFalse(context.hasQueuedWriteOps());

        ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
        BytesWriteOperation writeOperation = mock(BytesWriteOperation.class);
        context.queueWriteOperation(writeOperation);

        assertTrue(context.hasQueuedWriteOps());

        when(writeOperation.getBuffersToWrite()).thenReturn(buffers);
        when(writeOperation.isFullyFlushed()).thenReturn(true);
        when(writeOperation.getListener()).thenReturn(listener);
        context.flushChannel();

        verify(channel).write(buffers);
        verify(selector).executeListener(listener, null);
        assertFalse(context.hasQueuedWriteOps());
    }

    public void testPartialFlush() throws IOException {
        assertFalse(context.hasQueuedWriteOps());

        BytesWriteOperation writeOperation = mock(BytesWriteOperation.class);
        context.queueWriteOperation(writeOperation);

        assertTrue(context.hasQueuedWriteOps());

        when(writeOperation.isFullyFlushed()).thenReturn(false);
        context.flushChannel();

        verify(listener, times(0)).accept(null, null);
        assertTrue(context.hasQueuedWriteOps());
    }

    @SuppressWarnings("unchecked")
    public void testMultipleWritesPartialFlushes() throws IOException {
        assertFalse(context.hasQueuedWriteOps());

        BiConsumer<Void, Throwable> listener2 = mock(BiConsumer.class);
        BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class);
        BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class);
        when(writeOperation1.getListener()).thenReturn(listener);
        when(writeOperation2.getListener()).thenReturn(listener2);
        context.queueWriteOperation(writeOperation1);
        context.queueWriteOperation(writeOperation2);

        assertTrue(context.hasQueuedWriteOps());

        when(writeOperation1.isFullyFlushed()).thenReturn(true);
        when(writeOperation2.isFullyFlushed()).thenReturn(false);
        context.flushChannel();

        verify(selector).executeListener(listener, null);
        verify(listener2, times(0)).accept(null, null);
        assertTrue(context.hasQueuedWriteOps());

        when(writeOperation2.isFullyFlushed()).thenReturn(true);

        context.flushChannel();

        verify(selector).executeListener(listener2, null);
        assertFalse(context.hasQueuedWriteOps());
    }

    public void testWhenIOExceptionThrownListenerIsCalled() throws IOException {
        assertFalse(context.hasQueuedWriteOps());

        ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
        BytesWriteOperation writeOperation = mock(BytesWriteOperation.class);
        context.queueWriteOperation(writeOperation);

        assertTrue(context.hasQueuedWriteOps());

        IOException exception = new IOException();
        when(writeOperation.getBuffersToWrite()).thenReturn(buffers);
        when(channel.write(buffers)).thenThrow(exception);
        when(writeOperation.getListener()).thenReturn(listener);
        expectThrows(IOException.class, () -> context.flushChannel());

        verify(selector).executeFailedListener(listener, exception);
        assertFalse(context.hasQueuedWriteOps());
    }

    public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException {
        ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
        BytesWriteOperation writeOperation = mock(BytesWriteOperation.class);
        context.queueWriteOperation(writeOperation);

        IOException exception = new IOException();
        when(writeOperation.getBuffersToWrite()).thenReturn(buffers);
        when(channel.write(buffers)).thenThrow(exception);

        assertFalse(context.selectorShouldClose());
        expectThrows(IOException.class, () -> context.flushChannel());
        assertTrue(context.selectorShouldClose());
    }

    public void initiateCloseSchedulesCloseWithSelector() {
        context.closeChannel();
        verify(selector).queueChannelClose(channel);
    }

    private static byte[] createMessage(int length) {
        byte[] bytes = new byte[length];
        for (int i = 0; i < length; ++i) {
            bytes[i] = randomByte();
        }
        return bytes;
    }
}
@ -1,142 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.nio;

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.function.Supplier;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class BytesReadContextTests extends ESTestCase {

    private ReadContext.ReadConsumer readConsumer;
    private NioSocketChannel channel;
    private BytesReadContext readContext;
    private InboundChannelBuffer channelBuffer;
    private int messageLength;

    @Before
    public void init() {
        readConsumer = mock(ReadContext.ReadConsumer.class);

        messageLength = randomInt(96) + 20;
        channel = mock(NioSocketChannel.class);
        Supplier<InboundChannelBuffer.Page> pageSupplier = () ->
            new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {});
        channelBuffer = new InboundChannelBuffer(pageSupplier);
        readContext = new BytesReadContext(channel, readConsumer, channelBuffer);
    }

    public void testSuccessfulRead() throws IOException {
        byte[] bytes = createMessage(messageLength);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0);

        assertEquals(messageLength, readContext.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
        verify(readConsumer, times(2)).consumeReads(channelBuffer);
    }

    public void testMultipleReadsConsumed() throws IOException {
        byte[] bytes = createMessage(messageLength * 2);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0);

        assertEquals(bytes.length, readContext.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity());
        verify(readConsumer, times(3)).consumeReads(channelBuffer);
    }

    public void testPartialRead() throws IOException {
        byte[] bytes = createMessage(messageLength);

        when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> {
            ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0];
            buffers[0].put(bytes);
            return bytes.length;
        });

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(0, messageLength);

        assertEquals(messageLength, readContext.read());

        assertEquals(bytes.length, channelBuffer.getIndex());
        verify(readConsumer, times(1)).consumeReads(channelBuffer);

        when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0);

        assertEquals(messageLength, readContext.read());

        assertEquals(0, channelBuffer.getIndex());
        assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity());
        verify(readConsumer, times(3)).consumeReads(channelBuffer);
    }

    public void testReadThrowsIOException() throws IOException {
        IOException ioException = new IOException();
        when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException);

        IOException ex = expectThrows(IOException.class, () -> readContext.read());
        assertSame(ioException, ex);
    }

    public void closeClosesChannelBuffer() {
        InboundChannelBuffer buffer = mock(InboundChannelBuffer.class);
        BytesReadContext readContext = new BytesReadContext(channel, readConsumer, buffer);

        readContext.close();

        verify(buffer).close();
    }

    private static byte[] createMessage(int length) {
        byte[] bytes = new byte[length];
        for (int i = 0; i < length; ++i) {
            bytes[i] = randomByte();
        }
        return bytes;
    }
}
@ -1,212 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.nio;

import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.ArgumentCaptor;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.function.BiConsumer;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class BytesWriteContextTests extends ESTestCase {

    private SocketSelector selector;
    private BiConsumer<Void, Throwable> listener;
    private BytesWriteContext writeContext;
    private NioSocketChannel channel;

    @Before
    @SuppressWarnings("unchecked")
    public void setUp() throws Exception {
        super.setUp();
        selector = mock(SocketSelector.class);
        listener = mock(BiConsumer.class);
        channel = mock(NioSocketChannel.class);
        writeContext = new BytesWriteContext(channel);

        when(channel.getSelector()).thenReturn(selector);
        when(selector.isOnCurrentThread()).thenReturn(true);
    }

    public void testWriteFailsIfChannelNotWritable() throws Exception {
        when(channel.isWritable()).thenReturn(false);

        ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))};
        writeContext.sendMessage(buffers, listener);

        verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class));
    }

    public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception {
        ArgumentCaptor<WriteOperation> writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class);

        when(selector.isOnCurrentThread()).thenReturn(false);
        when(channel.isWritable()).thenReturn(true);

        ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))};
        writeContext.sendMessage(buffers, listener);

        verify(selector).queueWrite(writeOpCaptor.capture());
        WriteOperation writeOp = writeOpCaptor.getValue();

        assertSame(listener, writeOp.getListener());
        assertSame(channel, writeOp.getChannel());
        assertEquals(buffers[0], writeOp.getByteBuffers()[0]);
    }

    public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception {
        ArgumentCaptor<WriteOperation> writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class);

        when(channel.isWritable()).thenReturn(true);

        ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))};
        writeContext.sendMessage(buffers, listener);

        verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture());
        WriteOperation writeOp = writeOpCaptor.getValue();

        assertSame(listener, writeOp.getListener());
        assertSame(channel, writeOp.getChannel());
        assertEquals(buffers[0], writeOp.getByteBuffers()[0]);
    }

    public void testWriteIsQueuedInChannel() throws Exception {
        assertFalse(writeContext.hasQueuedWriteOps());

        ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
        writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener));

        assertTrue(writeContext.hasQueuedWriteOps());
    }

    public void testWriteOpsCanBeCleared() throws Exception {
        assertFalse(writeContext.hasQueuedWriteOps());

        ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
        writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener));

        assertTrue(writeContext.hasQueuedWriteOps());

        ClosedChannelException e = new ClosedChannelException();
        writeContext.clearQueuedWriteOps(e);

        verify(selector).executeFailedListener(listener, e);

        assertFalse(writeContext.hasQueuedWriteOps());
    }

    public void testQueuedWriteIsFlushedInFlushCall() throws Exception {
        assertFalse(writeContext.hasQueuedWriteOps());

        WriteOperation writeOperation = mock(WriteOperation.class);
        writeContext.queueWriteOperations(writeOperation);

        assertTrue(writeContext.hasQueuedWriteOps());

        when(writeOperation.isFullyFlushed()).thenReturn(true);
        when(writeOperation.getListener()).thenReturn(listener);
        writeContext.flushChannel();

        verify(writeOperation).flush();
        verify(selector).executeListener(listener, null);
        assertFalse(writeContext.hasQueuedWriteOps());
    }

    public void testPartialFlush() throws IOException {
        assertFalse(writeContext.hasQueuedWriteOps());

        WriteOperation writeOperation = mock(WriteOperation.class);
        writeContext.queueWriteOperations(writeOperation);

        assertTrue(writeContext.hasQueuedWriteOps());

        when(writeOperation.isFullyFlushed()).thenReturn(false);
        writeContext.flushChannel();

        verify(listener, times(0)).accept(null, null);
        assertTrue(writeContext.hasQueuedWriteOps());
    }

    @SuppressWarnings("unchecked")
    public void testMultipleWritesPartialFlushes() throws IOException {
        assertFalse(writeContext.hasQueuedWriteOps());

        BiConsumer<Void, Throwable> listener2 = mock(BiConsumer.class);
        WriteOperation writeOperation1 = mock(WriteOperation.class);
        WriteOperation writeOperation2 = mock(WriteOperation.class);
        when(writeOperation1.getListener()).thenReturn(listener);
        when(writeOperation2.getListener()).thenReturn(listener2);
        writeContext.queueWriteOperations(writeOperation1);
        writeContext.queueWriteOperations(writeOperation2);

        assertTrue(writeContext.hasQueuedWriteOps());

        when(writeOperation1.isFullyFlushed()).thenReturn(true);
        when(writeOperation2.isFullyFlushed()).thenReturn(false);
        writeContext.flushChannel();

        verify(selector).executeListener(listener, null);
        verify(listener2, times(0)).accept(null, null);
        assertTrue(writeContext.hasQueuedWriteOps());

        when(writeOperation2.isFullyFlushed()).thenReturn(true);

        writeContext.flushChannel();

        verify(selector).executeListener(listener2, null);
        assertFalse(writeContext.hasQueuedWriteOps());
    }

    public void testWhenIOExceptionThrownListenerIsCalled() throws IOException {
        assertFalse(writeContext.hasQueuedWriteOps());

        WriteOperation writeOperation = mock(WriteOperation.class);
        writeContext.queueWriteOperations(writeOperation);

        assertTrue(writeContext.hasQueuedWriteOps());

        IOException exception = new IOException();
        when(writeOperation.flush()).thenThrow(exception);
        when(writeOperation.getListener()).thenReturn(listener);
        expectThrows(IOException.class, () -> writeContext.flushChannel());

        verify(selector).executeFailedListener(listener, exception);
        assertFalse(writeContext.hasQueuedWriteOps());
    }

    private byte[] generateBytes(int n) {
        n += 10;
        byte[] bytes = new byte[n];
        for (int i = 0; i < n; ++i) {
            bytes[i] = randomByte();
        }
        return bytes;
    }
}
@ -28,7 +28,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.function.BiConsumer;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.same;

@ -139,7 +138,7 @@ public class ChannelFactoryTests extends ESTestCase {
        @Override
        public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException {
            NioSocketChannel nioSocketChannel = new NioSocketChannel(channel, selector);
            nioSocketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class));
            nioSocketChannel.setContext(mock(SocketChannelContext.class));
            return nioSocketChannel;
        }
@ -30,6 +30,8 @@ import java.io.IOException;
import java.nio.channels.ServerSocketChannel;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Supplier;

import static org.mockito.Mockito.mock;

@ -56,52 +58,42 @@ public class NioServerSocketChannelTests extends ESTestCase {
        thread.join();
    }

    @SuppressWarnings("unchecked")
    public void testClose() throws Exception {
        AtomicBoolean isClosed = new AtomicBoolean(false);
        CountDownLatch latch = new CountDownLatch(1);

        NioChannel channel = new DoNotCloseServerChannel(mock(ServerSocketChannel.class), mock(ChannelFactory.class), selector);
        try (ServerSocketChannel rawChannel = ServerSocketChannel.open()) {
            NioServerSocketChannel channel = new NioServerSocketChannel(rawChannel, mock(ChannelFactory.class), selector);
            channel.setContext(new ServerChannelContext(channel, mock(Consumer.class), mock(BiConsumer.class)));
            channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener<Void>() {
                @Override
                public void onResponse(Void o) {
                    isClosed.set(true);
                    latch.countDown();
                }

        channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener<Void>() {
            @Override
            public void onResponse(Void o) {
                isClosed.set(true);
                latch.countDown();
            }
            @Override
            public void onFailure(Exception e) {
                isClosed.set(true);
                latch.countDown();
            }
        }));

                @Override
                public void onFailure(Exception e) {
                    isClosed.set(true);
                    latch.countDown();
                }
            }));
            assertTrue(channel.isOpen());
            assertTrue(rawChannel.isOpen());
            assertFalse(isClosed.get());

        assertTrue(channel.isOpen());
        assertFalse(closedRawChannel.get());
        assertFalse(isClosed.get());

        PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture();
        channel.addCloseListener(ActionListener.toBiConsumer(closeFuture));
        channel.close();
        closeFuture.actionGet();
            PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture();
            channel.addCloseListener(ActionListener.toBiConsumer(closeFuture));
            selector.queueChannelClose(channel);
            closeFuture.actionGet();

        assertTrue(closedRawChannel.get());
        assertFalse(channel.isOpen());
        latch.await();
        assertTrue(isClosed.get());
    }

    private class DoNotCloseServerChannel extends DoNotRegisterServerChannel {

        private DoNotCloseServerChannel(ServerSocketChannel channel, ChannelFactory<?, ?> channelFactory, AcceptingSelector selector)
            throws IOException {
            super(channel, channelFactory, selector);
        }

        @Override
        void closeRawChannel() throws IOException {
            closedRawChannel.set(true);
            assertFalse(rawChannel.isOpen());
            assertFalse(channel.isOpen());
            latch.await();
            assertTrue(isClosed.get());
        }
    }
}
@@ -41,7 +41,6 @@ import static org.mockito.Mockito.when;

public class NioSocketChannelTests extends ESTestCase {

private SocketSelector selector;
private AtomicBoolean closedRawChannel;
private Thread thread;

@Before

@@ -49,7 +48,6 @@ public class NioSocketChannelTests extends ESTestCase {

public void startSelector() throws IOException {
selector = new SocketSelector(new SocketEventHandler(logger));
thread = new Thread(selector::runLoop);
closedRawChannel = new AtomicBoolean(false);
thread.start();
FutureUtils.get(selector.isRunningFuture());
}

@@ -65,42 +63,46 @@ public class NioSocketChannelTests extends ESTestCase {

AtomicBoolean isClosed = new AtomicBoolean(false);
CountDownLatch latch = new CountDownLatch(1);

NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector);
socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class));
socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener<Void>() {
@Override
public void onResponse(Void o) {
isClosed.set(true);
latch.countDown();
}
@Override
public void onFailure(Exception e) {
isClosed.set(true);
latch.countDown();
}
}));
try (SocketChannel rawChannel = SocketChannel.open()) {
NioSocketChannel socketChannel = new NioSocketChannel(rawChannel, selector);
socketChannel.setContext(new BytesChannelContext(socketChannel, mock(BiConsumer.class),
mock(SocketChannelContext.ReadConsumer.class), InboundChannelBuffer.allocatingInstance()));
socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener<Void>() {
@Override
public void onResponse(Void o) {
isClosed.set(true);
latch.countDown();
}

assertTrue(socketChannel.isOpen());
assertFalse(closedRawChannel.get());
assertFalse(isClosed.get());
@Override
public void onFailure(Exception e) {
isClosed.set(true);
latch.countDown();
}
}));

PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture();
socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture));
socketChannel.close();
closeFuture.actionGet();
assertTrue(socketChannel.isOpen());
assertTrue(rawChannel.isOpen());
assertFalse(isClosed.get());

assertTrue(closedRawChannel.get());
assertFalse(socketChannel.isOpen());
latch.await();
assertTrue(isClosed.get());
PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture();
socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture));
selector.queueChannelClose(socketChannel);
closeFuture.actionGet();

assertFalse(rawChannel.isOpen());
assertFalse(socketChannel.isOpen());
latch.await();
assertTrue(isClosed.get());
}
}

@SuppressWarnings("unchecked")
public void testConnectSucceeds() throws Exception {
SocketChannel rawChannel = mock(SocketChannel.class);
when(rawChannel.finishConnect()).thenReturn(true);
NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector);
socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class));
NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector);
socketChannel.setContext(mock(SocketChannelContext.class));
selector.scheduleForRegistration(socketChannel);

PlainActionFuture<Void> connectFuture = PlainActionFuture.newFuture();

@@ -109,15 +111,14 @@ public class NioSocketChannelTests extends ESTestCase {

assertTrue(socketChannel.isConnectComplete());
assertTrue(socketChannel.isOpen());
assertFalse(closedRawChannel.get());
}

@SuppressWarnings("unchecked")
public void testConnectFails() throws Exception {
SocketChannel rawChannel = mock(SocketChannel.class);
when(rawChannel.finishConnect()).thenThrow(new ConnectException());
NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector);
socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class));
NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector);
socketChannel.setContext(mock(SocketChannelContext.class));
selector.scheduleForRegistration(socketChannel);

PlainActionFuture<Void> connectFuture = PlainActionFuture.newFuture();

@@ -129,16 +130,4 @@ public class NioSocketChannelTests extends ESTestCase {

// Even if connection fails the channel is 'open' until close() is called
assertTrue(socketChannel.isOpen());
}

private class DoNotCloseChannel extends DoNotRegisterChannel {

private DoNotCloseChannel(SocketChannel channel, SocketSelector selector) throws IOException {
super(channel, selector);
}

@Override
void closeRawChannel() throws IOException {
closedRawChannel.set(true);
}
}
}

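The rewritten test registers close listeners and then drives the close through the selector rather than closing the raw channel directly. A rough JDK-only sketch of the listener-plus-future idiom it exercises (hypothetical names, not the commit's API):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;

public class ClosableChannelDemo {

    private final List<BiConsumer<Void, Exception>> closeListeners = new CopyOnWriteArrayList<>();
    private volatile boolean open = true;

    void addCloseListener(BiConsumer<Void, Exception> listener) {
        closeListeners.add(listener);
    }

    // In the real code this runs on the selector thread, never on the caller's thread.
    void closeFromSelector() {
        open = false;
        for (BiConsumer<Void, Exception> listener : closeListeners) {
            listener.accept(null, null);
        }
    }

    boolean isOpen() {
        return open;
    }

    public static void main(String[] args) {
        ClosableChannelDemo channel = new ClosableChannelDemo();
        CompletableFuture<Void> closeFuture = new CompletableFuture<>();
        channel.addCloseListener((v, e) -> {
            if (e == null) closeFuture.complete(v); else closeFuture.completeExceptionally(e);
        });
        channel.closeFromSelector();
        closeFuture.join();                   // returns once the listener has fired
        System.out.println(channel.isOpen()); // false
    }
}
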
@@ -28,8 +28,10 @@ import java.nio.channels.CancelledKeyException;

import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

@@ -39,7 +41,6 @@ public class SocketEventHandlerTests extends ESTestCase {

private SocketEventHandler handler;
private NioSocketChannel channel;
private ReadContext readContext;
private SocketChannel rawChannel;

@Before

@@ -50,21 +51,36 @@ public class SocketEventHandlerTests extends ESTestCase {

handler = new SocketEventHandler(logger);
rawChannel = mock(SocketChannel.class);
channel = new DoNotRegisterChannel(rawChannel, socketSelector);
readContext = mock(ReadContext.class);
when(rawChannel.finishConnect()).thenReturn(true);

channel.setContexts(readContext, new BytesWriteContext(channel), exceptionHandler);
InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance();
channel.setContext(new BytesChannelContext(channel, exceptionHandler, mock(SocketChannelContext.ReadConsumer.class), buffer));
channel.register();
channel.finishConnect();

when(socketSelector.isOnCurrentThread()).thenReturn(true);
}

public void testRegisterCallsContext() throws IOException {
NioSocketChannel channel = mock(NioSocketChannel.class);
SocketChannelContext channelContext = mock(SocketChannelContext.class);
when(channel.getContext()).thenReturn(channelContext);
when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0));
handler.handleRegistration(channel);
verify(channelContext).channelRegistered();
}

public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException {
handler.handleRegistration(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, channel.getSelectionKey().interestOps());
}

public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException {
channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class));
handler.handleRegistration(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps());
}

public void testRegistrationExceptionCallsExceptionHandler() throws IOException {
CancelledKeyException exception = new CancelledKeyException();
handler.registrationException(channel, exception);

@@ -83,79 +99,75 @@ public class SocketEventHandlerTests extends ESTestCase {

verify(exceptionHandler).accept(channel, exception);
}

public void testHandleReadDelegatesToReadContext() throws IOException {
when(readContext.read()).thenReturn(1);
public void testHandleReadDelegatesToContext() throws IOException {
NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class));
SocketChannelContext context = mock(SocketChannelContext.class);
channel.setContext(context);

when(context.read()).thenReturn(1);
handler.handleRead(channel);

verify(readContext).read();
verify(context).read();
}

public void testHandleReadMarksChannelForCloseIfPeerClosed() throws IOException {
NioSocketChannel nioSocketChannel = mock(NioSocketChannel.class);
when(nioSocketChannel.getReadContext()).thenReturn(readContext);
when(readContext.read()).thenReturn(-1);

handler.handleRead(nioSocketChannel);

verify(nioSocketChannel).closeFromSelector();
}

public void testReadExceptionCallsExceptionHandler() throws IOException {
public void testReadExceptionCallsExceptionHandler() {
IOException exception = new IOException();
handler.readException(channel, exception);
verify(exceptionHandler).accept(channel, exception);
}

@SuppressWarnings("unchecked")
public void testHandleWriteWithCompleteFlushRemovesOP_WRITEInterest() throws IOException {
SelectionKey selectionKey = channel.getSelectionKey();
setWriteAndRead(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps());

ByteBuffer[] buffers = {ByteBuffer.allocate(1)};
channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class)));

when(rawChannel.write(buffers[0])).thenReturn(1);
handler.handleWrite(channel);

assertEquals(SelectionKey.OP_READ, selectionKey.interestOps());
}

@SuppressWarnings("unchecked")
public void testHandleWriteWithInCompleteFlushLeavesOP_WRITEInterest() throws IOException {
SelectionKey selectionKey = channel.getSelectionKey();
setWriteAndRead(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps());

ByteBuffer[] buffers = {ByteBuffer.allocate(1)};
channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class)));

when(rawChannel.write(buffers[0])).thenReturn(0);
handler.handleWrite(channel);

assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps());
}

public void testHandleWriteWithNoOpsRemovesOP_WRITEInterest() throws IOException {
SelectionKey selectionKey = channel.getSelectionKey();
setWriteAndRead(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps());

handler.handleWrite(channel);

assertEquals(SelectionKey.OP_READ, selectionKey.interestOps());
}

private void setWriteAndRead(NioChannel channel) {
SelectionKeyUtils.setConnectAndReadInterested(channel);
SelectionKeyUtils.removeConnectInterested(channel);
SelectionKeyUtils.setWriteInterested(channel);
}

public void testWriteExceptionCallsExceptionHandler() throws IOException {
public void testWriteExceptionCallsExceptionHandler() {
IOException exception = new IOException();
handler.writeException(channel, exception);
verify(exceptionHandler).accept(channel, exception);
}

public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException {
NioSocketChannel channel = mock(NioSocketChannel.class);
SocketChannelContext context = mock(SocketChannelContext.class);
when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0));

when(channel.getContext()).thenReturn(context);
when(context.selectorShouldClose()).thenReturn(true);
handler.postHandling(channel);

verify(context).closeFromSelector();
}

public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOException {
NioSocketChannel channel = mock(NioSocketChannel.class);
SocketChannelContext context = mock(SocketChannelContext.class);
when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0));

when(channel.getContext()).thenReturn(context);
when(context.selectorShouldClose()).thenReturn(false);
handler.postHandling(channel);

verify(channel, times(0)).closeFromSelector();
}

public void testPostHandlingWillAddWriteIfNecessary() throws IOException {
NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class));
channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ));
SocketChannelContext context = mock(SocketChannelContext.class);
channel.setContext(context);

when(context.hasQueuedWriteOps()).thenReturn(true);

assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps());
handler.postHandling(channel);
assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps());
}

public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException {
NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class));
channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE));
SocketChannelContext context = mock(SocketChannelContext.class);
channel.setContext(context);

when(context.hasQueuedWriteOps()).thenReturn(false);

assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps());
handler.postHandling(channel);
assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps());
}
}

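These handler tests are largely assertions about SelectionKey interest-op arithmetic. The bit manipulation itself is plain java.nio; a self-contained illustration:

import java.nio.channels.SelectionKey;

public class InterestOpsDemo {
    public static void main(String[] args) {
        int ops = SelectionKey.OP_READ | SelectionKey.OP_CONNECT;

        // connect completed: drop OP_CONNECT, keep OP_READ
        ops &= ~SelectionKey.OP_CONNECT;

        // writes queued: add OP_WRITE
        ops |= SelectionKey.OP_WRITE;
        System.out.println((ops & SelectionKey.OP_WRITE) != 0); // true

        // flush finished with nothing left to write: remove OP_WRITE again
        ops &= ~SelectionKey.OP_WRITE;
        System.out.println(ops == SelectionKey.OP_READ);        // true
    }
}
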
@@ -49,7 +49,7 @@ public class SocketSelectorTests extends ESTestCase {

private SocketEventHandler eventHandler;
private NioSocketChannel channel;
private TestSelectionKey selectionKey;
private WriteContext writeContext;
private SocketChannelContext channelContext;
private BiConsumer<Void, Throwable> listener;
private ByteBuffer[] buffers = {ByteBuffer.allocate(1)};
private Selector rawSelector;

@@ -60,7 +60,7 @@ public class SocketSelectorTests extends ESTestCase {

super.setUp();
eventHandler = mock(SocketEventHandler.class);
channel = mock(NioSocketChannel.class);
writeContext = mock(WriteContext.class);
channelContext = mock(SocketChannelContext.class);
listener = mock(BiConsumer.class);
selectionKey = new TestSelectionKey(0);
selectionKey.attach(channel);

@@ -71,7 +71,7 @@ public class SocketSelectorTests extends ESTestCase {

when(channel.isOpen()).thenReturn(true);
when(channel.getSelectionKey()).thenReturn(selectionKey);
when(channel.getWriteContext()).thenReturn(writeContext);
when(channel.getContext()).thenReturn(channelContext);
when(channel.isConnectComplete()).thenReturn(true);
when(channel.getSelector()).thenReturn(socketSelector);
}

@@ -129,75 +129,71 @@ public class SocketSelectorTests extends ESTestCase {

public void testQueueWriteWhenNotRunning() throws Exception {
socketSelector.close();

socketSelector.queueWrite(new WriteOperation(channel, buffers, listener));
socketSelector.queueWrite(new BytesWriteOperation(channel, buffers, listener));

verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class));
}

public void testQueueWriteChannelIsNoLongerWritable() throws Exception {
WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
public void testQueueWriteChannelIsClosed() throws Exception {
BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);
socketSelector.queueWrite(writeOperation);

when(channel.isWritable()).thenReturn(false);
when(channel.isOpen()).thenReturn(false);
socketSelector.preSelect();

verify(writeContext, times(0)).queueWriteOperations(writeOperation);
verify(channelContext, times(0)).queueWriteOperation(writeOperation);
verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class));
}

public void testQueueWriteSelectionKeyThrowsException() throws Exception {
SelectionKey selectionKey = mock(SelectionKey.class);

WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);
CancelledKeyException cancelledKeyException = new CancelledKeyException();
socketSelector.queueWrite(writeOperation);

when(channel.isWritable()).thenReturn(true);
when(channel.getSelectionKey()).thenReturn(selectionKey);
when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException);
socketSelector.preSelect();

verify(writeContext, times(0)).queueWriteOperations(writeOperation);
verify(channelContext, times(0)).queueWriteOperation(writeOperation);
verify(listener).accept(null, cancelledKeyException);
}

public void testQueueWriteSuccessful() throws Exception {
WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);
socketSelector.queueWrite(writeOperation);

assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0);

when(channel.isWritable()).thenReturn(true);
socketSelector.preSelect();

verify(writeContext).queueWriteOperations(writeOperation);
verify(channelContext).queueWriteOperation(writeOperation);
assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0);
}

public void testQueueDirectlyInChannelBufferSuccessful() throws Exception {
WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);

assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0);

when(channel.isWritable()).thenReturn(true);
socketSelector.queueWriteInChannelBuffer(writeOperation);

verify(writeContext).queueWriteOperations(writeOperation);
verify(channelContext).queueWriteOperation(writeOperation);
assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0);
}

public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception {
SelectionKey selectionKey = mock(SelectionKey.class);

WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener);
CancelledKeyException cancelledKeyException = new CancelledKeyException();

when(channel.isWritable()).thenReturn(true);
when(channel.getSelectionKey()).thenReturn(selectionKey);
when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException);
socketSelector.queueWriteInChannelBuffer(writeOperation);

verify(writeContext, times(0)).queueWriteOperations(writeOperation);
verify(channelContext, times(0)).queueWriteOperation(writeOperation);
verify(listener).accept(null, cancelledKeyException);
}

@@ -285,6 +281,16 @@ public class SocketSelectorTests extends ESTestCase {

verify(eventHandler).readException(channel, ioException);
}

public void testWillCallPostHandleAfterChannelHandling() throws Exception {
selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ);

socketSelector.processKey(selectionKey);

verify(eventHandler).handleWrite(channel);
verify(eventHandler).handleRead(channel);
verify(eventHandler).postHandling(channel);
}

public void testCleanup() throws Exception {
NioSocketChannel unRegisteredChannel = mock(NioSocketChannel.class);

@@ -292,7 +298,7 @@ public class SocketSelectorTests extends ESTestCase {

socketSelector.preSelect();

socketSelector.queueWrite(new WriteOperation(mock(NioSocketChannel.class), buffers, listener));
socketSelector.queueWrite(new BytesWriteOperation(mock(NioSocketChannel.class), buffers, listener));
socketSelector.scheduleForRegistration(unRegisteredChannel);

TestSelectionKey testSelectionKey = new TestSelectionKey(0);

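SocketSelectorTests revolves around one invariant: channel state is only touched on the selector thread, so writes from other threads are queued and the selector is woken up. A stripped-down JDK-only sketch of that pattern (illustrative, not the commit's classes):

import java.io.IOException;
import java.nio.channels.Selector;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class SingleThreadedSelectorLoop {

    private final Selector selector;
    private final Queue<Runnable> pendingTasks = new ConcurrentLinkedQueue<>();

    public SingleThreadedSelectorLoop() throws IOException {
        this.selector = Selector.open();
    }

    // Safe to call from any thread: enqueue and wake the selector.
    public void queueTask(Runnable task) {
        pendingTasks.offer(task);
        selector.wakeup();
    }

    // Runs on the selector thread only.
    public void runLoop() throws IOException {
        while (selector.isOpen()) {
            Runnable task;
            while ((task = pendingTasks.poll()) != null) {
                task.run(); // e.g. register a channel or queue a write operation
            }
            selector.select(1000);
            selector.selectedKeys().clear(); // key handling elided in this sketch
        }
    }
}
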
@@ -45,71 +45,58 @@ public class WriteOperationTests extends ESTestCase {

}

public void testFlush() throws IOException {
public void testFullyFlushedMarker() {
ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
WriteOperation writeOp = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener);

when(channel.write(any(ByteBuffer[].class))).thenReturn(10);

writeOp.flush();
writeOp.incrementIndex(10);

assertTrue(writeOp.isFullyFlushed());
}

public void testPartialFlush() throws IOException {
public void testPartiallyFlushedMarker() {
ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
WriteOperation writeOp = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener);

when(channel.write(any(ByteBuffer[].class))).thenReturn(5);

writeOp.flush();
writeOp.incrementIndex(5);

assertFalse(writeOp.isFullyFlushed());
}

public void testMultipleFlushesWithCompositeBuffer() throws IOException {
ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)};
WriteOperation writeOp = new WriteOperation(channel, buffers, listener);
BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener);

ArgumentCaptor<ByteBuffer[]> buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class);

when(channel.write(buffersCaptor.capture())).thenReturn(5)
.thenReturn(5)
.thenReturn(2)
.thenReturn(15)
.thenReturn(1);

writeOp.flush();
writeOp.incrementIndex(5);
assertFalse(writeOp.isFullyFlushed());
writeOp.flush();
assertFalse(writeOp.isFullyFlushed());
writeOp.flush();
assertFalse(writeOp.isFullyFlushed());
writeOp.flush();
assertFalse(writeOp.isFullyFlushed());
writeOp.flush();
assertTrue(writeOp.isFullyFlushed());

List<ByteBuffer[]> values = buffersCaptor.getAllValues();
ByteBuffer[] byteBuffers = values.get(0);
assertEquals(3, byteBuffers.length);
assertEquals(10, byteBuffers[0].remaining());

byteBuffers = values.get(1);
ByteBuffer[] byteBuffers = writeOp.getBuffersToWrite();
assertEquals(3, byteBuffers.length);
assertEquals(5, byteBuffers[0].remaining());

byteBuffers = values.get(2);
writeOp.incrementIndex(5);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(2, byteBuffers.length);
assertEquals(15, byteBuffers[0].remaining());

byteBuffers = values.get(3);
writeOp.incrementIndex(2);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(2, byteBuffers.length);
assertEquals(13, byteBuffers[0].remaining());

byteBuffers = values.get(4);
writeOp.incrementIndex(15);
assertFalse(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(1, byteBuffers.length);
assertEquals(1, byteBuffers[0].remaining());

writeOp.incrementIndex(1);
assertTrue(writeOp.isFullyFlushed());
byteBuffers = writeOp.getBuffersToWrite();
assertEquals(1, byteBuffers.length);
assertEquals(0, byteBuffers[0].remaining());
}
}

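The rewritten test tracks flush progress with an explicit byte index instead of mocked channel writes. A minimal standalone version of that bookkeeping, using the same 10/15/3 buffer sizes as the test (my own implementation of the idea, not the commit's BytesWriteOperation):

import java.nio.ByteBuffer;
import java.util.Arrays;

public class FlushIndexDemo {
    public static void main(String[] args) {
        ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)};
        int total = 28; // 10 + 15 + 3
        int index = 0;  // bytes flushed so far

        index += 5;     // first partial write
        ByteBuffer[] toWrite = buffersToWrite(buffers, index);
        System.out.println(toWrite.length);          // 3
        System.out.println(toWrite[0].remaining());  // 5

        index += 5 + 2 + 15 + 1;                     // remaining writes
        System.out.println(index == total);          // true: fully flushed
    }

    // Returns the buffers still containing unflushed bytes, with the first
    // returned buffer's position advanced past what was already written.
    static ByteBuffer[] buffersToWrite(ByteBuffer[] buffers, int index) {
        int skipped = 0;
        int first = 0;
        while (first < buffers.length && skipped + buffers[first].capacity() <= index) {
            skipped += buffers[first].capacity();
            first++;
        }
        ByteBuffer[] remaining = Arrays.copyOfRange(buffers, first, buffers.length);
        if (remaining.length > 0) {
            remaining[0] = remaining[0].duplicate();
            remaining[0].position(index - skipped);
        }
        return remaining;
    }
}
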
@@ -17,7 +17,7 @@

* under the License.
*/

subprojects {
configure(subprojects.findAll { it.parent.path == project.path }) {
group = 'org.elasticsearch.plugin' // for modules which publish client jars
apply plugin: 'elasticsearch.esplugin'

@@ -31,6 +31,7 @@ integTestCluster {

dependencies {
compile 'org.antlr:antlr4-runtime:4.5.3'
compile 'org.ow2.asm:asm-debug-all:5.1'
compile project('spi')
}

dependencyLicenses {

@@ -17,19 +17,24 @@

* under the License.
*/

package org.elasticsearch.nio;

import java.io.IOException;

public interface ReadContext extends AutoCloseable {

int read() throws IOException;

@Override
void close();

@FunctionalInterface
interface ReadConsumer {
int consumeReads(InboundChannelBuffer channelBuffer) throws IOException;

apply plugin: 'elasticsearch.build'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

group = 'org.elasticsearch.plugin'
archivesBaseName = 'elasticsearch-scripting-painless-spi'

publishing {
publications {
nebula {
artifactId = archivesBaseName
}
}
}

dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
}

// no tests...yet?
test.enabled = false

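With the standalone ReadContext interface removed, reads are funneled through the ReadConsumer hook shown above (now nested in SocketChannelContext). A tiny self-contained rendering of that shape (simplified to a plain ByteBuffer; the real signature takes an InboundChannelBuffer):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ReadConsumerDemo {

    @FunctionalInterface
    interface ReadConsumer {
        // Returns how many bytes of the buffer were consumed as complete messages.
        int consumeReads(ByteBuffer channelBuffer) throws IOException;
    }

    public static void main(String[] args) throws IOException {
        ReadConsumer consumer = buffer -> {
            int readable = buffer.remaining();
            byte[] bytes = new byte[readable];
            buffer.get(bytes);
            System.out.println("consumed: " + new String(bytes, StandardCharsets.UTF_8));
            return readable;
        };
        ByteBuffer buffer = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        int consumed = consumer.consumeReads(buffer);
        System.out.println(consumed); // 5
    }
}
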
@@ -112,7 +112,6 @@ public class RankEvalResponse extends ActionResponse implements ToXContentObject

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject("rank_eval");
builder.field("quality_level", evaluationResult);
builder.startObject("details");
for (String key : details.keySet()) {

@@ -127,7 +126,6 @@ public class RankEvalResponse extends ActionResponse implements ToXContentObject

}
builder.endObject();
builder.endObject();
builder.endObject();
return builder;
}
}

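The removed startObject("rank_eval")/endObject() pair is the whole substance of this change: the response now renders its fields at the top level. A trivial illustration of the two response shapes (values shortened from the test below):

public class RankEvalShapeDemo {
    public static void main(String[] args) {
        // Before the change: everything nested under an outer "rank_eval" object.
        String before = "{\"rank_eval\":{\"quality_level\":0.123,\"details\":{},\"failures\":{}}}";
        // After the change: the wrapper is gone; fields sit at the top level.
        String after = "{\"quality_level\":0.123,\"details\":{},\"failures\":{}}";
        System.out.println(before);
        System.out.println(after);
    }
}
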
@@ -92,23 +92,21 @@ public class RankEvalResponseTests extends ESTestCase {

XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
String xContent = response.toXContent(builder, ToXContent.EMPTY_PARAMS).bytes().utf8ToString();
assertEquals(("{" +
" \"rank_eval\": {" +
" \"quality_level\": 0.123," +
" \"details\": {" +
" \"coffee_query\": {" +
" \"quality_level\": 0.1," +
" \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," +
" \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," +
" \"rating\":5}," +
" {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," +
" \"rating\":null}" +
" ]" +
" }" +
" }," +
" \"failures\": {" +
" \"beer_query\": {" +
" \"error\": \"ParsingException[someMsg]\"" +
" }" +
" \"quality_level\": 0.123," +
" \"details\": {" +
" \"coffee_query\": {" +
" \"quality_level\": 0.1," +
" \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," +
" \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," +
" \"rating\":5}," +
" {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," +
" \"rating\":null}" +
" ]" +
" }" +
" }," +
" \"failures\": {" +
" \"beer_query\": {" +
" \"error\": \"ParsingException[someMsg]\"" +
" }" +
" }" +
"}").replaceAll("\\s+", ""), xContent);

@@ -64,27 +64,27 @@

"metric" : { "precision": { "ignore_unlabeled" : true }}
}

- match: { rank_eval.quality_level: 1}
- match: { rank_eval.details.amsterdam_query.quality_level: 1.0}
- match: { rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
- match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}}
- match: { quality_level: 1}
- match: { details.amsterdam_query.quality_level: 1.0}
- match: { details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
- match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}}

- length: { rank_eval.details.amsterdam_query.hits: 3}
- match: { rank_eval.details.amsterdam_query.hits.0.hit._id: "doc2"}
- match: { rank_eval.details.amsterdam_query.hits.0.rating: 1}
- match: { rank_eval.details.amsterdam_query.hits.1.hit._id: "doc3"}
- match: { rank_eval.details.amsterdam_query.hits.1.rating: 1}
- match: { rank_eval.details.amsterdam_query.hits.2.hit._id: "doc4"}
- is_false: rank_eval.details.amsterdam_query.hits.2.rating
- length: { details.amsterdam_query.hits: 3}
- match: { details.amsterdam_query.hits.0.hit._id: "doc2"}
- match: { details.amsterdam_query.hits.0.rating: 1}
- match: { details.amsterdam_query.hits.1.hit._id: "doc3"}
- match: { details.amsterdam_query.hits.1.rating: 1}
- match: { details.amsterdam_query.hits.2.hit._id: "doc4"}
- is_false: details.amsterdam_query.hits.2.rating

- match: { rank_eval.details.berlin_query.quality_level: 1.0}
- match: { rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
- match: { rank_eval.details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
- length: { rank_eval.details.berlin_query.hits: 2}
- match: { rank_eval.details.berlin_query.hits.0.hit._id: "doc1" }
- match: { rank_eval.details.berlin_query.hits.0.rating: 1}
- match: { rank_eval.details.berlin_query.hits.1.hit._id: "doc4" }
- is_false: rank_eval.details.berlin_query.hits.1.rating
- match: { details.berlin_query.quality_level: 1.0}
- match: { details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
- match: { details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
- length: { details.berlin_query.hits: 2}
- match: { details.berlin_query.hits.0.hit._id: "doc1" }
- match: { details.berlin_query.hits.0.rating: 1}
- match: { details.berlin_query.hits.1.hit._id: "doc4" }
- is_false: details.berlin_query.hits.1.rating

---
"Mean Reciprocal Rank":

@@ -152,14 +152,14 @@

}

# average is (1/3 + 1/2)/2 = 5/12 ~ 0.41666666666666663
- gt: {rank_eval.quality_level: 0.416}
- lt: {rank_eval.quality_level: 0.417}
- gt: {rank_eval.details.amsterdam_query.quality_level: 0.333}
- lt: {rank_eval.details.amsterdam_query.quality_level: 0.334}
- match: {rank_eval.details.amsterdam_query.metric_details: {"first_relevant": 3}}
- match: {rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"},
- gt: {quality_level: 0.416}
- lt: {quality_level: 0.417}
- gt: {details.amsterdam_query.quality_level: 0.333}
- lt: {details.amsterdam_query.quality_level: 0.334}
- match: {details.amsterdam_query.metric_details: {"first_relevant": 3}}
- match: {details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"},
{"_index": "foo", "_id": "doc3"} ]}
- match: {rank_eval.details.berlin_query.quality_level: 0.5}
- match: {rank_eval.details.berlin_query.metric_details: {"first_relevant": 2}}
- match: {rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]}
- match: {details.berlin_query.quality_level: 0.5}
- match: {details.berlin_query.metric_details: {"first_relevant": 2}}
- match: {details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]}

@@ -69,11 +69,11 @@

"metric" : { "dcg": {}}
}

- gt: {rank_eval.quality_level: 13.848263 }
- lt: {rank_eval.quality_level: 13.848264 }
- gt: {rank_eval.details.dcg_query.quality_level: 13.848263}
- lt: {rank_eval.details.dcg_query.quality_level: 13.848264}
- match: {rank_eval.details.dcg_query.unknown_docs: [ ]}
- gt: {quality_level: 13.848263 }
- lt: {quality_level: 13.848264 }
- gt: {details.dcg_query.quality_level: 13.848263}
- lt: {details.dcg_query.quality_level: 13.848264}
- match: {details.dcg_query.unknown_docs: [ ]}

# reverse the order in which the results are returned (less relevant docs first)

@@ -96,11 +96,11 @@

"metric" : { "dcg": { }}
}

- gt: {rank_eval.quality_level: 10.299674}
- lt: {rank_eval.quality_level: 10.299675}
- gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674}
- lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675}
- match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]}
- gt: {quality_level: 10.299674}
- lt: {quality_level: 10.299675}
- gt: {details.dcg_query_reverse.quality_level: 10.299674}
- lt: {details.dcg_query_reverse.quality_level: 10.299675}
- match: {details.dcg_query_reverse.unknown_docs: [ ]}

# if we mix both, we should get the average

@@ -134,11 +134,11 @@

"metric" : { "dcg": { }}
}

- gt: {rank_eval.quality_level: 12.073969}
- lt: {rank_eval.quality_level: 12.073970}
- gt: {rank_eval.details.dcg_query.quality_level: 13.848263}
- lt: {rank_eval.details.dcg_query.quality_level: 13.848264}
- match: {rank_eval.details.dcg_query.unknown_docs: [ ]}
- gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674}
- lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675}
- match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]}
- gt: {quality_level: 12.073969}
- lt: {quality_level: 12.073970}
- gt: {details.dcg_query.quality_level: 13.848263}
- lt: {details.dcg_query.quality_level: 13.848264}
- match: {details.dcg_query.unknown_docs: [ ]}
- gt: {details.dcg_query_reverse.quality_level: 10.299674}
- lt: {details.dcg_query_reverse.quality_level: 10.299675}
- match: {details.dcg_query_reverse.unknown_docs: [ ]}

@@ -34,9 +34,9 @@

"metric" : { "precision": { "ignore_unlabeled" : true }}
}

- match: { rank_eval.quality_level: 1}
- match: { rank_eval.details.amsterdam_query.quality_level: 1.0}
- match: { rank_eval.details.amsterdam_query.unknown_docs: [ ]}
- match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
- match: { quality_level: 1}
- match: { details.amsterdam_query.quality_level: 1.0}
- match: { details.amsterdam_query.unknown_docs: [ ]}
- match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}

- is_true: rank_eval.failures.invalid_query
- is_true: failures.invalid_query

@@ -52,13 +52,6 @@ dependencies {

testCompile project(path: ':modules:parent-join', configuration: 'runtime')
}

dependencyLicenses {
// Don't check the client's license. We know it.
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false
} - project.configurations.provided
}

thirdPartyAudit.excludes = [
// Commons logging
'javax.servlet.ServletContextEvent',

@@ -18,39 +18,11 @@

*/

// A meta plugin packaging example that bundles multiple plugins in a single zip.
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

File plugins = new File(buildDir, 'plugins-unzip')
subprojects {
// unzip the subproject plugins
task unzip(type:Copy, dependsOn: "${project.path}:bundlePlugin") {
File dest = new File(plugins, project.name)
from { zipTree(project(project.path).bundlePlugin.outputs.files.singleFile) }
eachFile { f -> f.path = f.path.replaceFirst('elasticsearch', '') }
into dest
}
apply plugin: 'elasticsearch.es-meta-plugin'

es_meta_plugin {
name 'meta-plugin'
description 'example meta plugin'
plugins = ['dummy-plugin1', 'dummy-plugin2']
}

// Build the meta plugin zip from the subproject plugins (unzipped)
task buildZip(type:Zip) {
subprojects.each { dependsOn("${it.name}:unzip") }
from plugins
from 'src/main/resources/meta-plugin-descriptor.properties'
into 'elasticsearch'
includeEmptyDirs false
}

integTestCluster {
dependsOn buildZip

// This is important, so that all the modules are available too.
// There are index templates that use token filters that are in analysis-module and
// processors are being used that are in ingest-common module.
distribution = 'zip'

// Install the meta plugin before start.
setupCommand 'installMetaPlugin',
'bin/elasticsearch-plugin', 'install', 'file:' + buildZip.archivePath
}
check.dependsOn integTest

@@ -31,15 +31,15 @@ import org.elasticsearch.common.util.BigArrays;

import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.nio.SocketChannelContext;
import org.elasticsearch.nio.AcceptingSelector;
import org.elasticsearch.nio.AcceptorEventHandler;
import org.elasticsearch.nio.BytesReadContext;
import org.elasticsearch.nio.BytesWriteContext;
import org.elasticsearch.nio.ChannelFactory;
import org.elasticsearch.nio.InboundChannelBuffer;
import org.elasticsearch.nio.BytesChannelContext;
import org.elasticsearch.nio.NioGroup;
import org.elasticsearch.nio.NioSocketChannel;
import org.elasticsearch.nio.ReadContext;
import org.elasticsearch.nio.ServerChannelContext;
import org.elasticsearch.nio.SocketEventHandler;
import org.elasticsearch.nio.SocketSelector;
import org.elasticsearch.threadpool.ThreadPool;

@@ -53,6 +53,7 @@ import java.nio.ByteBuffer;

import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.concurrent.ConcurrentMap;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

import static org.elasticsearch.common.settings.Setting.intSetting;

@@ -72,12 +73,12 @@ public class NioTransport extends TcpTransport {

public static final Setting<Integer> NIO_ACCEPTOR_COUNT =
intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope);

private final PageCacheRecycler pageCacheRecycler;
protected final PageCacheRecycler pageCacheRecycler;
private final ConcurrentMap<String, TcpChannelFactory> profileToChannelFactory = newConcurrentMap();
private volatile NioGroup nioGroup;
private volatile TcpChannelFactory clientChannelFactory;

NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
protected NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry,
CircuitBreakerService circuitBreakerService) {
super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);

@@ -111,13 +112,13 @@ public class NioTransport extends TcpTransport {

NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new);

ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
clientChannelFactory = new TcpChannelFactory(clientProfileSettings);
clientChannelFactory = channelFactory(clientProfileSettings, true);

if (useNetworkServer) {
// loop through all profiles and start them up, special handling for default one
for (ProfileSettings profileSettings : profileSettings) {
String profileName = profileSettings.profileName;
TcpChannelFactory factory = new TcpChannelFactory(profileSettings);
TcpChannelFactory factory = channelFactory(profileSettings, false);
profileToChannelFactory.putIfAbsent(profileName, factory);
bindServer(profileSettings);
}

@@ -144,19 +145,30 @@ public class NioTransport extends TcpTransport {

profileToChannelFactory.clear();
}

private void exceptionCaught(NioSocketChannel channel, Exception exception) {
protected void exceptionCaught(NioSocketChannel channel, Exception exception) {
onException((TcpChannel) channel, exception);
}

private void acceptChannel(NioSocketChannel channel) {
protected void acceptChannel(NioSocketChannel channel) {
serverAcceptedChannel((TcpNioSocketChannel) channel);
}

private class TcpChannelFactory extends ChannelFactory<TcpNioServerSocketChannel, TcpNioSocketChannel> {
protected TcpChannelFactory channelFactory(ProfileSettings settings, boolean isClient) {
return new TcpChannelFactoryImpl(settings);
}

protected abstract class TcpChannelFactory extends ChannelFactory<TcpNioServerSocketChannel, TcpNioSocketChannel> {

protected TcpChannelFactory(RawChannelFactory rawChannelFactory) {
super(rawChannelFactory);
}
}

private class TcpChannelFactoryImpl extends TcpChannelFactory {

private final String profileName;

TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) {
private TcpChannelFactoryImpl(ProfileSettings profileSettings) {
super(new RawChannelFactory(profileSettings.tcpNoDelay,
profileSettings.tcpKeepAlive,
profileSettings.reuseAddress,

@@ -172,18 +184,21 @@ public class NioTransport extends TcpTransport {

Recycler.V<byte[]> bytes = pageCacheRecycler.bytePage(false);
return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close);
};
ReadContext.ReadConsumer nioReadConsumer = channelBuffer ->
SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer ->
consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())));
BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier));
nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught);
BiConsumer<NioSocketChannel, Exception> exceptionHandler = NioTransport.this::exceptionCaught;
BytesChannelContext context = new BytesChannelContext(nioChannel, exceptionHandler, nioReadConsumer,
new InboundChannelBuffer(pageSupplier));
nioChannel.setContext(context);
return nioChannel;
}

@Override
public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector);
nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel);
return nioServerChannel;
TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector);
ServerChannelContext context = new ServerChannelContext(nioChannel, NioTransport.this::acceptChannel, (c, e) -> {});
nioChannel.setContext(context);
return nioChannel;
}
}
}

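createChannel above wires a pageSupplier that hands recycled byte pages to the channel buffer, each page carrying its own release hook (bytes::close). A compact JDK-only sketch of that supplier-with-release-hook pattern (illustrative names; the real code uses PageCacheRecycler and InboundChannelBuffer.Page, and is not limited to a single thread):

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.Supplier;

public class PagePoolDemo {

    // A page couples a buffer with the action that returns it to the pool.
    static final class Page {
        final ByteBuffer buffer;
        final Runnable onClose;
        Page(ByteBuffer buffer, Runnable onClose) {
            this.buffer = buffer;
            this.onClose = onClose;
        }
        void close() {
            onClose.run();
        }
    }

    public static void main(String[] args) {
        Deque<byte[]> pool = new ArrayDeque<>();
        Supplier<Page> pageSupplier = () -> {
            byte[] bytes = pool.isEmpty() ? new byte[1 << 14] : pool.pop();
            return new Page(ByteBuffer.wrap(bytes), () -> pool.push(bytes));
        };

        Page page = pageSupplier.get();   // take a 16 KB page
        page.close();                     // hand it back instead of discarding it
        System.out.println(pool.size());  // 1
    }
}
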
@@ -38,9 +38,9 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements

private final String profile;

TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel,
ChannelFactory<TcpNioServerSocketChannel, TcpNioSocketChannel> channelFactory,
AcceptingSelector selector) throws IOException {
public TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel,
ChannelFactory<TcpNioServerSocketChannel, TcpNioSocketChannel> channelFactory,
AcceptingSelector selector) throws IOException {
super(socketChannel, channelFactory, selector);
this.profile = profile;
}

@@ -60,6 +60,11 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements

return null;
}

@Override
public void close() {
getSelector().queueChannelClose(this);
}

@Override
public String getProfile() {
return profile;

@@ -33,13 +33,13 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel

private final String profile;

TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException {
public TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException {
super(socketChannel, selector);
this.profile = profile;
}

public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener));
getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener));
}

@Override

@@ -59,6 +59,11 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel

addCloseListener(ActionListener.toBiConsumer(listener));
}

@Override
public void close() {
getContext().closeChannel();
}

@Override
public String toString() {
return "TcpNioSocketChannel{" +

@@ -67,6 +67,6 @@

"metric" : { "precision": { }}
}

- match: {rank_eval.quality_level: 0.5833333333333333}
- match: {rank_eval.details.berlin_query.unknown_docs.0._id: "doc4"}
- match: {rank_eval.details.amsterdam_query.unknown_docs.0._id: "doc4"}
- match: {quality_level: 0.5833333333333333}
- match: {details.berlin_query.unknown_docs.0._id: "doc4"}
- match: {details.amsterdam_query.unknown_docs.0._id: "doc4"}

@@ -4,7 +4,14 @@ setup:

- do:
indices.create:
index: test1
wait_for_active_shards: all
body:
settings:
# Limit the number of shards so that shards are unlikely
# to be relocated or being initialized between the test
# set up and the test execution
index.number_of_shards: 3
index.number_of_replicas: 0
mappings:
bar:
properties:

@@ -20,6 +27,11 @@ setup:

fields:
completion:
type: completion

- do:
cluster.health:
wait_for_no_relocating_shards: true

- do:
index:
index: test1

@@ -29,10 +41,10 @@ setup:

- do:
index:
index: test2
type: baz
id: 1
body: { "bar": "bar", "baz": "baz" }
index: test1
type: bar
id: 2
body: { "bar": "foo", "baz": "foo" }

- do:
indices.refresh: {}

@@ -57,18 +69,17 @@ setup:

completion:
field: baz.completion

- do:
indices.refresh: {}

- do:
search:
sort: bar,baz
body:
sort: [ "bar", "baz" ]

---
"Fields - blank":
- do:
indices.stats: {}

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields
- gt: { _all.total.completion.size_in_bytes: 0 }

@@ -79,6 +90,7 @@ setup:

- do:
indices.stats: { fields: bar }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -90,6 +102,7 @@ setup:

- do:
indices.stats: { fields: "bar,baz.completion" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -102,6 +115,7 @@ setup:

- do:
indices.stats: { fields: "*" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 }

@@ -114,6 +128,7 @@ setup:

- do:
indices.stats: { fields: "bar*" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -126,6 +141,7 @@ setup:

- do:
indices.stats: { fields: "bar*", metric: _all }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -138,6 +154,7 @@ setup:

- do:
indices.stats: { fields: "bar*", metric: fielddata }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -148,6 +165,7 @@ setup:

- do:
indices.stats: { fields: "bar*", metric: completion }

- match: { _shards.failed: 0}
- is_false: _all.total.fielddata
- gt: { _all.total.completion.size_in_bytes: 0 }
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }

@@ -158,6 +176,7 @@ setup:

- do:
indices.stats: { fields: "bar*" , metric: [ completion, fielddata, search ]}

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz

@@ -170,6 +189,7 @@ setup:

- do:
indices.stats: { fielddata_fields: bar }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz
- is_false: _all.total.completion.fields

@@ -179,6 +199,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "bar,baz,baz.completion" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 }
- is_false: _all.total.completion.fields

@@ -188,6 +209,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "*" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 }
- is_false: _all.total.completion.fields

@@ -197,6 +219,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "*r" }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz
- is_false: _all.total.completion.fields

@@ -207,6 +230,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "*r", metric: _all }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz
- is_false: _all.total.completion.fields

@@ -216,6 +240,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "*r", metric: fielddata }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz
- is_false: _all.total.completion.fields

@@ -226,6 +251,7 @@ setup:

- do:
indices.stats: { fielddata_fields: "*r", metric: [ fielddata, search] }

- match: { _shards.failed: 0}
- gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields.baz
- is_false: _all.total.completion.fields

@@ -236,6 +262,7 @@ setup:

- do:
indices.stats: { completion_fields: bar.completion }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- is_false: _all.total.completion.fields.baz\.completion
- is_false: _all.total.fielddata.fields

@@ -245,6 +272,7 @@ setup:

- do:
indices.stats: { completion_fields: "bar.completion,baz,baz.completion" }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields

@@ -254,6 +282,7 @@ setup:

- do:
indices.stats: { completion_fields: "*" }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 }
- is_false: _all.total.fielddata.fields

@@ -263,6 +292,7 @@ setup:

- do:
indices.stats: { completion_fields: "*r*" }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- is_false: _all.total.completion.fields.baz\.completion
- is_false: _all.total.fielddata.fields

@@ -272,6 +302,7 @@ setup:

- do:
indices.stats: { completion_fields: "*r*", metric: _all }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- is_false: _all.total.completion.fields.baz\.completion
- is_false: _all.total.fielddata.fields

@@ -281,6 +312,7 @@ setup:

- do:
indices.stats: { completion_fields: "*r*", metric: completion }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- is_false: _all.total.completion.fields.baz\.completion
- is_false: _all.total.fielddata.fields

@@ -290,6 +322,7 @@ setup:

- do:
indices.stats: { completion_fields: "*r*", metric: [ completion, search ] }

- match: { _shards.failed: 0}
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 }
- is_false: _all.total.completion.fields.baz\.completion
- is_false: _all.total.fielddata.fields

@@ -90,7 +90,7 @@ dependencies {

compile 'com.carrotsearch:hppc:0.7.1'

// time handling, remove with java 8 time
compile 'joda-time:joda-time:2.9.5'
compile 'joda-time:joda-time:2.9.9'

// json and yaml
compile "org.yaml:snakeyaml:${versions.snakeyaml}"

@@ -1 +0,0 @@
5f01da7306363fad2028b916f3eab926262de928

@@ -0,0 +1 @@
f7b520c458572890807d143670c9b24f4de90897

@@ -148,6 +148,8 @@ public class Version implements Comparable<Version> {

public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_6_2_0_ID = 6020099;
public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
public static final int V_6_3_0_ID = 6030099;
public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
public static final int V_7_0_0_alpha1_ID = 7000001;
public static final Version V_7_0_0_alpha1 =
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);

@@ -166,6 +168,8 @@ public class Version implements Comparable<Version> {

switch (id) {
case V_7_0_0_alpha1_ID:
return V_7_0_0_alpha1;
case V_6_3_0_ID:
return V_6_3_0;
case V_6_2_0_ID:
return V_6_2_0;
case V_6_1_3_ID:

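The numeric IDs follow the usual Elasticsearch encoding: major * 1,000,000 + minor * 10,000 + revision * 100 + build, with 99 marking a released build. That is how 6.2.0 becomes 6020099 and 7.0.0-alpha1 (build 1) becomes 7000001. A quick check:

public class VersionIdDemo {
    static int id(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(id(6, 2, 0, 99) == 6020099); // true: V_6_2_0_ID
        System.out.println(id(6, 3, 0, 99) == 6030099); // true: V_6_3_0_ID
        System.out.println(id(7, 0, 0, 1) == 7000001);  // true: V_7_0_0_alpha1_ID
    }
}
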
@@ -22,13 +22,23 @@ package org.elasticsearch.action.admin.indices.close;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

/**
 * A response for a close index action.
 */
public class CloseIndexResponse extends AcknowledgedResponse {
public class CloseIndexResponse extends AcknowledgedResponse implements ToXContentObject {
    private static final ConstructingObjectParser<CloseIndexResponse, Void> PARSER = new ConstructingObjectParser<>("close_index", true,
        args -> new CloseIndexResponse((boolean) args[0]));

    static {
        declareAcknowledgedField(PARSER);
    }

    CloseIndexResponse() {
    }

@@ -48,4 +58,16 @@ public class CloseIndexResponse extends AcknowledgedResponse {
        super.writeTo(out);
        writeAcknowledged(out);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        addAcknowledgedField(builder);
        builder.endObject();
        return builder;
    }

    public static CloseIndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
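A minimal sketch of parsing this response back from JSON, assuming the 6.x two-argument createParser overload (later versions add a DeprecationHandler argument):

    import java.io.IOException;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.XContentType;

    static boolean parseCloseIndexResponse(String json) throws IOException {
        try (XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, json)) {
            return CloseIndexResponse.fromXContent(parser).isAcknowledged();
        }
    }
    // parseCloseIndexResponse("{\"acknowledged\":true}") -> true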
@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.close;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -37,7 +37,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
 * A response for a open index action.
 */
public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {
public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {
    private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
    private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
@@ -19,6 +19,7 @@
package org.elasticsearch.action.update;

import java.util.Arrays;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -893,4 +894,28 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        StringBuilder res = new StringBuilder()
            .append("update {[").append(index)
            .append("][").append(type)
            .append("][").append(id).append("]");
        res.append(", doc_as_upsert[").append(docAsUpsert).append("]");
        if (doc != null) {
            res.append(", doc[").append(doc).append("]");
        }
        if (script != null) {
            res.append(", script[").append(script).append("]");
        }
        if (upsertRequest != null) {
            res.append(", upsert[").append(upsertRequest).append("]");
        }
        res.append(", scripted_upsert[").append(scriptedUpsert).append("]");
        res.append(", detect_noop[").append(detectNoop).append("]");
        if (fields != null) {
            res.append(", fields[").append(Arrays.toString(fields)).append("]");
        }
        return res.append("}").toString();
    }
}
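For illustration, a bare update request on a hypothetical index "test", type "_doc", id "1" (no doc, script, or upsert attached) would now render roughly as:

    // hypothetical toString() output:
    // update {[test][_doc][1], doc_as_upsert[false], scripted_upsert[false], detect_noop[true]}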
@@ -26,6 +26,7 @@ import org.elasticsearch.action.ActionModule;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -195,8 +196,16 @@ public abstract class TransportClient extends AbstractClient {
        final TransportClientNodesService nodesService =
            new TransportClientNodesService(settings, transportService, threadPool, failureListner == null
                ? (t, e) -> {} : failureListner);
        final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
            actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));

        // construct the list of client actions
        final List<ActionPlugin> actionPlugins = pluginsService.filterPlugins(ActionPlugin.class);
        final List<GenericAction> clientActions =
            actionPlugins.stream().flatMap(p -> p.getClientActions().stream()).collect(Collectors.toList());
        // add all the base actions
        final List<? extends GenericAction<?, ?>> baseActions =
            actionModule.getActions().values().stream().map(ActionPlugin.ActionHandler::getAction).collect(Collectors.toList());
        clientActions.addAll(baseActions);
        final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, clientActions);

        List<LifecycleComponent> pluginLifecycleComponents = new ArrayList<>(pluginsService.getGuiceServiceClasses().stream()
            .map(injector::getInstance).collect(Collectors.toList()));
@@ -700,7 +700,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
        try {
            Translog translog = shard.getTranslog();
            if (translog.syncNeeded()) {
                translog.sync();
                shard.sync();
            }
        } catch (AlreadyClosedException ex) {
            // fine - continue;
@@ -47,8 +47,8 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {

        if (settings.get("ignore_case") != null) {
            deprecationLogger.deprecated(
                "This tokenize synonyms with whatever tokenizer and token filters appear before it in the chain. " +
                "If you need ignore case with this filter, you should set lowercase filter before this");
                "The ignore_case option on the synonym_graph filter is deprecated. " +
                "Instead, insert a lowercase filter in the filter chain before the synonym_graph filter.");
        }

        this.expand = settings.getAsBoolean("expand", true);
@@ -47,8 +47,8 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy {
    private final LongSupplier globalCheckpointSupplier;
    private final IndexCommit startingCommit;
    private final ObjectIntHashMap<IndexCommit> snapshottedCommits; // Number of snapshots held against each commit point.
    private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint.
    private IndexCommit lastCommit; // the most recent commit point
    private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint.
    private volatile IndexCommit lastCommit; // the most recent commit point

    CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy,
                           LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) {

@@ -214,6 +214,21 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy {
        return 0;
    }

    /**
     * Checks if the deletion policy can release some index commits with the latest global checkpoint.
     */
    boolean hasUnreferencedCommits() throws IOException {
        final IndexCommit lastCommit = this.lastCommit;
        if (safeCommit != lastCommit) { // Race condition can happen but harmless
            if (lastCommit.getUserData().containsKey(SequenceNumbers.MAX_SEQ_NO)) {
                final long maxSeqNoFromLastCommit = Long.parseLong(lastCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO));
                // We can clean up the current safe commit if the last commit is safe
                return globalCheckpointSupplier.getAsLong() >= maxSeqNoFromLastCommit;
            }
        }
        return false;
    }

    /**
     * A wrapper of an index commit that prevents it from being deleted.
     */
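A self-contained sketch of the release check, with the Lucene types reduced to primitives (an illustration of the logic above, not the actual method):

    import java.util.function.LongSupplier;

    static boolean hasUnreferencedCommits(boolean lastCommitIsSafeCommit,
                                          long maxSeqNoOfLastCommit,
                                          LongSupplier globalCheckpoint) {
        if (lastCommitIsSafeCommit) {
            return false; // nothing older than the safe commit to release
        }
        // once the global checkpoint covers the last commit's max_seq_no, the
        // last commit becomes safe and older commits can be deleted
        return globalCheckpoint.getAsLong() >= maxSeqNoOfLastCommit;
    }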
@@ -91,6 +91,7 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiFunction;
import java.util.stream.Stream;

public abstract class Engine implements Closeable {

@@ -549,6 +550,13 @@ public abstract class Engine implements Closeable {
    /** returns the translog for this engine */
    public abstract Translog getTranslog();

    /**
     * Ensures that all locations in the given stream have been written to the underlying storage.
     */
    public abstract boolean ensureTranslogSynced(Stream<Translog.Location> locations) throws IOException;

    public abstract void syncTranslog() throws IOException;

    protected void ensureOpen() {
        if (isClosed.get()) {
            throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get());
@@ -31,7 +31,6 @@ import org.apache.lucene.index.LiveIndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ReferenceManager;

@@ -95,6 +94,7 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiFunction;
import java.util.function.LongSupplier;
import java.util.stream.Stream;

public class InternalEngine extends Engine {

@@ -521,6 +521,27 @@ public class InternalEngine extends Engine {
        return translog;
    }

    @Override
    public boolean ensureTranslogSynced(Stream<Translog.Location> locations) throws IOException {
        final boolean synced = translog.ensureSynced(locations);
        if (synced) {
            revisitIndexDeletionPolicyOnTranslogSynced();
        }
        return synced;
    }

    @Override
    public void syncTranslog() throws IOException {
        translog.sync();
        revisitIndexDeletionPolicyOnTranslogSynced();
    }

    private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException {
        if (combinedDeletionPolicy.hasUnreferencedCommits()) {
            indexWriter.deleteUnusedFiles();
        }
    }

    @Override
    public String getHistoryUUID() {
        return historyUUID;
@@ -48,7 +48,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Closeable {
            case "none":
                return s;
            default:
                throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]");
                throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]");
        }
    }, Property.IndexScope);
@@ -32,9 +32,9 @@ import java.util.concurrent.ConcurrentMap;

public class ShardFieldData implements IndexFieldDataCache.Listener {

    final CounterMetric evictionsMetric = new CounterMetric();
    final CounterMetric totalMetric = new CounterMetric();
    final ConcurrentMap<String, CounterMetric> perFieldTotals = ConcurrentCollections.newConcurrentMap();
    private final CounterMetric evictionsMetric = new CounterMetric();
    private final CounterMetric totalMetric = new CounterMetric();
    private final ConcurrentMap<String, CounterMetric> perFieldTotals = ConcurrentCollections.newConcurrentMap();

    public FieldDataStats stats(String... fields) {
        ObjectLongHashMap<String> fieldTotals = null;
@@ -33,6 +33,7 @@ import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.AbstractQueryBuilder;

@@ -86,11 +87,11 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser {
    }

    /**
     * Rethrow the runtime exception, unless the lenient flag has been set, returns null
     * Rethrow the runtime exception, unless the lenient flag has been set, returns {@link MatchNoDocsQuery}
     */
    private Query rethrowUnlessLenient(RuntimeException e) {
        if (settings.lenient()) {
            return null;
            return Queries.newMatchNoDocsQuery("failed query, caused by " + e.getMessage());
        }
        throw e;
    }

@@ -115,7 +116,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser {
        try {
            return queryBuilder.parse(MultiMatchQueryBuilder.Type.MOST_FIELDS, weights, text, null);
        } catch (IOException e) {
            return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage()));
            return rethrowUnlessLenient(new IllegalStateException(e.getMessage()));
        }
    }

@@ -135,7 +136,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser {
                settings.fuzzyMaxExpansions, settings.fuzzyTranspositions);
                disjuncts.add(wrapWithBoost(query, entry.getValue()));
            } catch (RuntimeException e) {
                rethrowUnlessLenient(e);
                disjuncts.add(rethrowUnlessLenient(e));
            }
        }
        if (disjuncts.size() == 1) {

@@ -156,7 +157,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser {
            }
            return queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, phraseWeights, text, null);
        } catch (IOException e) {
            return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage()));
            return rethrowUnlessLenient(new IllegalStateException(e.getMessage()));
        } finally {
            queryBuilder.setPhraseSlop(0);
        }

@@ -184,7 +185,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser {
                    disjuncts.add(wrapWithBoost(query, entry.getValue()));
                }
            } catch (RuntimeException e) {
                return rethrowUnlessLenient(e);
                disjuncts.add(rethrowUnlessLenient(e));
            }
        }
        if (disjuncts.size() == 1) {
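A self-contained sketch of the lenient-parse pattern this change settles on, using Lucene's MatchNoDocsQuery directly (Queries.newMatchNoDocsQuery above is Elasticsearch's thin wrapper):

    import org.apache.lucene.search.MatchNoDocsQuery;
    import org.apache.lucene.search.Query;

    static Query rethrowUnlessLenient(RuntimeException e, boolean lenient) {
        if (lenient) {
            // a concrete no-match query keeps disjunctions well-formed, where the
            // previous null return could drop clauses or trip a NullPointerException
            return new MatchNoDocsQuery("failed query, caused by " + e.getMessage());
        }
        throw e;
    }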
@@ -130,10 +130,9 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<
    }

    private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
        final Translog translog = indexShard.getTranslog();
        if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST &&
            translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
            indexShard.getTranslog().sync();
            indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
            indexShard.sync();
        }
    }
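Reduced to primitives, the new sync condition is (a sketch, not the actual IndexShard API):

    static boolean needsTranslogSync(boolean requestDurability,
                                     long lastSyncedGlobalCheckpoint,
                                     long globalCheckpoint) {
        // only fsync when writes demand per-request durability and the persisted
        // global checkpoint lags the in-memory one
        return requestDurability && lastSyncedGlobalCheckpoint < globalCheckpoint;
    }

Routing the call through indexShard.sync() rather than the translog directly lets the engine piggyback its deletion-policy check (see InternalEngine above) on every sync.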
@@ -2315,8 +2315,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
    @Override
    protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
        try {
            final Engine engine = getEngine();
            engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1));
            getEngine().ensureTranslogSynced(candidates.stream().map(Tuple::v1));
        } catch (AlreadyClosedException ex) {
            // that's fine since we already synced everything on engine close - this also is conform with the methods
            // documentation

@@ -2341,9 +2340,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
        translogSyncProcessor.put(location, syncListener);
    }

    public final void sync() throws IOException {
    public void sync() throws IOException {
        verifyNotClosed();
        getEngine().getTranslog().sync();
        getEngine().syncTranslog();
    }

    /**
@@ -42,6 +42,7 @@ import java.util.List;
import java.util.Objects;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

/**
 * An additional extension point for {@link Plugin}s that extends Elasticsearch's scripting functionality. Implement it like this:

@@ -62,6 +63,15 @@ public interface ActionPlugin {
    default List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
        return Collections.emptyList();
    }

    /**
     * Client actions added by this plugin. This defaults to all of the {@linkplain GenericAction} in
     * {@linkplain ActionPlugin#getActions()}.
     */
    default List<GenericAction> getClientActions() {
        return getActions().stream().map(a -> a.action).collect(Collectors.toList());
    }

    /**
     * Action filters added by this plugin.
     */
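A hedged sketch of a plugin overriding the new hook (the plugin class and its behavior are hypothetical, not part of this commit):

    import java.util.Collections;
    import java.util.List;
    import org.elasticsearch.action.GenericAction;
    import org.elasticsearch.plugins.ActionPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class MyPlugin extends Plugin implements ActionPlugin {
        @Override
        public List<GenericAction> getClientActions() {
            // hypothetical: expose no client actions even though getActions()
            // may register transport handlers, e.g. for server-only actions
            return Collections.emptyList();
        }
    }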
Some files were not shown because too many files have changed in this diff.