diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 30e8261c874..985c70a39a0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -126,8 +126,8 @@ Alternatively, `idea.no.launcher=true` can be set in the [`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html) file which can be accessed under Help > Edit Custom Properties (this will require a restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to -`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from -`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your +`Run->Edit Configurations->...->Defaults->JUnit` and verify that the `Shorten command line` setting is set to +`user-local default: none`. You may also need to [remove `ant-javafx.jar` from your classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is reported as a source of jar hell. diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 0c76ce4fd3a..0df80116099 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -43,6 +43,7 @@ import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.bundling.Jar +import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc import org.gradle.internal.jvm.Jvm @@ -455,6 +456,13 @@ class BuildPlugin implements Plugin { // TODO: use native Gradle support for --release when available (cf. 
https://github.com/gradle/gradle/issues/2510) options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } + // also apply release flag to groovy, which is used in build-tools + project.tasks.withType(GroovyCompile) { + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) + options.fork = true + options.forkOptions.javaHome = new File(project.compilerJavaHome) + options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion + } } } @@ -651,7 +659,10 @@ class BuildPlugin implements Plugin { Task precommit = PrecommitTasks.create(project, true) project.check.dependsOn(precommit) project.test.mustRunAfter(precommit) - project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided + // only require dependency licenses for non-elasticsearch deps + project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } - project.configurations.provided } private static configureDependenciesInfo(Project project) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy new file mode 100644 index 00000000000..3df9b604c13 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.plugin + +import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.test.RestTestPlugin +import org.elasticsearch.gradle.test.RunTask +import org.elasticsearch.gradle.test.StandaloneRestTestPlugin +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.file.FileCopyDetails +import org.gradle.api.file.RelativePath +import org.gradle.api.tasks.bundling.Zip + +class MetaPluginBuildPlugin implements Plugin { + + @Override + void apply(Project project) { + project.plugins.apply(StandaloneRestTestPlugin) + project.plugins.apply(RestTestPlugin) + + createBundleTask(project) + + project.integTestCluster { + dependsOn(project.bundlePlugin) + plugin(project.path) + } + BuildPlugin.configurePomGeneration(project) + project.afterEvaluate { + PluginBuildPlugin.addZipPomGeneration(project) + } + + RunTask run = project.tasks.create('run', RunTask) + run.dependsOn(project.bundlePlugin) + run.clusterConfig.plugin(project.path) + } + + private static void createBundleTask(Project project) { + + MetaPluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', MetaPluginPropertiesTask.class) + + // create the actual bundle task, which zips up all the files for the plugin + Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) { + into('elasticsearch') { + from(buildProperties.descriptorOutput.parentFile) { + // plugin properties file + include(buildProperties.descriptorOutput.name) + } + } + // due to how the 
renames work for each bundled plugin, we must exclude empty dirs or every subdir + // within bundled plugin zips will show up at the root as an empty dir + includeEmptyDirs = false + + } + project.assemble.dependsOn(bundle) + + // also make the zip available as a configuration (used when depending on this project) + project.configurations.create('zip') + project.artifacts.add('zip', bundle) + + // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration + // to add itself back to this meta plugin zip + project.afterEvaluate { + buildProperties.extension.plugins.each { String bundledPluginProjectName -> + Project bundledPluginProject = project.project(bundledPluginProjectName) + bundledPluginProject.afterEvaluate { + bundle.configure { + dependsOn bundledPluginProject.bundlePlugin + from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) { + eachFile { FileCopyDetails details -> + // paths in the individual plugins begin with elasticsearch, and we want to add in the + // bundled plugin name between that and each filename + details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName, + details.relativePath.toString().replace('elasticsearch/', '')) + } + } + } + } + } + } + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy similarity index 55% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java rename to buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy index 39e69e8f9a9..e5d84002e53 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy @@ -17,21 +17,30 @@ * under the License. 
*/ -package org.elasticsearch.nio; +package org.elasticsearch.gradle.plugin -import java.io.IOException; -import java.util.function.BiConsumer; +import org.gradle.api.Project +import org.gradle.api.tasks.Input -public interface WriteContext { +/** + * A container for meta plugin properties that will be written to the meta plugin descriptor, for easy + * manipulation in the gradle DSL. + */ +class MetaPluginPropertiesExtension { + @Input + String name - void sendMessage(Object message, BiConsumer listener); + @Input + String description - void queueWriteOperations(WriteOperation writeOperation); - - void flushChannel() throws IOException; - - boolean hasQueuedWriteOps(); - - void clearQueuedWriteOps(Exception e); + /** + * The plugins this meta plugin wraps. + * Note this is not written to the plugin descriptor, but used to setup the final zip file task. + */ + @Input + List plugins + MetaPluginPropertiesExtension(Project project) { + name = project.name + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy new file mode 100644 index 00000000000..e868cc2cc31 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.plugin + +import org.gradle.api.InvalidUserDataException +import org.gradle.api.Task +import org.gradle.api.tasks.Copy +import org.gradle.api.tasks.OutputFile + +class MetaPluginPropertiesTask extends Copy { + + MetaPluginPropertiesExtension extension + + @OutputFile + File descriptorOutput = new File(project.buildDir, 'generated-resources/meta-plugin-descriptor.properties') + + MetaPluginPropertiesTask() { + File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}") + Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') { + doLast { + InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}") + templateFile.parentFile.mkdirs() + templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8') + } + } + + dependsOn(copyPluginPropertiesTemplate) + extension = project.extensions.create('es_meta_plugin', MetaPluginPropertiesExtension, project) + project.afterEvaluate { + // check require properties are set + if (extension.name == null) { + throw new InvalidUserDataException('name is a required setting for es_meta_plugin') + } + if (extension.description == null) { + throw new InvalidUserDataException('description is a required setting for es_meta_plugin') + } + // configure property substitution + from(templateFile.parentFile).include(descriptorOutput.name) + into(descriptorOutput.parentFile) + Map properties = generateSubstitutions() + expand(properties) + inputs.properties(properties) + } + } + + Map 
generateSubstitutions() { + return ['name': extension.name, + 'description': extension.description + ] + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index f342a68707e..950acad9a5e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.plugin +import nebula.plugin.info.scm.ScmInfoPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask @@ -220,7 +221,8 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to generate a pom file for the zip distribution. */ - protected void addZipPomGeneration(Project project) { + public static void addZipPomGeneration(Project project) { + project.plugins.apply(ScmInfoPlugin.class) project.plugins.apply(MavenPublishPlugin.class) project.publishing { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index a64c39171a2..593a08c8735 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -23,6 +23,8 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import 
org.gradle.api.AntBuilder @@ -138,8 +140,8 @@ class ClusterFormationTasks { /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) { verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject) - PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin'); - project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${extension.name}:${elasticsearchVersion}@zip") + final String pluginName = findPluginName(pluginProject) + project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") } /** @@ -449,7 +451,7 @@ class ClusterFormationTasks { configuration = project.configurations.create(configurationName) } - final String depName = pluginProject.extensions.findByName('esplugin').name + final String depName = findPluginName(pluginProject) Dependency dep = bwcPlugins.dependencies.find { it.name == depName @@ -753,9 +755,19 @@ class ClusterFormationTasks { } static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) { - if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { + if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) { throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + - "[${project.path}] dependencies: the plugin is not an esplugin") + "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin") + } + } + + /** Find the plugin name in the given project, whether a regular plugin or meta plugin. 
*/ + static String findPluginName(Project pluginProject) { + PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') + if (extension != null) { + return extension.name + } else { + return pluginProject.extensions.findByName('es_meta_plugin').name } } } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties new file mode 100644 index 00000000000..50240e95416 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties @@ -0,0 +1,20 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +implementation-class=org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 57dafbba509..2dd130fc634 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -21,6 +21,8 @@ package org.elasticsearch.client; import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -49,9 +51,9 @@ public final class IndicesClient { * See * Delete Index API on elastic.co */ - public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { + public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - Collections.emptySet(), headers); + Collections.emptySet(), headers); } /** @@ -60,10 +62,9 @@ public final class IndicesClient { * See * Delete Index API on elastic.co */ - public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, - Header... headers) { + public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... 
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, Collections.emptySet(), headers); } /** @@ -72,7 +73,7 @@ public final class IndicesClient { * See * Create Index API on elastic.co */ - public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -83,10 +84,9 @@ public final class IndicesClient { * See * Create Index API on elastic.co */ - public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener listener, - Header... headers) { + public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, Collections.emptySet(), headers); } /** @@ -95,7 +95,7 @@ public final class IndicesClient { * See * Open Index API on elastic.co */ - public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { + public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -106,9 +106,30 @@ public final class IndicesClient { * See * Open Index API on elastic.co */ - public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... 
headers) { + public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } + /** + * Closes an index using the Close Index API + *

+ * See + * Close Index API on elastic.co + */ + public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + Collections.emptySet(), headers); + } + + /** + * Asynchronously closes an index using the Close Index API + *

+ * See + * Close Index API on elastic.co + */ + public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + listener, Collections.emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index d35db1c637d..e55204c3d94 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -153,6 +154,18 @@ public final class Request { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request closeIndex(CloseIndexRequest closeIndexRequest) { + String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close"); + + Params parameters = Params.builder(); + + parameters.withTimeout(closeIndexRequest.timeout()); + parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); + + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, ""); diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index cad7449c689..9fb53a54d8c 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,8 +26,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 5f356c4c29f..5f8702807fb 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -21,6 +21,8 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -28,21 +30,18 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import 
org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.hamcrest.Matchers.equalTo; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -56,7 +55,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); assertTrue(indexExists(indexName)); @@ -84,7 +83,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndexRequest.mapping("type_name", mappingBuilder); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); Map indexMetaData = getIndexMetadata(indexName); @@ -117,7 +116,7 @@ public class 
IndicesClientIT extends ESRestHighLevelClientTestCase { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName); DeleteIndexResponse deleteIndexResponse = - execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync); + execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync); assertTrue(deleteIndexResponse.isAcknowledged()); assertFalse(indexExists(indexName)); @@ -130,63 +129,74 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync)); + () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } public void testOpenExistingIndex() throws IOException { - String[] indices = randomIndices(1, 5); - for (String index : indices) { - createIndex(index); - closeIndex(index); - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); - assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); - assertThat(exception.getMessage().contains(index), equalTo(true)); - } + String index = "index"; + createIndex(index); + closeIndex(index); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); - OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); - 
OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex, - highLevelClient().indices()::openIndexAsync); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(index); + OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertTrue(openIndexResponse.isAcknowledged()); - for (String index : indices) { - Response response = client().performRequest("GET", index + "/_search"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - } + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } public void testOpenNonExistentIndex() throws IOException { - String[] nonExistentIndices = randomIndices(1, 5); - for (String nonExistentIndex : nonExistentIndices) { - assertFalse(indexExists(nonExistentIndex)); - } + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); - OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); - OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex, - 
highLevelClient().indices()::openIndexAsync); + OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); - OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } - private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) { - int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - } - return indices; + public void testCloseExistingIndex() throws IOException { + String index = "index"; + createIndex(index); + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); + CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close, + highLevelClient().indices()::closeAsync); + assertTrue(closeIndexResponse.isAcknowledged()); + + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), 
equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); + } + + public void testCloseNonExistentIndex() throws IOException { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); } private static void createIndex(String index) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index acb27fff7e2..56848a905a1 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -325,17 +326,10 @@ public class RequestTests extends ESTestCase { } public void testDeleteIndex() { - DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(); - - int numIndices = randomIntBetween(0, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } - deleteIndexRequest.indices(indices); + String[] indices = randomIndicesNames(0, 
5); + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); Map expectedParams = new HashMap<>(); - setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomMasterTimeout(deleteIndexRequest, expectedParams); @@ -349,12 +343,8 @@ public class RequestTests extends ESTestCase { } public void testOpenIndex() { - OpenIndexRequest openIndexRequest = new OpenIndexRequest(); - int numIndices = randomIntBetween(1, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } + String[] indices = randomIndicesNames(1, 5); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); openIndexRequest.indices(indices); Map expectedParams = new HashMap<>(); @@ -371,6 +361,23 @@ public class RequestTests extends ESTestCase { assertThat(request.getEntity(), nullValue()); } + public void testCloseIndex() { + String[] indices = randomIndicesNames(1, 5); + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices); + + Map expectedParams = new HashMap<>(); + setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(closeIndexRequest, expectedParams); + setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams); + + Request request = Request.closeIndex(closeIndexRequest); + StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getMethod(), equalTo("POST")); + assertThat(request.getEntity(), nullValue()); + } + public void testIndex() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -748,13 +755,9 @@ public class 
RequestTests extends ESTestCase { } public void testSearch() throws Exception { - SearchRequest searchRequest = new SearchRequest(); - int numIndices = randomIntBetween(0, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } - searchRequest.indices(indices); + String[] indices = randomIndicesNames(0, 5); + SearchRequest searchRequest = new SearchRequest(indices); + int numTypes = randomIntBetween(0, 5); String[] types = new String[numTypes]; for (int i = 0; i < numTypes; i++) { @@ -791,44 +794,47 @@ public class RequestTests extends ESTestCase { setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); - SearchSourceBuilder searchSourceBuilder = null; + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + //rarely skip setting the search source completely if (frequently()) { - searchSourceBuilder = new SearchSourceBuilder(); - if (randomBoolean()) { - searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); - } - if (randomBoolean()) { - searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE)); - } - if (randomBoolean()) { - searchSourceBuilder.minScore(randomFloat()); - } - if (randomBoolean()) { - searchSourceBuilder.explain(randomBoolean()); - } - if (randomBoolean()) { - searchSourceBuilder.profile(randomBoolean()); - } - if (randomBoolean()) { - searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING) - .field(randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.suggest(new 
SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10), - new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10)))); - } - if (randomBoolean()) { - searchSourceBuilder.addRescorer(new QueryRescorerBuilder( - new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)))); - } - if (randomBoolean()) { - searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10))); + //frequently set the search source to have some content, otherwise leave it empty but still set it + if (frequently()) { + if (randomBoolean()) { + searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + searchSourceBuilder.minScore(randomFloat()); + } + if (randomBoolean()) { + searchSourceBuilder.explain(randomBoolean()); + } + if (randomBoolean()) { + searchSourceBuilder.profile(randomBoolean()); + } + if (randomBoolean()) { + searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING) + .field(randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10), + new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10)))); + } + if (randomBoolean()) { + searchSourceBuilder.addRescorer(new QueryRescorerBuilder( + new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)))); + } + if (randomBoolean()) { + searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10))); + } } 
searchRequest.source(searchSourceBuilder); } @@ -846,11 +852,7 @@ public class RequestTests extends ESTestCase { endpoint.add("_search"); assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - if (searchSourceBuilder == null) { - assertNull(request.getEntity()); - } else { - assertToXContentBody(searchSourceBuilder, request.getEntity()); - } + assertToXContentBody(searchSourceBuilder, request.getEntity()); } public void testMultiSearch() throws IOException { @@ -1130,4 +1132,13 @@ public class RequestTests extends ESTestCase { } return excludesParam.toString(); } + + private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + return indices; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 42d19fab82f..bc3b1698f96 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -22,10 +22,14 @@ package org.elasticsearch.client.documentation; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import 
org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -58,7 +62,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -80,7 +84,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::delete-index-request-indicesOptions // tag::delete-index-execute - DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request); + DeleteIndexResponse deleteIndexResponse = client.indices().delete(request); // end::delete-index-execute // tag::delete-index-response @@ -93,7 +97,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::delete-index-notfound try { DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist"); - client.indices().deleteIndex(request); + client.indices().delete(request); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -107,7 +111,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + 
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -115,7 +119,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase DeleteIndexRequest request = new DeleteIndexRequest("posts"); // tag::delete-index-execute-async - client.indices().deleteIndexAsync(request, new ActionListener() { + client.indices().deleteAsync(request, new ActionListener() { @Override public void onResponse(DeleteIndexResponse deleteIndexResponse) { // <1> @@ -185,7 +189,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::create-index-request-waitForActiveShards // tag::create-index-execute - CreateIndexResponse createIndexResponse = client.indices().createIndex(request); + CreateIndexResponse createIndexResponse = client.indices().create(request); // end::create-index-execute // tag::create-index-response @@ -203,7 +207,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase { CreateIndexRequest request = new CreateIndexRequest("twitter"); // tag::create-index-execute-async - client.indices().createIndexAsync(request, new ActionListener() { + client.indices().createAsync(request, new ActionListener() { @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -224,4 +228,138 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase } } + public void testOpenIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::open-index-request + OpenIndexRequest request = new OpenIndexRequest("index"); // <1> + // end::open-index-request + + // tag::open-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); 
// <2> + // end::open-index-request-timeout + // tag::open-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::open-index-request-masterTimeout + // tag::open-index-request-waitForActiveShards + request.waitForActiveShards(2); // <1> + request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + // end::open-index-request-waitForActiveShards + + + // tag::open-index-request-indicesOptions + request.indicesOptions(IndicesOptions.strictExpandOpen()); // <1> + // end::open-index-request-indicesOptions + + // tag::open-index-execute + OpenIndexResponse openIndexResponse = client.indices().open(request); + // end::open-index-execute + + // tag::open-index-response + boolean acknowledged = openIndexResponse.isAcknowledged(); // <1> + boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // <2> + // end::open-index-response + assertTrue(acknowledged); + assertTrue(shardsAcked); + + // tag::open-index-execute-async + client.indices().openAsync(request, new ActionListener() { + @Override + public void onResponse(OpenIndexResponse openIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::open-index-execute-async + } + + { + // tag::open-index-notfound + try { + OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); + client.indices().open(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::open-index-notfound + } + } + + public void testCloseIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::close-index-request + CloseIndexRequest request = new CloseIndexRequest("index"); // <1> + // end::close-index-request + + 
// tag::close-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::close-index-request-timeout + // tag::close-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::close-index-request-masterTimeout + + // tag::close-index-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::close-index-request-indicesOptions + + // tag::close-index-execute + CloseIndexResponse closeIndexResponse = client.indices().close(request); + // end::close-index-execute + + // tag::close-index-response + boolean acknowledged = closeIndexResponse.isAcknowledged(); // <1> + // end::close-index-response + assertTrue(acknowledged); + + // tag::close-index-execute-async + client.indices().closeAsync(request, new ActionListener() { + @Override + public void onResponse(CloseIndexResponse closeIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::close-index-execute-async + } + + { + // tag::close-index-notfound + try { + CloseIndexRequest request = new CloseIndexRequest("does_not_exist"); + client.indices().close(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::close-index-notfound + } + } } diff --git a/distribution/build.gradle b/distribution/build.gradle index c6fc9d5b694..d322aa9c1ff 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -83,7 +83,7 @@ ext.restTestExpansions = [ // we create the buildModules task above so the distribution subprojects can // depend on it, but we don't actually configure it until here so we can do a single // loop over modules to also setup cross task dependencies and increment our modules counter -project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each { Project module 
-> +project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module -> buildFullNotice { def defaultLicensesDir = new File(module.projectDir, 'licenses') if (defaultLicensesDir.exists()) { diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index a8b7db48a7c..5675d3e8007 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -646,9 +646,11 @@ class InstallPluginCommand extends EnvironmentAwareCommand { Environment env, List deleteOnFailure) throws Exception { final MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(tmpRoot); verifyPluginName(env.pluginsFile(), metaInfo.getName(), tmpRoot); + final Path destination = env.pluginsFile().resolve(metaInfo.getName()); deleteOnFailure.add(destination); terminal.println(VERBOSE, metaInfo.toString()); + final List pluginPaths = new ArrayList<>(); try (DirectoryStream paths = Files.newDirectoryStream(tmpRoot)) { // Extract bundled plugins path and validate plugin names @@ -665,19 +667,11 @@ class InstallPluginCommand extends EnvironmentAwareCommand { for (Path plugin : pluginPaths) { final PluginInfo info = verify(terminal, plugin, isBatch, env); pluginInfos.add(info); - Path tmpBinDir = plugin.resolve("bin"); - if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(metaInfo.getName()); - deleteOnFailure.add(destBinDir); - installBin(info, tmpBinDir, destBinDir); - } - - Path tmpConfigDir = plugin.resolve("config"); - if (Files.exists(tmpConfigDir)) { - // some files may already exist, and we don't remove plugin config files on plugin removal, - // so any installed config files are left on failure too - Path destConfigDir = 
env.configFile().resolve(metaInfo.getName()); - installConfig(info, tmpConfigDir, destConfigDir); + installPluginSupportFiles(info, plugin, env.binFile().resolve(metaInfo.getName()), + env.configFile().resolve(metaInfo.getName()), deleteOnFailure); + // ensure the plugin dir within the tmpRoot has the correct name + if (plugin.getFileName().toString().equals(info.getName()) == false) { + Files.move(plugin, plugin.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE); } } movePlugin(tmpRoot, destination); @@ -693,7 +687,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand { /** * Installs the plugin from {@code tmpRoot} into the plugins dir. - * If the plugin has a bin dir and/or a config dir, those are copied. + * If the plugin has a bin dir and/or a config dir, those are moved. */ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { @@ -701,9 +695,20 @@ class InstallPluginCommand extends EnvironmentAwareCommand { final Path destination = env.pluginsFile().resolve(info.getName()); deleteOnFailure.add(destination); + installPluginSupportFiles(info, tmpRoot, env.binFile().resolve(info.getName()), + env.configFile().resolve(info.getName()), deleteOnFailure); + movePlugin(tmpRoot, destination); + if (info.requiresKeystore()) { + createKeystoreIfNeeded(terminal, env, info); + } + terminal.println("-> Installed " + info.getName()); + } + + /** Moves bin and config directories from the plugin if they exist */ + private void installPluginSupportFiles(PluginInfo info, Path tmpRoot, + Path destBinDir, Path destConfigDir, List deleteOnFailure) throws Exception { Path tmpBinDir = tmpRoot.resolve("bin"); if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(info.getName()); deleteOnFailure.add(destBinDir); installBin(info, tmpBinDir, destBinDir); } @@ -712,14 +717,8 @@ class InstallPluginCommand extends EnvironmentAwareCommand { if 
(Files.exists(tmpConfigDir)) { // some files may already exist, and we don't remove plugin config files on plugin removal, // so any installed config files are left on failure too - Path destConfigDir = env.configFile().resolve(info.getName()); installConfig(info, tmpConfigDir, destConfigDir); } - movePlugin(tmpRoot, destination); - if (info.requiresKeystore()) { - createKeystoreIfNeeded(terminal, env, info); - } - terminal.println("-> Installed " + info.getName()); } /** Moves the plugin directory into its final destination. **/ diff --git a/docs/java-rest/high-level/apis/close_index.asciidoc b/docs/java-rest/high-level/apis/close_index.asciidoc new file mode 100644 index 00000000000..a4d0f638353 --- /dev/null +++ b/docs/java-rest/high-level/apis/close_index.asciidoc @@ -0,0 +1,70 @@ +[[java-rest-high-close-index]] +=== Close Index API + +[[java-rest-high-close-index-request]] +==== Close Index Request + +A `CloseIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request] +-------------------------------------------------- +<1> The index to close + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the index is closed +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the index is closed +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-masterTimeout]
+-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-close-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute] +-------------------------------------------------- + +[[java-rest-high-close-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-close-index-response]] +==== Close Index Response + +The returned `CloseIndexResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/apis/createindex.asciidoc b/docs/java-rest/high-level/apis/createindex.asciidoc index ebd9158e193..bfc7794c8f9 100644 --- a/docs/java-rest/high-level/apis/createindex.asciidoc +++ b/docs/java-rest/high-level/apis/createindex.asciidoc @@ -48,7 +48,7 @@ The following arguments can optionally be provided: include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-timeout] -------------------------------------------------- <1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the index creatiom as a `String` +<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -61,8 +61,10 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-waitForActiveShards] -------------------------------------------------- -<1> The number of active shard copies to wait for before proceeding with the operation, as an `int`. -<2> The number of active shard copies to wait for before proceeding with the operation, as an `ActiveShardCount`.
+<1> The number of active shard copies to wait for before the create index API returns a +response, as an `int`. +<2> The number of active shard copies to wait for before the create index API returns a +response, as an `ActiveShardCount`. [[java-rest-high-create-index-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index 2312f283720..f6da998a847 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -1,10 +1,23 @@ include::createindex.asciidoc[] + include::deleteindex.asciidoc[] + +include::open_index.asciidoc[] + +include::close_index.asciidoc[] + include::_index.asciidoc[] + include::get.asciidoc[] + include::delete.asciidoc[] + include::update.asciidoc[] + include::bulk.asciidoc[] + include::search.asciidoc[] + include::scroll.asciidoc[] + include::main.asciidoc[] diff --git a/docs/java-rest/high-level/apis/open_index.asciidoc b/docs/java-rest/high-level/apis/open_index.asciidoc new file mode 100644 index 00000000000..a30e62123a8 --- /dev/null +++ b/docs/java-rest/high-level/apis/open_index.asciidoc @@ -0,0 +1,81 @@ +[[java-rest-high-open-index]] +=== Open Index API + +[[java-rest-high-open-index-request]] +==== Open Index Request + +An `OpenIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request] +-------------------------------------------------- +<1> The index to open + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to 
acknowledge the index is opened +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the index is opened +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-waitForActiveShards] +-------------------------------------------------- +<1> The number of active shard copies to wait for before the open index API +returns a response, as an `int`. +<2> The number of active shard copies to wait for before the open index API +returns a response, as an `ActiveShardCount`. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-open-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute] +-------------------------------------------------- + +[[java-rest-high-open-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-open-index-response]] +==== Open Index Response + +The returned `OpenIndexResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request +<2> Indicates whether the requisite number of shard copies were started for +each shard in the index before timing out diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 7a6b55619f7..fa71b62d64e 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -6,6 +6,8 @@ The Java High Level REST Client supports the following APIs: Indices APIs:: * <> * <> +* <> +* <> Single document APIs:: * <> diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 155b5f272b4..7898631416b 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -320,7 +320,7 @@ POST hockey/player/_update_by_query Note: all of the `_update_by_query` examples above could really do with a `query` to limit the data that they pull back. While you *could* use a -See {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient +{ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. 
diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index e5966e56b35..0c19bf172bb 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -1,5 +1,5 @@ [[search-aggregations-metrics-top-hits-aggregation]] -=== Top hits Aggregation +=== Top Hits Aggregation A `top_hits` metric aggregator keeps track of the most relevant document being aggregated. This aggregator is intended to be used as a sub aggregator, so that the top matching documents can be aggregated per bucket. diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 5dc1b80d4ad..4cc532c99c5 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -41,7 +41,7 @@ for more details) |Required | details)|Optional |`skip` |=== -The following snippet only retains buckets where the total sales for the month is more than 400: +The following snippet only retains buckets where the total sales for the month is more than 200: [source,js] -------------------------------------------------- diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 1ce44b6028d..cb976601fdc 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -171,7 +171,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /armenian_example +PUT /basque_example { "settings": { "analysis": { @@ -536,7 +536,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: 
[source,js] ---------------------------------------------------- -PUT /detch_example +PUT /dutch_example { "settings": { "analysis": { @@ -1554,7 +1554,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /swidish_example +PUT /swedish_example { "settings": { "analysis": { diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 31d529b6c44..b1eb36e346d 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -1,41 +1,44 @@ [[index-modules-translog]] == Translog -Changes to Lucene are only persisted to disk during a Lucene commit, -which is a relatively heavy operation and so cannot be performed after every -index or delete operation. Changes that happen after one commit and before another -will be lost in the event of process exit or HW failure. +Changes to Lucene are only persisted to disk during a Lucene commit, which is a +relatively expensive operation and so cannot be performed after every index or +delete operation. Changes that happen after one commit and before another will +be removed from the index by Lucene in the event of process exit or hardware +failure. -To prevent this data loss, each shard has a _transaction log_ or write ahead -log associated with it. Any index or delete operation is written to the -translog after being processed by the internal Lucene index. - -In the event of a crash, recent transactions can be replayed from the -transaction log when the shard recovers. +Because Lucene commits are too expensive to perform on every individual change, +each shard copy also has a _transaction log_ known as its _translog_ associated +with it. All index and delete operations are written to the translog after +being processed by the internal Lucene index but before they are acknowledged. 
+In the event of a crash, recent transactions that have been acknowledged but +not yet included in the last Lucene commit can instead be recovered from the +translog when the shard recovers. An Elasticsearch flush is the process of performing a Lucene commit and -starting a new translog. It is done automatically in the background in order -to make sure the transaction log doesn't grow too large, which would make +starting a new translog. Flushes are performed automatically in the background +in order to make sure the translog doesn't grow too large, which would make replaying its operations take a considerable amount of time during recovery. -It is also exposed through an API, though its rarely needed to be performed -manually. +The ability to perform a flush manually is also exposed through an API, +although this is rarely needed. [float] === Translog settings -The data in the transaction log is only persisted to disk when the translog is +The data in the translog is only persisted to disk when the translog is ++fsync++ed and committed. In the event of hardware failure, any data written since the previous translog commit will be lost. -By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds if `index.translog.durability` is set -to `async` or if set to `request` (default) at the end of every <>, <>, -<>, or <> request. In fact, Elasticsearch -will only report success of an index, delete, update, or bulk request to the -client after the transaction log has been successfully ++fsync++ed and committed -on the primary and on every allocated replica. +By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds +if `index.translog.durability` is set to `async` or if set to `request` +(default) at the end of every <>, <>, +<>, or <> request. 
More precisely, if set +to `request`, Elasticsearch will only report success of an index, delete, +update, or bulk request to the client after the translog has been successfully +++fsync++ed and committed on the primary and on every allocated replica. -The following <> per-index settings -control the behaviour of the transaction log: +The following <> per-index +settings control the behaviour of the translog: `index.translog.sync_interval`:: @@ -64,17 +67,20 @@ update, or bulk request. This setting accepts the following parameters: `index.translog.flush_threshold_size`:: -The translog stores all operations that are not yet safely persisted in Lucene (i.e., are -not part of a lucene commit point). Although these operations are available for reads, they will -need to be reindexed if the shard was to shutdown and has to be recovered. This settings controls -the maximum total size of these operations, to prevent recoveries from taking too long. Once the -maximum size has been reached a flush will happen, generating a new Lucene commit. Defaults to `512mb`. +The translog stores all operations that are not yet safely persisted in Lucene +(i.e., are not part of a Lucene commit point). Although these operations are +available for reads, they will need to be reindexed if the shard was to +shutdown and has to be recovered. This settings controls the maximum total size +of these operations, to prevent recoveries from taking too long. Once the +maximum size has been reached a flush will happen, generating a new Lucene +commit point. Defaults to `512mb`. `index.translog.retention.size`:: -The total size of translog files to keep. Keeping more translog files increases the chance of performing -an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery -will fall back to a file based sync. Defaults to `512mb` +The total size of translog files to keep. 
Keeping more translog files increases +the chance of performing an operation based sync when recovering replicas. If +the translog files are not sufficient, replica recovery will fall back to a +file based sync. Defaults to `512mb` `index.translog.retention.age`:: @@ -86,10 +92,14 @@ The maximum duration for which translog files will be kept. Defaults to `12h`. [[corrupt-translog-truncation]] === What to do if the translog becomes corrupted? -In some cases (a bad drive, user error) the translog can become corrupted. When -this corruption is detected by Elasticsearch due to mismatching checksums, -Elasticsearch will fail the shard and refuse to allocate that copy of the data -to the node, recovering from a replica if available. +In some cases (a bad drive, user error) the translog on a shard copy can become +corrupted. When this corruption is detected by Elasticsearch due to mismatching +checksums, Elasticsearch will fail that shard copy and refuse to use that copy +of the data. If there are other copies of the shard available then +Elasticsearch will automatically recover from one of them using the normal +shard allocation and recovery mechanism. In particular, if the corrupt shard +copy was the primary when the corruption was detected then one of its replicas +will be promoted in its place. If there is no copy of the data from which Elasticsearch can recover successfully, a user may want to recover the data that is part of the shard at diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 76aba682771..0ef4a463c9b 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -47,18 +47,16 @@ PUT range_index/_doc/1 -------------------------------------------------- //CONSOLE -The following is an example of a `date_range` query over the `date_range` field named "time_frame". 
+The following is an example of a <> on the `integer_range` field named "expected_attendees". [source,js] -------------------------------------------------- -POST range_index/_search +GET range_index/_search { "query" : { - "range" : { - "time_frame" : { <5> - "gte" : "2015-10-31", - "lte" : "2015-11-01", - "relation" : "within" <6> + "term" : { + "expected_attendees" : { + "value": 12 } } } @@ -104,6 +102,27 @@ The result produced by the above query. -------------------------------------------------- // TESTRESPONSE[s/"took": 13/"took" : $body.took/] + +The following is an example of a `date_range` query over the `date_range` field named "time_frame". + +[source,js] +-------------------------------------------------- +GET range_index/_search +{ + "query" : { + "range" : { + "time_frame" : { <5> + "gte" : "2015-10-31", + "lte" : "2015-11-01", + "relation" : "within" <6> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:range_index] + <1> `date_range` types accept the same field parameters defined by the <> type. <2> Example indexing a meeting with 10 to 20 attendees. <3> Date ranges accept the same format as described in <>. @@ -112,6 +131,44 @@ The result produced by the above query. <6> Range queries over range <> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`, `INTERSECTS` (default). 
+This query produces a similar result: + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards" : { + "total": 2, + "successful": 2, + "skipped" : 0, + "failed": 0 + }, + "hits" : { + "total" : 1, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "range_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 1.0, + "_source" : { + "expected_attendees" : { + "gte" : 10, "lte" : 20 + }, + "time_frame" : { + "gte" : "2015-10-31 12:00:00", "lte" : "2015-11-01" + } + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13/"took" : $body.took/] + + [[range-params]] ==== Parameters for range fields diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 4b668203a33..f1224f33ca7 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -51,6 +51,8 @@ GET _search as the query clause for `normal`. <2> The `normal` clause has the default neutral boost of `1.0`. +A `term` query can also match against <>. + .Why doesn't the `term` query match my document? ************************************************** diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index fd680bb6d6c..4552366de98 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -24,7 +24,7 @@ GET /_search }, "highlight" : { "fields" : { - "comment" : {} + "content" : {} } } } diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index 204960544a6..6e1bb2a9e6c 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ b/docs/reference/search/request/rescore.asciidoc @@ -15,7 +15,8 @@ Currently the rescore API has only one implementation: the query rescorer, which uses a query to tweak the scoring. 
In the future, alternative rescorers may be made available, for example, a pair-wise rescorer. -NOTE: the `rescore` phase is not executed when <> is used. +NOTE: An error will be thrown if an explicit <> (other than `_score`) +is provided with a `rescore` query. NOTE: when exposing pagination to your users, you should not change `window_size` as you step through each page (by passing different diff --git a/libs/build.gradle b/libs/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java index 8285fef6d39..14e2365eb7e 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java @@ -26,7 +26,6 @@ import java.nio.channels.NetworkChannel; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; /** @@ -48,9 +47,6 @@ import java.util.function.BiConsumer; public abstract class AbstractNioChannel implements NioChannel { final S socketChannel; - // This indicates if the channel has been scheduled to be closed. Read the closeFuture to determine if - // the channel close process has completed. - final AtomicBoolean isClosing = new AtomicBoolean(false); private final InetSocketAddress localAddress; private final CompletableFuture closeContext = new CompletableFuture<>(); @@ -73,21 +69,6 @@ public abstract class AbstractNioChannel - * If the channel is open and the state can be transitioned to closed, the close operation will - * be scheduled with the event loop. - *

- * If the channel is already set to closed, it is assumed that it is already scheduled to be closed. - */ - @Override - public void close() { - if (isClosing.compareAndSet(false, true)) { - selector.queueChannelClose(this); - } - } - /** * Closes the channel synchronously. This method should only be called from the selector thread. *

@@ -95,11 +76,10 @@ public abstract class AbstractNioChannel channelFactory = nioServerChannel.getChannelFactory(); SocketSelector selector = selectorSupplier.get(); NioSocketChannel nioSocketChannel = channelFactory.acceptNioChannel(nioServerChannel, selector); - nioServerChannel.getAcceptContext().accept(nioSocketChannel); + nioServerChannel.getContext().acceptChannel(nioSocketChannel); } /** diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java new file mode 100644 index 00000000000..5d77675aa48 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.LinkedList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; + +public class BytesChannelContext extends SocketChannelContext { + + private final ReadConsumer readConsumer; + private final InboundChannelBuffer channelBuffer; + private final LinkedList queued = new LinkedList<>(); + private final AtomicBoolean isClosing = new AtomicBoolean(false); + + public BytesChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler, + ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { + super(channel, exceptionHandler); + this.readConsumer = readConsumer; + this.channelBuffer = channelBuffer; + } + + @Override + public int read() throws IOException { + if (channelBuffer.getRemaining() == 0) { + // Requiring one additional byte will ensure that a new page is allocated. + channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); + } + + int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); + + if (bytesRead == 0) { + return 0; + } + + channelBuffer.incrementIndex(bytesRead); + + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + bytesConsumed = readConsumer.consumeReads(channelBuffer); + channelBuffer.release(bytesConsumed); + } + + return bytesRead; + } + + @Override + public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } + + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); + SocketSelector selector = channel.getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + selector.queueWriteInChannelBuffer(writeOperation); + } + + @Override + public void 
queueWriteOperation(WriteOperation writeOperation) { + channel.getSelector().assertOnSelectorThread(); + queued.add((BytesWriteOperation) writeOperation); + } + + @Override + public void flushChannel() throws IOException { + channel.getSelector().assertOnSelectorThread(); + int ops = queued.size(); + if (ops == 1) { + singleFlush(queued.pop()); + } else if (ops > 1) { + multiFlush(); + } + } + + @Override + public boolean hasQueuedWriteOps() { + channel.getSelector().assertOnSelectorThread(); + return queued.isEmpty() == false; + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + channel.getSelector().queueChannelClose(channel); + } + } + + @Override + public boolean selectorShouldClose() { + return isPeerClosed() || hasIOException() || isClosing.get(); + } + + @Override + public void closeFromSelector() throws IOException { + channel.getSelector().assertOnSelectorThread(); + if (channel.isOpen()) { + IOException channelCloseException = null; + try { + channel.closeFromSelector(); + } catch (IOException e) { + channelCloseException = e; + } + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + channelBuffer.close(); + for (BytesWriteOperation op : queued) { + channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + } + queued.clear(); + if (channelCloseException != null) { + throw channelCloseException; + } + } + } + + private void singleFlush(BytesWriteOperation headOp) throws IOException { + try { + int written = flushToChannel(headOp.getBuffersToWrite()); + headOp.incrementIndex(written); + } catch (IOException e) { + channel.getSelector().executeFailedListener(headOp.getListener(), e); + throw e; + } + + if (headOp.isFullyFlushed()) { + channel.getSelector().executeListener(headOp.getListener(), null); + } else { + queued.push(headOp); + } + } + + private void multiFlush() throws IOException { + boolean lastOpCompleted = true; + while 
(lastOpCompleted && queued.isEmpty() == false) { + BytesWriteOperation op = queued.pop(); + singleFlush(op); + lastOpCompleted = op.isFullyFlushed(); + } + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java deleted file mode 100644 index eeda147be6c..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; - -public class BytesReadContext implements ReadContext { - - private final NioSocketChannel channel; - private final ReadConsumer readConsumer; - private final InboundChannelBuffer channelBuffer; - - public BytesReadContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - this.channel = channel; - this.channelBuffer = channelBuffer; - this.readConsumer = readConsumer; - } - - @Override - public int read() throws IOException { - if (channelBuffer.getRemaining() == 0) { - // Requiring one additional byte will ensure that a new page is allocated. 
- channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); - } - - int bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); - - if (bytesRead == -1) { - return bytesRead; - } - - channelBuffer.incrementIndex(bytesRead); - - int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0) { - bytesConsumed = readConsumer.consumeReads(channelBuffer); - channelBuffer.release(bytesConsumed); - } - - return bytesRead; - } - - @Override - public void close() { - channelBuffer.close(); - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java deleted file mode 100644 index c2816deef53..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedList; -import java.util.function.BiConsumer; - -public class BytesWriteContext implements WriteContext { - - private final NioSocketChannel channel; - private final LinkedList queued = new LinkedList<>(); - - public BytesWriteContext(NioSocketChannel channel) { - this.channel = channel; - } - - @Override - public void sendMessage(Object message, BiConsumer listener) { - ByteBuffer[] buffers = (ByteBuffer[]) message; - if (channel.isWritable() == false) { - listener.accept(null, new ClosedChannelException()); - return; - } - - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); - SocketSelector selector = channel.getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - // TODO: Eval if we will allow writes from sendMessage - selector.queueWriteInChannelBuffer(writeOperation); - } - - @Override - public void queueWriteOperations(WriteOperation writeOperation) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to queue writes"; - queued.add(writeOperation); - } - - @Override - public void flushChannel() throws IOException { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to flush writes"; - int ops = queued.size(); - if (ops == 1) { - singleFlush(queued.pop()); - } else if (ops > 1) { - multiFlush(); - } - } - - @Override - public boolean hasQueuedWriteOps() { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to access queued writes"; - return queued.isEmpty() == false; - } - - @Override - public void clearQueuedWriteOps(Exception e) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to clear queued writes"; - for (WriteOperation op : queued) { - 
channel.getSelector().executeFailedListener(op.getListener(), e); - } - queued.clear(); - } - - private void singleFlush(WriteOperation headOp) throws IOException { - try { - headOp.flush(); - } catch (IOException e) { - channel.getSelector().executeFailedListener(headOp.getListener(), e); - throw e; - } - - if (headOp.isFullyFlushed()) { - channel.getSelector().executeListener(headOp.getListener(), null); - } else { - queued.push(headOp); - } - } - - private void multiFlush() throws IOException { - boolean lastOpCompleted = true; - while (lastOpCompleted && queued.isEmpty() == false) { - WriteOperation op = queued.pop(); - singleFlush(op); - lastOpCompleted = op.isFullyFlushed(); - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java new file mode 100644 index 00000000000..14e8cace66d --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.function.BiConsumer; + +public class BytesWriteOperation implements WriteOperation { + + private final NioSocketChannel channel; + private final BiConsumer listener; + private final ByteBuffer[] buffers; + private final int[] offsets; + private final int length; + private int internalIndex; + + public BytesWriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer listener) { + this.channel = channel; + this.listener = listener; + this.buffers = buffers; + this.offsets = new int[buffers.length]; + int offset = 0; + for (int i = 0; i < buffers.length; i++) { + ByteBuffer buffer = buffers[i]; + offsets[i] = offset; + offset += buffer.remaining(); + } + length = offset; + } + + @Override + public BiConsumer getListener() { + return listener; + } + + @Override + public NioSocketChannel getChannel() { + return channel; + } + + public boolean isFullyFlushed() { + assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index=" + + internalIndex + "]"; + return internalIndex == length; + } + + public void incrementIndex(int delta) { + internalIndex += delta; + assert length >= internalIndex : "Should never increment index past length [length=" + length + ", post-increment index=" + + internalIndex + ", delta=" + delta + "]"; + } + + public ByteBuffer[] getBuffersToWrite() { + final int index = Arrays.binarySearch(offsets, internalIndex); + int offsetIndex = index < 0 ? 
(-(index + 1)) - 1 : index; + + ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex]; + + ByteBuffer firstBuffer = buffers[offsetIndex].duplicate(); + firstBuffer.position(internalIndex - offsets[offsetIndex]); + postIndexBuffers[0] = firstBuffer; + int j = 1; + for (int i = (offsetIndex + 1); i < buffers.length; ++i) { + postIndexBuffers[j++] = buffers[i].duplicate(); + } + + return postIndexBuffers; + } + +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java new file mode 100644 index 00000000000..fa664484c1c --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; + +public interface ChannelContext { + /** + * This method cleans up any context resources that need to be released when a channel is closed. It + * should only be called by the selector thread. 
+ * + * @throws IOException during channel / context close + */ + void closeFromSelector() throws IOException; + + /** + * Schedules a channel to be closed by the selector event loop with which it is registered. + * + * If the channel is open and the state can be transitioned to closed, the close operation will + * be scheduled with the event loop. + * + * Depending on the underlying protocol of the channel, a close operation might simply close the socket + * channel or may involve reading and writing messages. + */ + void closeChannel(); + + void handleException(Exception e); +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java index d90927af8b9..5fc3f46f998 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java @@ -88,9 +88,7 @@ public abstract class ChannelFactory new Page(ByteBuffer.allocate(PAGE_SIZE), () -> {})); + } + @Override public void close() { if (isClosed.compareAndSet(false, true)) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java index 433ec204e86..690e3d3b38b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java @@ -44,6 +44,8 @@ public interface NioChannel { NetworkChannel getRawChannel(); + ChannelContext getContext(); + /** * Adds a close listener to the channel. Multiple close listeners can be added. There is no guarantee * about the order in which close listeners will be executed. 
If the channel is already closed, the diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java index 8eb904dc741..3d1748e413a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java @@ -21,12 +21,13 @@ package org.elasticsearch.nio; import java.io.IOException; import java.nio.channels.ServerSocketChannel; -import java.util.function.Consumer; +import java.util.concurrent.atomic.AtomicBoolean; public class NioServerSocketChannel extends AbstractNioChannel { private final ChannelFactory channelFactory; - private Consumer acceptContext; + private ServerChannelContext context; + private final AtomicBoolean contextSet = new AtomicBoolean(false); public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory channelFactory, AcceptingSelector selector) throws IOException { @@ -39,17 +40,22 @@ public class NioServerSocketChannel extends AbstractNioChannel acceptContext) { - this.acceptContext = acceptContext; + public void setContext(ServerChannelContext context) { + if (contextSet.compareAndSet(false, true)) { + this.context = context; + } else { + throw new IllegalStateException("Context on this channel were already set. 
It should only be once."); + } } - public Consumer getAcceptContext() { - return acceptContext; + @Override + public ServerChannelContext getContext() { + return context; } @Override diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java index 5260c0f5fcf..aba98ff0cbf 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java @@ -22,7 +22,6 @@ package org.elasticsearch.nio; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; @@ -33,10 +32,8 @@ public class NioSocketChannel extends AbstractNioChannel { private final InetSocketAddress remoteAddress; private final CompletableFuture connectContext = new CompletableFuture<>(); private final SocketSelector socketSelector; - private final AtomicBoolean contextsSet = new AtomicBoolean(false); - private WriteContext writeContext; - private ReadContext readContext; - private BiConsumer exceptionContext; + private final AtomicBoolean contextSet = new AtomicBoolean(false); + private SocketChannelContext context; private Exception connectException; public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) throws IOException { @@ -45,23 +42,15 @@ public class NioSocketChannel extends AbstractNioChannel { this.socketSelector = selector; } - @Override - public void closeFromSelector() throws IOException { - assert socketSelector.isOnCurrentThread() : "Should only call from selector thread"; - // Even if the channel has already been closed we will clear any pending write operations just in case - if (writeContext.hasQueuedWriteOps()) { - 
writeContext.clearQueuedWriteOps(new ClosedChannelException()); - } - readContext.close(); - - super.closeFromSelector(); - } - @Override public SocketSelector getSelector() { return socketSelector; } + public int write(ByteBuffer buffer) throws IOException { + return socketChannel.write(buffer); + } + public int write(ByteBuffer[] buffers) throws IOException { if (buffers.length == 1) { return socketChannel.write(buffers[0]); @@ -82,37 +71,17 @@ public class NioSocketChannel extends AbstractNioChannel { } } - public int read(InboundChannelBuffer buffer) throws IOException { - int bytesRead = (int) socketChannel.read(buffer.sliceBuffersFrom(buffer.getIndex())); - - if (bytesRead == -1) { - return bytesRead; - } - - buffer.incrementIndex(bytesRead); - return bytesRead; - } - - public void setContexts(ReadContext readContext, WriteContext writeContext, BiConsumer exceptionContext) { - if (contextsSet.compareAndSet(false, true)) { - this.readContext = readContext; - this.writeContext = writeContext; - this.exceptionContext = exceptionContext; + public void setContext(SocketChannelContext context) { + if (contextSet.compareAndSet(false, true)) { + this.context = context; } else { - throw new IllegalStateException("Contexts on this channel were already set. They should only be once."); + throw new IllegalStateException("Context on this channel were already set. 
It should only be once."); } } - public WriteContext getWriteContext() { - return writeContext; - } - - public ReadContext getReadContext() { - return readContext; - } - - public BiConsumer getExceptionContext() { - return exceptionContext; + @Override + public SocketChannelContext getContext() { + return context; } public InetSocketAddress getRemoteAddress() { @@ -123,14 +92,6 @@ public class NioSocketChannel extends AbstractNioChannel { return isConnectComplete0(); } - public boolean isWritable() { - return isClosing.get() == false; - } - - public boolean isReadable() { - return isClosing.get() == false; - } - /** * This method will attempt to complete the connection process for this channel. It should be called for * new channels or for a channel that has produced a OP_CONNECT event. If this method returns true then diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java index b6272ce7135..be2dc6f3414 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java @@ -26,28 +26,81 @@ public final class SelectionKeyUtils { private SelectionKeyUtils() {} + /** + * Adds an interest in writes for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void setWriteInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_WRITE); } + /** + * Removes an interest in writes for this channel while maintaining other interests. 
+ * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void removeWriteInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_WRITE); } + /** + * Removes an interest in connects and reads for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void setConnectAndReadInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ); } + /** + * Removes an interest in connects, reads, and writes for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static void setConnectReadAndWriteInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ | SelectionKey.OP_WRITE); + } + + /** + * Removes an interest in connects for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void removeConnectInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_CONNECT); } - public static void setAcceptInterested(NioServerSocketChannel channel) { + /** + * Adds an interest in accepts for this channel while maintaining other interests. 
+ * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static void setAcceptInterested(NioServerSocketChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_ACCEPT); } + + + /** + * Checks for an interest in writes for this channel. + * + * @param channel the channel + * @return a boolean indicating if we are currently interested in writes for this channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static boolean isWriteInterested(NioSocketChannel channel) throws CancelledKeyException { + return (channel.getSelectionKey().interestOps() & SelectionKey.OP_WRITE) != 0; + } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java new file mode 100644 index 00000000000..551cab48e05 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +public class ServerChannelContext implements ChannelContext { + + private final NioServerSocketChannel channel; + private final Consumer acceptor; + private final BiConsumer exceptionHandler; + private final AtomicBoolean isClosing = new AtomicBoolean(false); + + public ServerChannelContext(NioServerSocketChannel channel, Consumer acceptor, + BiConsumer exceptionHandler) { + this.channel = channel; + this.acceptor = acceptor; + this.exceptionHandler = exceptionHandler; + } + + public void acceptChannel(NioSocketChannel acceptedChannel) { + acceptor.accept(acceptedChannel); + } + + @Override + public void closeFromSelector() throws IOException { + channel.closeFromSelector(); + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + channel.getSelector().queueChannelClose(channel); + } + } + + @Override + public void handleException(Exception e) { + exceptionHandler.accept(channel, e); + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java new file mode 100644 index 00000000000..62f82e8995d --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.function.BiConsumer; + +/** + * This context should implement the specific logic for a channel. When a channel receives a notification + * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will + * be called. This context will need to implement all protocol related logic. Additionally, if any special + * close behavior is required, it should be implemented in this context. + * + * The only methods of the context that should ever be called from a non-selector thread are + * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. 
+ */ +public abstract class SocketChannelContext implements ChannelContext { + + protected final NioSocketChannel channel; + private final BiConsumer exceptionHandler; + private boolean ioException; + private boolean peerClosed; + + protected SocketChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler) { + this.channel = channel; + this.exceptionHandler = exceptionHandler; + } + + @Override + public void handleException(Exception e) { + exceptionHandler.accept(channel, e); + } + + public void channelRegistered() throws IOException {} + + public abstract int read() throws IOException; + + public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + + public abstract void queueWriteOperation(WriteOperation writeOperation); + + public abstract void flushChannel() throws IOException; + + public abstract boolean hasQueuedWriteOps(); + + /** + * This method indicates if a selector should close this channel. + * + * @return a boolean indicating if the selector should close + */ + public abstract boolean selectorShouldClose(); + + protected boolean hasIOException() { + return ioException; + } + + protected boolean isPeerClosed() { + return peerClosed; + } + + protected int readFromChannel(ByteBuffer buffer) throws IOException { + try { + int bytesRead = channel.read(buffer); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int readFromChannel(ByteBuffer[] buffers) throws IOException { + try { + int bytesRead = channel.read(buffers); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer buffer) throws IOException { + try { + return channel.write(buffer); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer[] buffers) throws 
IOException { + try { + return channel.write(buffers); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + @FunctionalInterface + public interface ReadConsumer { + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index d3be18f3776..b1192f11eb1 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -43,8 +43,14 @@ public class SocketEventHandler extends EventHandler { * * @param channel that was registered */ - protected void handleRegistration(NioSocketChannel channel) { - SelectionKeyUtils.setConnectAndReadInterested(channel); + protected void handleRegistration(NioSocketChannel channel) throws IOException { + SocketChannelContext context = channel.getContext(); + context.channelRegistered(); + if (context.hasQueuedWriteOps()) { + SelectionKeyUtils.setConnectReadAndWriteInterested(channel); + } else { + SelectionKeyUtils.setConnectAndReadInterested(channel); + } } /** @@ -55,7 +61,7 @@ public class SocketEventHandler extends EventHandler { */ protected void registrationException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -76,7 +82,7 @@ public class SocketEventHandler extends EventHandler { */ protected void connectException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -86,10 +92,7 @@ 
public class SocketEventHandler extends EventHandler { * @param channel that can be read */ protected void handleRead(NioSocketChannel channel) throws IOException { - int bytesRead = channel.getReadContext().read(); - if (bytesRead == -1) { - handleClose(channel); - } + channel.getContext().read(); } /** @@ -100,23 +103,18 @@ public class SocketEventHandler extends EventHandler { */ protected void readException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** * This method is called when a channel signals it is ready to receive writes. All of the write logic * should occur in this call. * - * @param channel that can be read + * @param channel that can be written to */ protected void handleWrite(NioSocketChannel channel) throws IOException { - WriteContext channelContext = channel.getWriteContext(); + SocketChannelContext channelContext = channel.getContext(); channelContext.flushChannel(); - if (channelContext.hasQueuedWriteOps()) { - SelectionKeyUtils.setWriteInterested(channel); - } else { - SelectionKeyUtils.removeWriteInterested(channel); - } } /** @@ -127,20 +125,7 @@ public class SocketEventHandler extends EventHandler { */ protected void writeException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); - } - - /** - * This method is called when handling an event from a channel fails due to an unexpected exception. - * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw - * {@link java.nio.channels.CancelledKeyException}. 
- * - * @param channel that caused the exception - * @param exception that was thrown - */ - protected void genericChannelException(NioChannel channel, Exception exception) { - super.genericChannelException(channel, exception); - exceptionCaught((NioSocketChannel) channel, exception); + channel.getContext().handleException(exception); } /** @@ -153,7 +138,20 @@ public class SocketEventHandler extends EventHandler { logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception); } - private void exceptionCaught(NioSocketChannel channel, Exception e) { - channel.getExceptionContext().accept(channel, e); + /** + * @param channel that was handled + */ + protected void postHandling(NioSocketChannel channel) { + if (channel.getContext().selectorShouldClose()) { + handleClose(channel); + } else { + boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(channel); + boolean pendingWrites = channel.getContext().hasQueuedWriteOps(); + if (currentlyWriteInterested == false && pendingWrites) { + SelectionKeyUtils.setWriteInterested(channel); + } else if (currentlyWriteInterested && pendingWrites == false) { + SelectionKeyUtils.removeWriteInterested(channel); + } + } } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index ac8ad87b726..2de48fb8899 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -64,6 +64,8 @@ public class SocketSelector extends ESSelector { handleRead(nioSocketChannel); } } + + eventHandler.postHandling(nioSocketChannel); } @Override @@ -118,12 +120,12 @@ public class SocketSelector extends ESSelector { * @param writeOperation to be queued in a channel's buffer */ public void queueWriteInChannelBuffer(WriteOperation writeOperation) { - assert 
isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); NioSocketChannel channel = writeOperation.getChannel(); - WriteContext context = channel.getWriteContext(); + SocketChannelContext context = channel.getContext(); try { SelectionKeyUtils.setWriteInterested(channel); - context.queueWriteOperations(writeOperation); + context.queueWriteOperation(writeOperation); } catch (Exception e) { executeFailedListener(writeOperation.getListener(), e); } @@ -137,7 +139,7 @@ public class SocketSelector extends ESSelector { * @param value to provide to listener */ public void executeListener(BiConsumer listener, V value) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(value, null); } catch (Exception e) { @@ -153,7 +155,7 @@ public class SocketSelector extends ESSelector { * @param exception to provide to listener */ public void executeFailedListener(BiConsumer listener, Exception exception) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(null, exception); } catch (Exception e) { @@ -180,7 +182,7 @@ public class SocketSelector extends ESSelector { private void handleQueuedWrites() { WriteOperation writeOperation; while ((writeOperation = queuedWrites.poll()) != null) { - if (writeOperation.getChannel().isWritable()) { + if (writeOperation.getChannel().isOpen()) { queueWriteInChannelBuffer(writeOperation); } else { executeFailedListener(writeOperation.getListener(), new ClosedChannelException()); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java index b6fcc838a96..d2dfe4f37a0 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java @@ -19,74 +19,16 @@ package org.elasticsearch.nio; 
-import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.function.BiConsumer; -public class WriteOperation { +/** + * This is a basic write operation that can be queued with a channel. The only requirements of a write + * operation is that is has a listener and a reference to its channel. The actual conversion of the write + * operation implementation to bytes will be performed by the {@link SocketChannelContext}. + */ +public interface WriteOperation { - private final NioSocketChannel channel; - private final BiConsumer listener; - private final ByteBuffer[] buffers; - private final int[] offsets; - private final int length; - private int internalIndex; + BiConsumer getListener(); - public WriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer listener) { - this.channel = channel; - this.listener = listener; - this.buffers = buffers; - this.offsets = new int[buffers.length]; - int offset = 0; - for (int i = 0; i < buffers.length; i++) { - ByteBuffer buffer = buffers[i]; - offsets[i] = offset; - offset += buffer.remaining(); - } - length = offset; - } - - public ByteBuffer[] getByteBuffers() { - return buffers; - } - - public BiConsumer getListener() { - return listener; - } - - public NioSocketChannel getChannel() { - return channel; - } - - public boolean isFullyFlushed() { - return internalIndex == length; - } - - public int flush() throws IOException { - int written = channel.write(getBuffersToWrite()); - internalIndex += written; - return written; - } - - private ByteBuffer[] getBuffersToWrite() { - int offsetIndex = getOffsetIndex(internalIndex); - - ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex]; - - ByteBuffer firstBuffer = buffers[offsetIndex].duplicate(); - firstBuffer.position(internalIndex - offsets[offsetIndex]); - postIndexBuffers[0] = firstBuffer; - int j = 1; - for (int i = (offsetIndex + 1); i < buffers.length; ++i) { - postIndexBuffers[j++] = 
buffers[i].duplicate(); - } - - return postIndexBuffers; - } - - private int getOffsetIndex(int offset) { - final int i = Arrays.binarySearch(offsets, offset); - return i < 0 ? (-(i + 1)) - 1 : i; - } + NioSocketChannel getChannel(); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java index 9d8f47fe3ef..23ab3bb3e1d 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java @@ -27,8 +27,6 @@ import java.nio.channels.SelectionKey; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; -import java.util.function.BiConsumer; -import java.util.function.Consumer; import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; @@ -41,21 +39,21 @@ public class AcceptorEventHandlerTests extends ESTestCase { private SocketSelector socketSelector; private ChannelFactory channelFactory; private NioServerSocketChannel channel; - private Consumer acceptedChannelCallback; + private ServerChannelContext context; @Before @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { channelFactory = mock(ChannelFactory.class); socketSelector = mock(SocketSelector.class); - acceptedChannelCallback = mock(Consumer.class); + context = mock(ServerChannelContext.class); ArrayList selectors = new ArrayList<>(); selectors.add(socketSelector); handler = new AcceptorEventHandler(logger, new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()]))); AcceptingSelector selector = mock(AcceptingSelector.class); channel = new DoNotRegisterServerChannel(mock(ServerSocketChannel.class), channelFactory, selector); - channel.setAcceptContext(acceptedChannelCallback); + channel.setContext(context); 
channel.register(); } @@ -80,11 +78,11 @@ public class AcceptorEventHandlerTests extends ESTestCase { @SuppressWarnings("unchecked") public void testHandleAcceptCallsServerAcceptCallback() throws IOException { NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class), socketSelector); - childChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + childChannel.setContext(mock(SocketChannelContext.class)); when(channelFactory.acceptNioChannel(same(channel), same(socketSelector))).thenReturn(childChannel); handler.acceptChannel(channel); - verify(acceptedChannelCallback).accept(childChannel); + verify(context).acceptChannel(childChannel); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java new file mode 100644 index 00000000000..68ae1f2e503 --- /dev/null +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BytesChannelContextTests extends ESTestCase { + + private SocketChannelContext.ReadConsumer readConsumer; + private NioSocketChannel channel; + private BytesChannelContext context; + private InboundChannelBuffer channelBuffer; + private SocketSelector selector; + private BiConsumer listener; + private int messageLength; + + @Before + @SuppressWarnings("unchecked") + public void init() { + readConsumer = mock(SocketChannelContext.ReadConsumer.class); + + messageLength = randomInt(96) + 20; + selector = mock(SocketSelector.class); + listener = mock(BiConsumer.class); + channel = mock(NioSocketChannel.class); + channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new BytesChannelContext(channel, null, readConsumer, channelBuffer); + + when(channel.getSelector()).thenReturn(selector); + when(selector.isOnCurrentThread()).thenReturn(true); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + + assertEquals(messageLength, 
context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + } + + public void testMultipleReadsConsumed() throws IOException { + byte[] bytes = createMessage(messageLength * 2); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + + assertEquals(bytes.length, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testPartialRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + + assertEquals(messageLength, context.read()); + + assertEquals(bytes.length, channelBuffer.getIndex()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); + + IOException ex = 
expectThrows(IOException.class, () -> context.read()); + assertSame(ioException, ex); + } + + public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenThrow(new IOException()); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.read()); + assertTrue(context.selectorShouldClose()); + } + + public void testReadLessThanZeroMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenReturn(-1); + + assertEquals(0, context.read()); + + assertTrue(context.selectorShouldClose()); + } + + public void testCloseClosesChannelBuffer() throws IOException { + when(channel.isOpen()).thenReturn(true); + Runnable closer = mock(Runnable.class); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + BytesChannelContext context = new BytesChannelContext(channel, null, readConsumer, buffer); + context.closeFromSelector(); + verify(closer).run(); + } + + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], 
writeOp.getBuffersToWrite()[0]); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testWriteIsQueuedInChannel() { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testWriteOpsClearedOnClose() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); + + assertFalse(context.hasQueuedWriteOps()); + } + + public void testQueuedWriteIsFlushedInFlushCall() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.isFullyFlushed()).thenReturn(true); + when(writeOperation.getListener()).thenReturn(listener); + context.flushChannel(); + + verify(channel).write(buffers); + verify(selector).executeListener(listener, 
null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testPartialFlush() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(listener, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + } + + @SuppressWarnings("unchecked") + public void testMultipleWritesPartialFlushes() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BiConsumer listener2 = mock(BiConsumer.class); + BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); + BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); + when(writeOperation1.getListener()).thenReturn(listener); + when(writeOperation2.getListener()).thenReturn(listener2); + context.queueWriteOperation(writeOperation1); + context.queueWriteOperation(writeOperation2); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation1.isFullyFlushed()).thenReturn(true); + when(writeOperation2.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(selector).executeListener(listener, null); + verify(listener2, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation2.isFullyFlushed()).thenReturn(true); + + context.flushChannel(); + + verify(selector).executeListener(listener2, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + IOException exception = new IOException(); + 
when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(channel.write(buffers)).thenThrow(exception); + when(writeOperation.getListener()).thenReturn(listener); + expectThrows(IOException.class, () -> context.flushChannel()); + + verify(selector).executeFailedListener(listener, exception); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + IOException exception = new IOException(); + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(channel.write(buffers)).thenThrow(exception); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.flushChannel()); + assertTrue(context.selectorShouldClose()); + } + + public void initiateCloseSchedulesCloseWithSelector() { + context.closeChannel(); + verify(selector).queueChannelClose(channel); + } + + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } +} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java deleted file mode 100644 index 69f187378ac..00000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.function.Supplier; - -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesReadContextTests extends ESTestCase { - - private ReadContext.ReadConsumer readConsumer; - private NioSocketChannel channel; - private BytesReadContext readContext; - private InboundChannelBuffer channelBuffer; - private int messageLength; - - @Before - public void init() { - readConsumer = mock(ReadContext.ReadConsumer.class); - - messageLength = randomInt(96) + 20; - channel = mock(NioSocketChannel.class); - Supplier pageSupplier = () -> - new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); - channelBuffer = new InboundChannelBuffer(pageSupplier); - readContext = new BytesReadContext(channel, readConsumer, channelBuffer); - } - - public void testSuccessfulRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); - - 
assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); - } - - public void testMultipleReadsConsumed() throws IOException { - byte[] bytes = createMessage(messageLength * 2); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); - - assertEquals(bytes.length, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testPartialRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(0, messageLength); - - assertEquals(messageLength, readContext.read()); - - assertEquals(bytes.length, channelBuffer.getIndex()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); - - assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testReadThrowsIOException() throws IOException { - IOException ioException = new IOException(); - 
when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); - - IOException ex = expectThrows(IOException.class, () -> readContext.read()); - assertSame(ioException, ex); - } - - public void closeClosesChannelBuffer() { - InboundChannelBuffer buffer = mock(InboundChannelBuffer.class); - BytesReadContext readContext = new BytesReadContext(channel, readConsumer, buffer); - - readContext.close(); - - verify(buffer).close(); - } - - private static byte[] createMessage(int length) { - byte[] bytes = new byte[length]; - for (int i = 0; i < length; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java deleted file mode 100644 index 9d5b1c92cb6..00000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; -import org.mockito.ArgumentCaptor; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.function.BiConsumer; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.isNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesWriteContextTests extends ESTestCase { - - private SocketSelector selector; - private BiConsumer listener; - private BytesWriteContext writeContext; - private NioSocketChannel channel; - - @Before - @SuppressWarnings("unchecked") - public void setUp() throws Exception { - super.setUp(); - selector = mock(SocketSelector.class); - listener = mock(BiConsumer.class); - channel = mock(NioSocketChannel.class); - writeContext = new BytesWriteContext(channel); - - when(channel.getSelector()).thenReturn(selector); - when(selector.isOnCurrentThread()).thenReturn(true); - } - - public void testWriteFailsIfChannelNotWritable() throws Exception { - when(channel.isWritable()).thenReturn(false); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); - } - - public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(selector.isOnCurrentThread()).thenReturn(false); - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWrite(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, 
writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testWriteIsQueuedInChannel() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - } - - public void testWriteOpsCanBeCleared() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - - ClosedChannelException e = new ClosedChannelException(); - writeContext.clearQueuedWriteOps(e); - - verify(selector).executeFailedListener(listener, e); - - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testQueuedWriteIsFlushedInFlushCall() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(true); - when(writeOperation.getListener()).thenReturn(listener); - writeContext.flushChannel(); - - 
verify(writeOperation).flush(); - verify(selector).executeListener(listener, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testPartialFlush() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(listener, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - } - - @SuppressWarnings("unchecked") - public void testMultipleWritesPartialFlushes() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - BiConsumer listener2 = mock(BiConsumer.class); - WriteOperation writeOperation1 = mock(WriteOperation.class); - WriteOperation writeOperation2 = mock(WriteOperation.class); - when(writeOperation1.getListener()).thenReturn(listener); - when(writeOperation2.getListener()).thenReturn(listener2); - writeContext.queueWriteOperations(writeOperation1); - writeContext.queueWriteOperations(writeOperation2); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation1.isFullyFlushed()).thenReturn(true); - when(writeOperation2.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(selector).executeListener(listener, null); - verify(listener2, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation2.isFullyFlushed()).thenReturn(true); - - writeContext.flushChannel(); - - verify(selector).executeListener(listener2, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - 
assertTrue(writeContext.hasQueuedWriteOps()); - - IOException exception = new IOException(); - when(writeOperation.flush()).thenThrow(exception); - when(writeOperation.getListener()).thenReturn(listener); - expectThrows(IOException.class, () -> writeContext.flushChannel()); - - verify(selector).executeFailedListener(listener, exception); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - private byte[] generateBytes(int n) { - n += 10; - byte[] bytes = new byte[n]; - for (int i = 0; i < n; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java index c1183af4e5b..1c8a8a130cc 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; -import java.util.function.BiConsumer; import static org.mockito.Matchers.any; import static org.mockito.Matchers.same; @@ -139,7 +138,7 @@ public class ChannelFactoryTests extends ESTestCase { @Override public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { NioSocketChannel nioSocketChannel = new NioSocketChannel(channel, selector); - nioSocketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + nioSocketChannel.setContext(mock(SocketChannelContext.class)); return nioSocketChannel; } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioServerSocketChannelTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioServerSocketChannelTests.java index 713f01ec283..12a77a425eb 100644 --- 
a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioServerSocketChannelTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioServerSocketChannelTests.java @@ -30,6 +30,8 @@ import java.io.IOException; import java.nio.channels.ServerSocketChannel; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Supplier; import static org.mockito.Mockito.mock; @@ -56,52 +58,42 @@ public class NioServerSocketChannelTests extends ESTestCase { thread.join(); } + @SuppressWarnings("unchecked") public void testClose() throws Exception { AtomicBoolean isClosed = new AtomicBoolean(false); CountDownLatch latch = new CountDownLatch(1); - NioChannel channel = new DoNotCloseServerChannel(mock(ServerSocketChannel.class), mock(ChannelFactory.class), selector); + try (ServerSocketChannel rawChannel = ServerSocketChannel.open()) { + NioServerSocketChannel channel = new NioServerSocketChannel(rawChannel, mock(ChannelFactory.class), selector); + channel.setContext(new ServerChannelContext(channel, mock(Consumer.class), mock(BiConsumer.class))); + channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } - channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { - @Override - public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); + assertTrue(channel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); - assertTrue(channel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture 
= PlainActionFuture.newFuture(); - channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - channel.close(); - closeFuture.actionGet(); + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(channel); + closeFuture.actionGet(); - assertTrue(closedRawChannel.get()); - assertFalse(channel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); - } - - private class DoNotCloseServerChannel extends DoNotRegisterServerChannel { - - private DoNotCloseServerChannel(ServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector) - throws IOException { - super(channel, channelFactory, selector); - } - - @Override - void closeRawChannel() throws IOException { - closedRawChannel.set(true); + assertFalse(rawChannel.isOpen()); + assertFalse(channel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); } } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java index 6a32b11f18b..bbda9233bbb 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java @@ -41,7 +41,6 @@ import static org.mockito.Mockito.when; public class NioSocketChannelTests extends ESTestCase { private SocketSelector selector; - private AtomicBoolean closedRawChannel; private Thread thread; @Before @@ -49,7 +48,6 @@ public class NioSocketChannelTests extends ESTestCase { public void startSelector() throws IOException { selector = new SocketSelector(new SocketEventHandler(logger)); thread = new Thread(selector::runLoop); - closedRawChannel = new AtomicBoolean(false); thread.start(); FutureUtils.get(selector.isRunningFuture()); } @@ -65,42 +63,46 @@ public class NioSocketChannelTests extends ESTestCase { 
AtomicBoolean isClosed = new AtomicBoolean(false); CountDownLatch latch = new CountDownLatch(1); - NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); - socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { - @Override - public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); + try(SocketChannel rawChannel = SocketChannel.open()) { + NioSocketChannel socketChannel = new NioSocketChannel(rawChannel, selector); + socketChannel.setContext(new BytesChannelContext(socketChannel, mock(BiConsumer.class), + mock(SocketChannelContext.ReadConsumer.class), InboundChannelBuffer.allocatingInstance())); + socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } - assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - socketChannel.close(); - closeFuture.actionGet(); + assertTrue(socketChannel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); - assertTrue(closedRawChannel.get()); - assertFalse(socketChannel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(socketChannel); + closeFuture.actionGet(); + + assertFalse(rawChannel.isOpen()); + 
assertFalse(socketChannel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); + } } @SuppressWarnings("unchecked") public void testConnectSucceeds() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenReturn(true); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -109,15 +111,14 @@ public class NioSocketChannelTests extends ESTestCase { assertTrue(socketChannel.isConnectComplete()); assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); } @SuppressWarnings("unchecked") public void testConnectFails() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenThrow(new ConnectException()); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -129,16 +130,4 @@ public class NioSocketChannelTests extends ESTestCase { // Even if connection fails the channel is 'open' until close() is called assertTrue(socketChannel.isOpen()); } - - private class DoNotCloseChannel extends DoNotRegisterChannel { - - private DoNotCloseChannel(SocketChannel channel, SocketSelector selector) throws IOException { - super(channel, selector); - } - - @Override - void closeRawChannel() 
throws IOException { - closedRawChannel.set(true); - } - } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index 2898cf18d5b..d74214636db 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -28,8 +28,10 @@ import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; +import java.util.function.Supplier; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -39,7 +41,6 @@ public class SocketEventHandlerTests extends ESTestCase { private SocketEventHandler handler; private NioSocketChannel channel; - private ReadContext readContext; private SocketChannel rawChannel; @Before @@ -50,21 +51,36 @@ public class SocketEventHandlerTests extends ESTestCase { handler = new SocketEventHandler(logger); rawChannel = mock(SocketChannel.class); channel = new DoNotRegisterChannel(rawChannel, socketSelector); - readContext = mock(ReadContext.class); when(rawChannel.finishConnect()).thenReturn(true); - channel.setContexts(readContext, new BytesWriteContext(channel), exceptionHandler); + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + channel.setContext(new BytesChannelContext(channel, exceptionHandler, mock(SocketChannelContext.ReadConsumer.class), buffer)); channel.register(); channel.finishConnect(); when(socketSelector.isOnCurrentThread()).thenReturn(true); } + public void testRegisterCallsContext() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext channelContext = mock(SocketChannelContext.class); + 
when(channel.getContext()).thenReturn(channelContext); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + handler.handleRegistration(channel); + verify(channelContext).channelRegistered(); + } + public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException { handler.handleRegistration(channel); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, channel.getSelectionKey().interestOps()); } + public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { + channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class)); + handler.handleRegistration(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + } + public void testRegistrationExceptionCallsExceptionHandler() throws IOException { CancelledKeyException exception = new CancelledKeyException(); handler.registrationException(channel, exception); @@ -83,79 +99,75 @@ public class SocketEventHandlerTests extends ESTestCase { verify(exceptionHandler).accept(channel, exception); } - public void testHandleReadDelegatesToReadContext() throws IOException { - when(readContext.read()).thenReturn(1); + public void testHandleReadDelegatesToContext() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); + when(context.read()).thenReturn(1); handler.handleRead(channel); - - verify(readContext).read(); + verify(context).read(); } - public void testHandleReadMarksChannelForCloseIfPeerClosed() throws IOException { - NioSocketChannel nioSocketChannel = mock(NioSocketChannel.class); - when(nioSocketChannel.getReadContext()).thenReturn(readContext); - when(readContext.read()).thenReturn(-1); - - handler.handleRead(nioSocketChannel); - - verify(nioSocketChannel).closeFromSelector(); - } - - 
public void testReadExceptionCallsExceptionHandler() throws IOException { + public void testReadExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.readException(channel, exception); verify(exceptionHandler).accept(channel, exception); } - @SuppressWarnings("unchecked") - public void testHandleWriteWithCompleteFlushRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); - - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); - - when(rawChannel.write(buffers[0])).thenReturn(1); - handler.handleWrite(channel); - - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); - } - - @SuppressWarnings("unchecked") - public void testHandleWriteWithInCompleteFlushLeavesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); - - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); - - when(rawChannel.write(buffers[0])).thenReturn(0); - handler.handleWrite(channel); - - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); - } - - public void testHandleWriteWithNoOpsRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); - - handler.handleWrite(channel); - - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); - } - - private void setWriteAndRead(NioChannel channel) { - 
SelectionKeyUtils.setConnectAndReadInterested(channel); - SelectionKeyUtils.removeConnectInterested(channel); - SelectionKeyUtils.setWriteInterested(channel); - } - - public void testWriteExceptionCallsExceptionHandler() throws IOException { + public void testWriteExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.writeException(channel, exception); verify(exceptionHandler).accept(channel, exception); } + + public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext context = mock(SocketChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(true); + handler.postHandling(channel); + + verify(context).closeFromSelector(); + } + + public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext context = mock(SocketChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(false); + handler.postHandling(channel); + + verify(channel, times(0)).closeFromSelector(); + } + + public void testPostHandlingWillAddWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); + + when(context.hasQueuedWriteOps()).thenReturn(true); + + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, 
channel.getSelectionKey().interestOps()); + } + + public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); + + when(context.hasQueuedWriteOps()).thenReturn(false); + + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); + } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index e50da352623..5992244b2f9 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -49,7 +49,7 @@ public class SocketSelectorTests extends ESTestCase { private SocketEventHandler eventHandler; private NioSocketChannel channel; private TestSelectionKey selectionKey; - private WriteContext writeContext; + private SocketChannelContext channelContext; private BiConsumer listener; private ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; private Selector rawSelector; @@ -60,7 +60,7 @@ public class SocketSelectorTests extends ESTestCase { super.setUp(); eventHandler = mock(SocketEventHandler.class); channel = mock(NioSocketChannel.class); - writeContext = mock(WriteContext.class); + channelContext = mock(SocketChannelContext.class); listener = mock(BiConsumer.class); selectionKey = new TestSelectionKey(0); selectionKey.attach(channel); @@ -71,7 +71,7 @@ public class SocketSelectorTests extends ESTestCase { when(channel.isOpen()).thenReturn(true); 
when(channel.getSelectionKey()).thenReturn(selectionKey); - when(channel.getWriteContext()).thenReturn(writeContext); + when(channel.getContext()).thenReturn(channelContext); when(channel.isConnectComplete()).thenReturn(true); when(channel.getSelector()).thenReturn(socketSelector); } @@ -129,75 +129,71 @@ public class SocketSelectorTests extends ESTestCase { public void testQueueWriteWhenNotRunning() throws Exception { socketSelector.close(); - socketSelector.queueWrite(new WriteOperation(channel, buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(channel, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } - public void testQueueWriteChannelIsNoLongerWritable() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + public void testQueueWriteChannelIsClosed() throws Exception { + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(false); + when(channel.isOpen()).thenReturn(false); socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); } public void testQueueWriteSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); 
socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); } public void testQueueWriteSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.preSelect(); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); - when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); 
when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); } @@ -285,6 +281,16 @@ public class SocketSelectorTests extends ESTestCase { verify(eventHandler).readException(channel, ioException); } + public void testWillCallPostHandleAfterChannelHandling() throws Exception { + selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + + socketSelector.processKey(selectionKey); + + verify(eventHandler).handleWrite(channel); + verify(eventHandler).handleRead(channel); + verify(eventHandler).postHandling(channel); + } + public void testCleanup() throws Exception { NioSocketChannel unRegisteredChannel = mock(NioSocketChannel.class); @@ -292,7 +298,7 @@ public class SocketSelectorTests extends ESTestCase { socketSelector.preSelect(); - socketSelector.queueWrite(new WriteOperation(mock(NioSocketChannel.class), buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(mock(NioSocketChannel.class), buffers, listener)); socketSelector.scheduleForRegistration(unRegisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java index da74269b825..59fb9cde438 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java @@ -45,71 +45,58 @@ public class WriteOperationTests extends ESTestCase { } - public void testFlush() throws IOException { + public void testFullyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new 
WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - - when(channel.write(any(ByteBuffer[].class))).thenReturn(10); - - writeOp.flush(); + writeOp.incrementIndex(10); assertTrue(writeOp.isFullyFlushed()); } - public void testPartialFlush() throws IOException { + public void testPartiallyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - when(channel.write(any(ByteBuffer[].class))).thenReturn(5); - - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); } public void testMultipleFlushesWithCompositeBuffer() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); ArgumentCaptor buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class); - when(channel.write(buffersCaptor.capture())).thenReturn(5) - .thenReturn(5) - .thenReturn(2) - .thenReturn(15) - .thenReturn(1); - - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertTrue(writeOp.isFullyFlushed()); - - List values = buffersCaptor.getAllValues(); - ByteBuffer[] byteBuffers = values.get(0); - assertEquals(3, byteBuffers.length); - assertEquals(10, byteBuffers[0].remaining()); - - byteBuffers = values.get(1); + ByteBuffer[] byteBuffers = writeOp.getBuffersToWrite(); assertEquals(3, byteBuffers.length); assertEquals(5, byteBuffers[0].remaining()); - byteBuffers = values.get(2); + 
writeOp.incrementIndex(5); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(15, byteBuffers[0].remaining()); - byteBuffers = values.get(3); + writeOp.incrementIndex(2); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(13, byteBuffers[0].remaining()); - byteBuffers = values.get(4); + writeOp.incrementIndex(15); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(1, byteBuffers.length); assertEquals(1, byteBuffers[0].remaining()); + + writeOp.incrementIndex(1); + assertTrue(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); + assertEquals(1, byteBuffers.length); + assertEquals(0, byteBuffers[0].remaining()); } } diff --git a/modules/build.gradle b/modules/build.gradle index b3dbde24936..7f7e7e0965b 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -17,7 +17,7 @@ * under the License. 
*/ -subprojects { +configure(subprojects.findAll { it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // for modules which publish client jars apply plugin: 'elasticsearch.esplugin' diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 0bd96725c66..d287d7ee023 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -31,6 +31,7 @@ integTestCluster { dependencies { compile 'org.antlr:antlr4-runtime:4.5.3' compile 'org.ow2.asm:asm-debug-all:5.1' + compile project('spi') } dependencyLicenses { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java b/modules/lang-painless/spi/build.gradle similarity index 66% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java rename to modules/lang-painless/spi/build.gradle index d23ce56f57a..7e43a242a23 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java +++ b/modules/lang-painless/spi/build.gradle @@ -17,19 +17,24 @@ * under the License. */ -package org.elasticsearch.nio; +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' -import java.io.IOException; +group = 'org.elasticsearch.plugin' +archivesBaseName = 'elasticsearch-scripting-painless-spi' -public interface ReadContext extends AutoCloseable { - - int read() throws IOException; - - @Override - void close(); - - @FunctionalInterface - interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; +publishing { + publications { + nebula { + artifactId = archivesBaseName } + } } + +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" +} + +// no tests...yet? 
+test.enabled = false diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index 30ffaeff18b..e8fe1827268 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -112,7 +112,6 @@ public class RankEvalResponse extends ActionResponse implements ToXContentObject @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject("rank_eval"); builder.field("quality_level", evaluationResult); 
builder.startObject("details"); for (String key : details.keySet()) { @@ -127,7 +126,6 @@ public class RankEvalResponse extends ActionResponse implements ToXContentObject } builder.endObject(); builder.endObject(); - builder.endObject(); return builder; } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index 827f7be4442..881b9e04709 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -92,23 +92,21 @@ public class RankEvalResponseTests extends ESTestCase { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); String xContent = response.toXContent(builder, ToXContent.EMPTY_PARAMS).bytes().utf8ToString(); assertEquals(("{" + - " \"rank_eval\": {" + - " \"quality_level\": 0.123," + - " \"details\": {" + - " \"coffee_query\": {" + - " \"quality_level\": 0.1," + - " \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," + - " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," + - " \"rating\":5}," + - " {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," + - " \"rating\":null}" + - " ]" + - " }" + - " }," + - " \"failures\": {" + - " \"beer_query\": {" + - " \"error\": \"ParsingException[someMsg]\"" + - " }" + + " \"quality_level\": 0.123," + + " \"details\": {" + + " \"coffee_query\": {" + + " \"quality_level\": 0.1," + + " \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," + + " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," + + " \"rating\":5}," + + " {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," + + " \"rating\":null}" + + " ]" + + " }" + + " }," + + " \"failures\": {" + + " 
\"beer_query\": {" + + " \"error\": \"ParsingException[someMsg]\"" + " }" + " }" + "}").replaceAll("\\s+", ""), xContent); diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml index a81df5fa3fa..2eab6e47e7f 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -64,27 +64,27 @@ "metric" : { "precision": { "ignore_unlabeled" : true }} } - - match: { rank_eval.quality_level: 1} - - match: { rank_eval.details.amsterdam_query.quality_level: 1.0} - - match: { rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} - - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}} + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} + - match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}} - - length: { rank_eval.details.amsterdam_query.hits: 3} - - match: { rank_eval.details.amsterdam_query.hits.0.hit._id: "doc2"} - - match: { rank_eval.details.amsterdam_query.hits.0.rating: 1} - - match: { rank_eval.details.amsterdam_query.hits.1.hit._id: "doc3"} - - match: { rank_eval.details.amsterdam_query.hits.1.rating: 1} - - match: { rank_eval.details.amsterdam_query.hits.2.hit._id: "doc4"} - - is_false: rank_eval.details.amsterdam_query.hits.2.rating + - length: { details.amsterdam_query.hits: 3} + - match: { details.amsterdam_query.hits.0.hit._id: "doc2"} + - match: { details.amsterdam_query.hits.0.rating: 1} + - match: { details.amsterdam_query.hits.1.hit._id: "doc3"} + - match: { details.amsterdam_query.hits.1.rating: 1} + - match: { details.amsterdam_query.hits.2.hit._id: "doc4"} + - 
is_false: details.amsterdam_query.hits.2.rating - - match: { rank_eval.details.berlin_query.quality_level: 1.0} - - match: { rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} - - match: { rank_eval.details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} - - length: { rank_eval.details.berlin_query.hits: 2} - - match: { rank_eval.details.berlin_query.hits.0.hit._id: "doc1" } - - match: { rank_eval.details.berlin_query.hits.0.rating: 1} - - match: { rank_eval.details.berlin_query.hits.1.hit._id: "doc4" } - - is_false: rank_eval.details.berlin_query.hits.1.rating + - match: { details.berlin_query.quality_level: 1.0} + - match: { details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} + - match: { details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} + - length: { details.berlin_query.hits: 2} + - match: { details.berlin_query.hits.0.hit._id: "doc1" } + - match: { details.berlin_query.hits.0.rating: 1} + - match: { details.berlin_query.hits.1.hit._id: "doc4" } + - is_false: details.berlin_query.hits.1.rating --- "Mean Reciprocal Rank": @@ -152,14 +152,14 @@ } # average is (1/3 + 1/2)/2 = 5/12 ~ 0.41666666666666663 - - gt: {rank_eval.quality_level: 0.416} - - lt: {rank_eval.quality_level: 0.417} - - gt: {rank_eval.details.amsterdam_query.quality_level: 0.333} - - lt: {rank_eval.details.amsterdam_query.quality_level: 0.334} - - match: {rank_eval.details.amsterdam_query.metric_details: {"first_relevant": 3}} - - match: {rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"}, + - gt: {quality_level: 0.416} + - lt: {quality_level: 0.417} + - gt: {details.amsterdam_query.quality_level: 0.333} + - lt: {details.amsterdam_query.quality_level: 0.334} + - match: {details.amsterdam_query.metric_details: {"first_relevant": 3}} + - match: {details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"}, {"_index": "foo", "_id": 
"doc3"} ]} - - match: {rank_eval.details.berlin_query.quality_level: 0.5} - - match: {rank_eval.details.berlin_query.metric_details: {"first_relevant": 2}} - - match: {rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]} + - match: {details.berlin_query.quality_level: 0.5} + - match: {details.berlin_query.metric_details: {"first_relevant": 2}} + - match: {details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]} diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml index 0aca6fdde9e..3a68890dce9 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml @@ -69,11 +69,11 @@ "metric" : { "dcg": {}} } - - gt: {rank_eval.quality_level: 13.848263 } - - lt: {rank_eval.quality_level: 13.848264 } - - gt: {rank_eval.details.dcg_query.quality_level: 13.848263} - - lt: {rank_eval.details.dcg_query.quality_level: 13.848264} - - match: {rank_eval.details.dcg_query.unknown_docs: [ ]} + - gt: {quality_level: 13.848263 } + - lt: {quality_level: 13.848264 } + - gt: {details.dcg_query.quality_level: 13.848263} + - lt: {details.dcg_query.quality_level: 13.848264} + - match: {details.dcg_query.unknown_docs: [ ]} # reverse the order in which the results are returned (less relevant docs first) @@ -96,11 +96,11 @@ "metric" : { "dcg": { }} } - - gt: {rank_eval.quality_level: 10.299674} - - lt: {rank_eval.quality_level: 10.299675} - - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674} - - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675} - - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]} + - gt: {quality_level: 10.299674} + - lt: {quality_level: 10.299675} + - gt: {details.dcg_query_reverse.quality_level: 10.299674} + - lt: {details.dcg_query_reverse.quality_level: 
10.299675} + - match: {details.dcg_query_reverse.unknown_docs: [ ]} # if we mix both, we should get the average @@ -134,11 +134,11 @@ "metric" : { "dcg": { }} } - - gt: {rank_eval.quality_level: 12.073969} - - lt: {rank_eval.quality_level: 12.073970} - - gt: {rank_eval.details.dcg_query.quality_level: 13.848263} - - lt: {rank_eval.details.dcg_query.quality_level: 13.848264} - - match: {rank_eval.details.dcg_query.unknown_docs: [ ]} - - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674} - - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675} - - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]} + - gt: {quality_level: 12.073969} + - lt: {quality_level: 12.073970} + - gt: {details.dcg_query.quality_level: 13.848263} + - lt: {details.dcg_query.quality_level: 13.848264} + - match: {details.dcg_query.unknown_docs: [ ]} + - gt: {details.dcg_query_reverse.quality_level: 10.299674} + - lt: {details.dcg_query_reverse.quality_level: 10.299675} + - match: {details.dcg_query_reverse.unknown_docs: [ ]} diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml index 55efcdd104a..48ea593712e 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml @@ -34,9 +34,9 @@ "metric" : { "precision": { "ignore_unlabeled" : true }} } - - match: { rank_eval.quality_level: 1} - - match: { rank_eval.details.amsterdam_query.quality_level: 1.0} - - match: { rank_eval.details.amsterdam_query.unknown_docs: [ ]} - - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.amsterdam_query.unknown_docs: [ ]} + - match: { 
details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} - - is_true: rank_eval.failures.invalid_query + - is_true: failures.invalid_query diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index f29daf79912..479fe78cc80 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -52,13 +52,6 @@ dependencies { testCompile project(path: ':modules:parent-join', configuration: 'runtime') } -dependencyLicenses { - // Don't check the client's license. We know it. - dependencies = project.configurations.runtime.fileCollection { - it.group.startsWith('org.elasticsearch') == false - } - project.configurations.provided -} - thirdPartyAudit.excludes = [ // Commons logging 'javax.servlet.ServletContextEvent', diff --git a/plugins/examples/meta-plugin/build.gradle b/plugins/examples/meta-plugin/build.gradle index 3674837b0b2..db28e637871 100644 --- a/plugins/examples/meta-plugin/build.gradle +++ b/plugins/examples/meta-plugin/build.gradle @@ -18,39 +18,11 @@ */ // A meta plugin packaging example that bundles multiple plugins in a single zip. 
-apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' -File plugins = new File(buildDir, 'plugins-unzip') -subprojects { - // unzip the subproject plugins - task unzip(type:Copy, dependsOn: "${project.path}:bundlePlugin") { - File dest = new File(plugins, project.name) - from { zipTree(project(project.path).bundlePlugin.outputs.files.singleFile) } - eachFile { f -> f.path = f.path.replaceFirst('elasticsearch', '') } - into dest - } +apply plugin: 'elasticsearch.es-meta-plugin' + +es_meta_plugin { + name 'meta-plugin' + description 'example meta plugin' + plugins = ['dummy-plugin1', 'dummy-plugin2'] } - -// Build the meta plugin zip from the subproject plugins (unzipped) -task buildZip(type:Zip) { - subprojects.each { dependsOn("${it.name}:unzip") } - from plugins - from 'src/main/resources/meta-plugin-descriptor.properties' - into 'elasticsearch' - includeEmptyDirs false -} - -integTestCluster { - dependsOn buildZip - - // This is important, so that all the modules are available too. - // There are index templates that use token filters that are in analysis-module and - // processors are being used that are in ingest-common module. - distribution = 'zip' - - // Install the meta plugin before start. 
- setupCommand 'installMetaPlugin', - 'bin/elasticsearch-plugin', 'install', 'file:' + buildZip.archivePath -} -check.dependsOn integTest diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 9917bf79f59..acea1ca5d48 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -31,15 +31,15 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesReadContext; -import org.elasticsearch.nio.BytesWriteContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; +import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; @@ -53,6 +53,7 @@ import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.concurrent.ConcurrentMap; +import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Setting.intSetting; @@ -72,12 +73,12 @@ public class NioTransport extends TcpTransport { public static final Setting NIO_ACCEPTOR_COUNT = intSetting("transport.nio.acceptor_count", 1, 1, 
Setting.Property.NodeScope); - private final PageCacheRecycler pageCacheRecycler; + protected final PageCacheRecycler pageCacheRecycler; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private volatile NioGroup nioGroup; private volatile TcpChannelFactory clientChannelFactory; - NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + protected NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); @@ -111,13 +112,13 @@ public class NioTransport extends TcpTransport { NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); - clientChannelFactory = new TcpChannelFactory(clientProfileSettings); + clientChannelFactory = channelFactory(clientProfileSettings, true); if (useNetworkServer) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = profileSettings.profileName; - TcpChannelFactory factory = new TcpChannelFactory(profileSettings); + TcpChannelFactory factory = channelFactory(profileSettings, false); profileToChannelFactory.putIfAbsent(profileName, factory); bindServer(profileSettings); } @@ -144,19 +145,30 @@ public class NioTransport extends TcpTransport { profileToChannelFactory.clear(); } - private void exceptionCaught(NioSocketChannel channel, Exception exception) { + protected void exceptionCaught(NioSocketChannel channel, Exception exception) { onException((TcpChannel) channel, exception); } - private void acceptChannel(NioSocketChannel channel) { + protected void 
acceptChannel(NioSocketChannel channel) { serverAcceptedChannel((TcpNioSocketChannel) channel); } - private class TcpChannelFactory extends ChannelFactory { + protected TcpChannelFactory channelFactory(ProfileSettings settings, boolean isClient) { + return new TcpChannelFactoryImpl(settings); + } + + protected abstract class TcpChannelFactory extends ChannelFactory { + + protected TcpChannelFactory(RawChannelFactory rawChannelFactory) { + super(rawChannelFactory); + } + } + + private class TcpChannelFactoryImpl extends TcpChannelFactory { private final String profileName; - TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) { + private TcpChannelFactoryImpl(ProfileSettings profileSettings) { super(new RawChannelFactory(profileSettings.tcpNoDelay, profileSettings.tcpKeepAlive, profileSettings.reuseAddress, @@ -172,18 +184,21 @@ public class NioTransport extends TcpTransport { Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught); + BiConsumer exceptionHandler = NioTransport.this::exceptionCaught; + BytesChannelContext context = new BytesChannelContext(nioChannel, exceptionHandler, nioReadConsumer, + new InboundChannelBuffer(pageSupplier)); + nioChannel.setContext(context); return nioChannel; } @Override public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { - TcpNioServerSocketChannel nioServerChannel = new 
TcpNioServerSocketChannel(profileName, channel, this, selector); - nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel); - return nioServerChannel; + TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); + ServerChannelContext context = new ServerChannelContext(nioChannel, NioTransport.this::acceptChannel, (c, e) -> {}); + nioChannel.setContext(context); + return nioChannel; } } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index 7f657c76348..683ae146cfb 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -38,9 +38,9 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements private final String profile; - TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, - ChannelFactory channelFactory, - AcceptingSelector selector) throws IOException { + public TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, + ChannelFactory channelFactory, + AcceptingSelector selector) throws IOException { super(socketChannel, channelFactory, selector); this.profile = profile; } @@ -60,6 +60,11 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements return null; } + @Override + public void close() { + getSelector().queueChannelClose(this); + } + @Override public String getProfile() { return profile; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java index 5633899a04b..c2064e53ca6 100644 --- 
a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java @@ -33,13 +33,13 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel private final String profile; - TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException { + public TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException { super(socketChannel, selector); this.profile = profile; } public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } @Override @@ -59,6 +59,11 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel addCloseListener(ActionListener.toBiConsumer(listener)); } + @Override + public void close() { + getContext().closeChannel(); + } + @Override public String toString() { return "TcpNioSocketChannel{" + diff --git a/qa/build.gradle b/qa/build.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml index 0a59b7d0733..9dfbecce75b 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml @@ -67,6 +67,6 @@ "metric" : { "precision": { }} } - - match: {rank_eval.quality_level: 0.5833333333333333} - - match: {rank_eval.details.berlin_query.unknown_docs.0._id: "doc4"} - - match: 
{rank_eval.details.amsterdam_query.unknown_docs.0._id: "doc4"} + - match: {quality_level: 0.5833333333333333} + - match: {details.berlin_query.unknown_docs.0._id: "doc4"} + - match: {details.amsterdam_query.unknown_docs.0._id: "doc4"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml index 7be97cda1fe..82655c5778d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml @@ -4,7 +4,14 @@ setup: - do: indices.create: index: test1 + wait_for_active_shards: all body: + settings: + # Limit the number of shards so that shards are unlikely + # to be relocated or being initialized between the test + # set up and the test execution + index.number_of_shards: 3 + index.number_of_replicas: 0 mappings: bar: properties: @@ -20,6 +27,11 @@ setup: fields: completion: type: completion + + - do: + cluster.health: + wait_for_no_relocating_shards: true + - do: index: index: test1 @@ -29,10 +41,10 @@ setup: - do: index: - index: test2 - type: baz - id: 1 - body: { "bar": "bar", "baz": "baz" } + index: test1 + type: bar + id: 2 + body: { "bar": "foo", "baz": "foo" } - do: indices.refresh: {} @@ -57,18 +69,17 @@ setup: completion: field: baz.completion - - do: - indices.refresh: {} - - do: search: - sort: bar,baz + body: + sort: [ "bar", "baz" ] --- "Fields - blank": - do: indices.stats: {} + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields - gt: { _all.total.completion.size_in_bytes: 0 } @@ -79,6 +90,7 @@ setup: - do: indices.stats: { fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -90,6 +102,7 @@ setup: - 
do: indices.stats: { fields: "bar,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -102,6 +115,7 @@ setup: - do: indices.stats: { fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } @@ -114,6 +128,7 @@ setup: - do: indices.stats: { fields: "bar*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -126,6 +141,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -138,6 +154,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -148,6 +165,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: completion } + - match: { _shards.failed: 0} - is_false: _all.total.fielddata - gt: { _all.total.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } @@ -158,6 +176,7 @@ setup: - do: indices.stats: { fields: "bar*" , metric: [ completion, fielddata, search ]} + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -170,6 +189,7 @@ setup: - do: indices.stats: { 
fielddata_fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -179,6 +199,7 @@ setup: - do: indices.stats: { fielddata_fields: "bar,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -188,6 +209,7 @@ setup: - do: indices.stats: { fielddata_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -197,6 +219,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -207,6 +230,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -216,6 +240,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -226,6 +251,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: [ fielddata, search] } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -236,6 +262,7 @@ setup: - do: indices.stats: { completion_fields: bar.completion } + - match: { _shards.failed: 0} 
- gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -245,6 +272,7 @@ setup: - do: indices.stats: { completion_fields: "bar.completion,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -254,6 +282,7 @@ setup: - do: indices.stats: { completion_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -263,6 +292,7 @@ setup: - do: indices.stats: { completion_fields: "*r*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -272,6 +302,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -281,6 +312,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: completion } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -290,6 +322,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: [ completion, search ] } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields diff --git a/server/build.gradle 
b/server/build.gradle index 327f267ee8f..c11c88dfc6e 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -90,7 +90,7 @@ dependencies { compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.9.5' + compile 'joda-time:joda-time:2.9.9' // json and yaml compile "org.yaml:snakeyaml:${versions.snakeyaml}" diff --git a/server/licenses/joda-time-2.9.5.jar.sha1 b/server/licenses/joda-time-2.9.5.jar.sha1 deleted file mode 100644 index ecf1c781556..00000000000 --- a/server/licenses/joda-time-2.9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f01da7306363fad2028b916f3eab926262de928 \ No newline at end of file diff --git a/server/licenses/joda-time-2.9.9.jar.sha1 b/server/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 00000000000..4009932ea3b --- /dev/null +++ b/server/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index df748e79592..b741c34fab9 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -148,6 +148,8 @@ public class Version implements Comparable { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final int V_6_3_0_ID = 6030099; + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); @@ -166,6 +168,8 @@ public class Version implements Comparable { switch (id) { case V_7_0_0_alpha1_ID: 
return V_7_0_0_alpha1; + case V_6_3_0_ID: + return V_6_3_0; case V_6_2_0_ID: return V_6_2_0; case V_6_1_3_ID: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index b85962c0f55..4607586d9fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -22,13 +22,23 @@ package org.elasticsearch.action.admin.indices.close; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * A response for a close index action. 
*/ -public class CloseIndexResponse extends AcknowledgedResponse { +public class CloseIndexResponse extends AcknowledgedResponse implements ToXContentObject { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("close_index", true, + args -> new CloseIndexResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } CloseIndexResponse() { } @@ -48,4 +58,16 @@ public class CloseIndexResponse extends AcknowledgedResponse { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static CloseIndexResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 244b8a24b9b..362f54b74ab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.close; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java index 95fef9fc653..4e98c60265c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java @@ -37,7 +37,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru /** * A response for a open index action. */ -public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject { +public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject { private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged"; private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED); diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index fa8c46edf5b..9bfb78f5058 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import java.util.Arrays; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -893,4 +894,28 @@ public class UpdateRequest extends InstanceShardOperationRequest builder.endObject(); return builder; } + + @Override + public String toString() { + StringBuilder res = new StringBuilder() + .append("update {[").append(index) + .append("][").append(type) + .append("][").append(id).append("]"); + res.append(", doc_as_upsert[").append(docAsUpsert).append("]"); + if (doc != null) { + res.append(", doc[").append(doc).append("]"); + } + if (script != null) { + res.append(", script[").append(script).append("]"); + } + if (upsertRequest != null) { + 
res.append(", upsert[").append(upsertRequest).append("]"); + } + res.append(", scripted_upsert[").append(scriptedUpsert).append("]"); + res.append(", detect_noop[").append(detectNoop).append("]"); + if (fields != null) { + res.append(", fields[").append(Arrays.toString(fields)).append("]"); + } + return res.append("}").toString(); + } } diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index dc0f7b01563..c9c575df724 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -195,8 +196,16 @@ public abstract class TransportClient extends AbstractClient { final TransportClientNodesService nodesService = new TransportClientNodesService(settings, transportService, threadPool, failureListner == null ? 
(t, e) -> {} : failureListner); - final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, - actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList())); + + // construct the list of client actions + final List actionPlugins = pluginsService.filterPlugins(ActionPlugin.class); + final List clientActions = + actionPlugins.stream().flatMap(p -> p.getClientActions().stream()).collect(Collectors.toList()); + // add all the base actions + final List> baseActions = + actionModule.getActions().values().stream().map(ActionPlugin.ActionHandler::getAction).collect(Collectors.toList()); + clientActions.addAll(baseActions); + final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, clientActions); List pluginLifecycleComponents = new ArrayList<>(pluginsService.getGuiceServiceClasses().stream() .map(injector::getInstance).collect(Collectors.toList())); diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 333afebb641..7c1672603ae 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -700,7 +700,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust try { Translog translog = shard.getTranslog(); if (translog.syncNeeded()) { - translog.sync(); + shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index bf9045c5d00..37e96cbb54a 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -47,8 +47,8 @@ public 
class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { if (settings.get("ignore_case") != null) { deprecationLogger.deprecated( - "This tokenize synonyms with whatever tokenizer and token filters appear before it in the chain. " + - "If you need ignore case with this filter, you should set lowercase filter before this"); + "The ignore_case option on the synonym_graph filter is deprecated. " + + "Instead, insert a lowercase filter in the filter chain before the synonym_graph filter."); } this.expand = settings.getAsBoolean("expand", true); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index ca0d93fa7c5..48a3caf0ea3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -47,8 +47,8 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final LongSupplier globalCheckpointSupplier; private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. - private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. - private IndexCommit lastCommit; // the most recent commit point + private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. 
+ private volatile IndexCommit lastCommit; // the most recent commit point CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { @@ -214,6 +214,21 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { return 0; } + /** + * Checks if the deletion policy can release some index commits with the latest global checkpoint. + */ + boolean hasUnreferencedCommits() throws IOException { + final IndexCommit lastCommit = this.lastCommit; + if (safeCommit != lastCommit) { // Race condition can happen but harmless + if (lastCommit.getUserData().containsKey(SequenceNumbers.MAX_SEQ_NO)) { + final long maxSeqNoFromLastCommit = Long.parseLong(lastCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + // We can clean up the current safe commit if the last commit is safe + return globalCheckpointSupplier.getAsLong() >= maxSeqNoFromLastCommit; + } + } + return false; + } + /** * A wrapper of an index commit that prevents it from being deleted. */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b73bfb78f3c..7feaeb63ac3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -91,6 +91,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; +import java.util.stream.Stream; public abstract class Engine implements Closeable { @@ -549,6 +550,13 @@ public abstract class Engine implements Closeable { /** returns the translog for this engine */ public abstract Translog getTranslog(); + /** + * Ensures that all locations in the given stream have been written to the underlying storage. 
+ */ + public abstract boolean ensureTranslogSynced(Stream locations) throws IOException; + + public abstract void syncTranslog() throws IOException; + protected void ensureOpen() { if (isClosed.get()) { throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index ecbf015b472..9517404f2c1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -31,7 +31,6 @@ import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; @@ -95,6 +94,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import java.util.function.LongSupplier; +import java.util.stream.Stream; public class InternalEngine extends Engine { @@ -521,6 +521,27 @@ public class InternalEngine extends Engine { return translog; } + @Override + public boolean ensureTranslogSynced(Stream locations) throws IOException { + final boolean synced = translog.ensureSynced(locations); + if (synced) { + revisitIndexDeletionPolicyOnTranslogSynced(); + } + return synced; + } + + @Override + public void syncTranslog() throws IOException { + translog.sync(); + revisitIndexDeletionPolicyOnTranslogSynced(); + } + + private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { + if (combinedDeletionPolicy.hasUnreferencedCommits()) { + indexWriter.deleteUnusedFiles(); + } + } + @Override public String getHistoryUUID() { return 
historyUUID; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 8194b888615..a9d8df1cb26 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -48,7 +48,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo case "none": return s; default: - throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]"); + throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); } }, Property.IndexScope); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index 6dd9552b690..ed9dd14328d 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -32,9 +32,9 @@ import java.util.concurrent.ConcurrentMap; public class ShardFieldData implements IndexFieldDataCache.Listener { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric totalMetric = new CounterMetric(); - final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); + private final CounterMetric evictionsMetric = new CounterMetric(); + private final CounterMetric totalMetric = new CounterMetric(); + private final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); public FieldDataStats stats(String... 
fields) { ObjectLongHashMap fieldTotals = null; diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index 9f91b163592..aea3677e33e 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -86,11 +87,11 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { } /** - * Rethrow the runtime exception, unless the lenient flag has been set, returns null + * Rethrow the runtime exception, unless the lenient flag has been set, returns {@link MatchNoDocsQuery} */ private Query rethrowUnlessLenient(RuntimeException e) { if (settings.lenient()) { - return null; + return Queries.newMatchNoDocsQuery("failed query, caused by " + e.getMessage()); } throw e; } @@ -115,7 +116,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { try { return queryBuilder.parse(MultiMatchQueryBuilder.Type.MOST_FIELDS, weights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } } @@ -135,7 +136,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { settings.fuzzyMaxExpansions, settings.fuzzyTranspositions); disjuncts.add(wrapWithBoost(query, entry.getValue())); } catch (RuntimeException e) { - rethrowUnlessLenient(e); + 
disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { @@ -156,7 +157,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { } return queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, phraseWeights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } finally { queryBuilder.setPhraseSlop(0); } @@ -184,7 +185,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { disjuncts.add(wrapWithBoost(query, entry.getValue())); } } catch (RuntimeException e) { - return rethrowUnlessLenient(e); + disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index 95e3505e746..0ec03cb7a8f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -130,10 +130,9 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction< } private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { - final Translog translog = indexShard.getTranslog(); if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { - indexShard.getTranslog().sync(); + indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 57c12a0ea70..7fe32ae8a47 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ 
b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2315,8 +2315,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl @Override protected void write(List>> candidates) throws IOException { try { - final Engine engine = getEngine(); - engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1)); + getEngine().ensureTranslogSynced(candidates.stream().map(Tuple::v1)); } catch (AlreadyClosedException ex) { // that's fine since we already synced everything on engine close - this also is conform with the methods // documentation @@ -2341,9 +2340,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl translogSyncProcessor.put(location, syncListener); } - public final void sync() throws IOException { + public void sync() throws IOException { verifyNotClosed(); - getEngine().getTranslog().sync(); + getEngine().syncTranslog(); } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index 41f0ed86116..7454d74349e 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.function.Supplier; import java.util.function.UnaryOperator; +import java.util.stream.Collectors; /** * An additional extension point for {@link Plugin}s that extends Elasticsearch's scripting functionality. Implement it like this: @@ -62,6 +63,15 @@ public interface ActionPlugin { default List> getActions() { return Collections.emptyList(); } + + /** + * Client actions added by this plugin. This defaults to all of the {@linkplain GenericAction} in + * {@linkplain ActionPlugin#getActions()}. 
+ */ + default List getClientActions() { + return getActions().stream().map(a -> a.action).collect(Collectors.toList()); + } + /** * Action filters added by this plugin. */ diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java new file mode 100644 index 00000000000..e616e038311 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.close; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.CoreMatchers.equalTo; + +public class CloseIndexResponseTests extends ESTestCase { + + public void testFromToXContent() throws IOException { + final CloseIndexResponse closeIndexResponse = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(closeIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (randomBoolean()) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + + CloseIndexResponse parsedCloseIndexResponse; + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + parsedCloseIndexResponse = CloseIndexResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + assertThat(parsedCloseIndexResponse.isAcknowledged(), equalTo(closeIndexResponse.isAcknowledged())); + } + + private static CloseIndexResponse createTestItem() { + boolean acknowledged = randomBoolean(); + return new CloseIndexResponse(acknowledged); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 7049d0fa9e9..36266026504 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -639,4 +639,16 @@ public class 
UpdateRequestTests extends ESTestCase { assertThat(result.action(), instanceOf(UpdateResponse.class)); assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.NOOP)); } + + public void testToString() throws IOException { + UpdateRequest request = new UpdateRequest("test", "type1", "1") + .script(mockInlineScript("ctx._source.body = \"foo\"")); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = \"foo\"', options={}, params={}}], " + + "scripted_upsert[false], detect_noop[true]}")); + request = new UpdateRequest("test", "type1", "1").fromXContent( + createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "doc[index {[null][null][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index ca6059dae00..d4af7836810 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -244,6 +244,44 @@ public class CombinedDeletionPolicyTests extends ESTestCase { equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); } + public void testCheckUnreferencedCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final UUID translogUUID = UUID.randomUUID(); + final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); + final 
List commitList = new ArrayList<>(); + int totalCommits = between(2, 20); + long lastMaxSeqNo = between(1, 1000); + long lastTranslogGen = between(1, 50); + for (int i = 0; i < totalCommits; i++) { + lastMaxSeqNo += between(1, 10000); + lastTranslogGen += between(1, 100); + commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + } + IndexCommit safeCommit = randomFrom(commitList); + globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + indexPolicy.onCommit(commitList); + if (safeCommit == commitList.get(commitList.size() - 1)) { + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } else { + // Advanced but not enough + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), lastMaxSeqNo - 1)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + // Advanced enough + globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(true)); + indexPolicy.onCommit(commitList); + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } + } + IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java 
b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 86b15d800fc..b8cd0979854 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4278,4 +4278,29 @@ public class InternalEngineTests extends EngineTestCase { assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1")); } } + + public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + final int numDocs = scaledRandomIntBetween(10, 100); + for (int docId = 0; docId < numDocs; docId++) { + index(engine, docId); + if (frequently()) { + engine.flush(randomBoolean(), randomBoolean()); + } + } + engine.flush(false, randomBoolean()); + List commits = DirectoryReader.listCommits(store.directory()); + // Global checkpoint advanced but not enough - all commits are kept. + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint() - 1)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); + // Global checkpoint advanced enough - only the last commit is kept. 
+ globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpointTracker().getCheckpoint(), Long.MAX_VALUE)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index bfc6fd06004..dc7c56ce04e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -46,15 +46,18 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -607,6 +610,21 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase 0); + + Query query = new SimpleQueryStringBuilder("t*") + .field(DATE_FIELD_NAME) + .field(STRING_FIELD_NAME) + .lenient(true) + .toQuery(createShardContext()); + List expectedQueries = new ArrayList<>(); + expectedQueries.add(new MatchNoDocsQuery("")); + expectedQueries.add(new PrefixQuery(new Term(STRING_FIELD_NAME, "t"))); + DisjunctionMaxQuery expected = new DisjunctionMaxQuery(expectedQueries, 1.0f); + assertEquals(expected, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) 
.put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1b3af027808..a684bab5ac3 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -658,7 +658,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected void performOnReplica(final GlobalCheckpointSyncAction.Request request, final IndexShard replica) throws IOException { - replica.getTranslog().sync(); + replica.sync(); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 618714fc9d9..3fc62673de0 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -123,9 +123,9 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase { } if (durability == Translog.Durability.ASYNC || lastSyncedGlobalCheckpoint == globalCheckpoint) { - verify(translog, never()).sync(); + verify(indexShard, never()).sync(); } else { - verify(translog).sync(); + verify(indexShard).sync(); } } diff --git a/settings.gradle b/settings.gradle index 46ecb3dad1c..e3a24ea148d 100644 --- a/settings.gradle +++ b/settings.gradle @@ -27,80 +27,58 @@ List projects = [ 'test:fixtures:hdfs-fixture', 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', - 'test:logger-usage', - 'libs:elasticsearch-core', - 'libs:elasticsearch-nio', - 'modules:aggs-matrix-stats', - 'modules:analysis-common', - 'modules:ingest-common', - 'modules:lang-expression', - 'modules:lang-mustache', - 
'modules:lang-painless', - 'modules:mapper-extras', - 'modules:parent-join', - 'modules:percolator', - 'modules:rank-eval', - 'modules:reindex', - 'modules:repository-url', - 'modules:transport-netty4', - 'modules:tribe', - 'plugins:analysis-icu', - 'plugins:analysis-kuromoji', - 'plugins:analysis-phonetic', - 'plugins:analysis-smartcn', - 'plugins:analysis-stempel', - 'plugins:analysis-ukrainian', - 'plugins:discovery-azure-classic', - 'plugins:discovery-ec2', - 'plugins:discovery-file', - 'plugins:discovery-gce', - 'plugins:ingest-geoip', - 'plugins:ingest-attachment', - 'plugins:ingest-user-agent', - 'plugins:mapper-murmur3', - 'plugins:mapper-size', - 'plugins:repository-azure', - 'plugins:repository-gcs', - 'plugins:repository-hdfs', - 'plugins:repository-s3', - 'plugins:jvm-example', - 'plugins:store-smb', - 'plugins:transport-nio', - 'qa:auto-create-index', - 'qa:ccs-unavailable-clusters', - 'qa:evil-tests', - 'qa:full-cluster-restart', - 'qa:integration-bwc', - 'qa:mixed-cluster', - 'qa:multi-cluster-search', - 'qa:no-bootstrap-tests', - 'qa:reindex-from-old', - 'qa:rolling-upgrade', - 'qa:smoke-test-client', - 'qa:smoke-test-http', - 'qa:smoke-test-ingest-with-all-dependencies', - 'qa:smoke-test-ingest-disabled', - 'qa:smoke-test-multinode', - 'qa:smoke-test-rank-eval-with-mustache', - 'qa:smoke-test-plugins', - 'qa:smoke-test-reindex-with-all-modules', - 'qa:smoke-test-tribe-node', - 'qa:vagrant', - 'qa:verify-version-constants', - 'qa:wildfly', - 'qa:query-builder-bwc' + 'test:logger-usage' ] -projects.add("libs") -File libsDir = new File(rootProject.projectDir, 'libs') -for (File libDir : new File(rootProject.projectDir, 'libs').listFiles()) { - if (libDir.isDirectory() == false) continue; - if (libDir.name.startsWith('build') || libDir.name.startsWith('.')) continue; - projects.add("libs:${libDir.name}".toString()) +/** + * Iterates over sub directories, looking for build.gradle, and adds a project if found + * for that dir with the given path prefix. 
Note that this requires each level + * of the dir hierarchy to have a build.gradle. Otherwise we would have to iterate + * all files/directories in the source tree to find all projects. + */ +void addSubProjects(String path, File dir, List projects, List branches) { + if (dir.isDirectory() == false) return; + if (dir.name == 'buildSrc') return; + if (new File(dir, 'build.gradle').exists() == false) return; + if (findProject(dir) != null) return; + + final String projectName = "${path}:${dir.name}" + include projectName + + if (dir.name == 'bwc-snapshot-dummy-projects') { + for (final String branch : branches) { + final String snapshotProjectName = "${projectName}:bwc-snapshot-${branch}" + projects.add(snapshotProjectName) + include snapshotProjectName + project("${snapshotProjectName}").projectDir = dir + } + // TODO do we want to assert that there's nothing else in the bwc directory? + } else { + if (path.isEmpty() || path.startsWith(':example-plugins')) { + project(projectName).projectDir = dir + } + for (File subdir : dir.listFiles()) { + addSubProjects(projectName, subdir, projects, branches) + } + } } +// include example plugins first, so adding plugin dirs below won't muck with :example-plugins +File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') +for (File example : examplePluginsDir.listFiles()) { + if (example.isDirectory() == false) continue; + if (example.name.startsWith('build') || example.name.startsWith('.')) continue; + addSubProjects(':example-plugins', example, projects, []) +} +project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') + +addSubProjects('', new File(rootProject.projectDir, 'libs'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'modules'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'plugins'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'qa'), projects, []) + /* Create projects for building BWC snapshot 
distributions from the heads of other branches */ -final List branches = ['5.6', '6.0', '6.1', '6.x'] +final List branches = ['5.6', '6.0', '6.1', '6.2', '6.x'] for (final String branch : branches) { projects.add("distribution:bwc-snapshot-${branch}".toString()) } @@ -139,47 +117,6 @@ if (isEclipse) { project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' } -/** - * Iterates over sub directories, looking for build.gradle, and adds a project if found - * for that dir with the given path prefix. Note that this requires each level - * of the dir hierarchy to have a build.gradle. Otherwise we would have to iterate - * all files/directories in the source tree to find all projects. - */ -void addSubProjects(String path, File dir, List projects, List branches) { - if (dir.isDirectory() == false) return; - if (dir.name == 'buildSrc') return; - if (new File(dir, 'build.gradle').exists() == false) return; - - final String projectName = "${path}:${dir.name}" - include projectName - - if (dir.name == 'bwc-snapshot-dummy-projects') { - for (final String branch : branches) { - final String snapshotProjectName = "${projectName}:bwc-snapshot-${branch}" - projects.add(snapshotProjectName) - include snapshotProjectName - project("${snapshotProjectName}").projectDir = dir - } - // TODO do we want to assert that there's nothing else in the bwc directory? 
- } else { - if (path.isEmpty() || path.startsWith(':example-plugins')) { - project(projectName).projectDir = dir - } - for (File subdir : dir.listFiles()) { - addSubProjects(projectName, subdir, projects, branches) - } - } -} - -// include example plugins -File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') -for (File example : examplePluginsDir.listFiles()) { - if (example.isDirectory() == false) continue; - if (example.name.startsWith('build') || example.name.startsWith('.')) continue; - addSubProjects(':example-plugins', example, projects, []) -} -project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') - // look for extra plugins for elasticsearch File extraProjects = new File(rootProject.projectDir.parentFile, "${dirName}-extra") if (extraProjects.exists()) { @@ -187,5 +124,4 @@ if (extraProjects.exists()) { addSubProjects('', extraProjectDir, projects, branches) } } -include 'libs' diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 6e2f43ae752..01fd3bad0f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -165,7 +165,7 @@ public class ClientYamlTestClient { Header[] requestHeaders = new Header[headers.size()]; int index = 0; for (Map.Entry header : headers.entrySet()) { - logger.info("Adding header {} with value {}", header.getKey(), header.getValue()); + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 20971b3865e..ed0431d9678 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1927,16 +1927,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testHandshakeWithIncompatVersion() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Version version = Version.fromString("2.0.0"); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, - Collections.emptySet())) { + try (MockTransportService service = build(Settings.EMPTY, version, null, true)) { service.start(); service.acceptIncomingRequests(); - DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); + TransportAddress address = service.boundAddress().publishAddress(); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), version0); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK, @@ -1950,17 +1946,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testHandshakeUpdatesVersion() throws IOException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - 
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, - Collections.emptySet())) { + try (MockTransportService service = build(Settings.EMPTY, version, null, true)) { service.start(); service.acceptIncomingRequests(); - DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), - Version.fromString("2.0.0")); + TransportAddress address = service.boundAddress().publishAddress(); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), Version.fromString("2.0.0")); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK, diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index a8876453b5b..ec262261e54 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,14 +31,14 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesReadContext; -import org.elasticsearch.nio.BytesWriteContext; +import 
org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; @@ -162,18 +162,19 @@ public class MockNioTransport extends TcpTransport { Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - BytesWriteContext writeContext = new BytesWriteContext(nioChannel); - nioChannel.setContexts(readContext, writeContext, MockNioTransport.this::exceptionCaught); + BytesChannelContext context = new BytesChannelContext(nioChannel, MockNioTransport.this::exceptionCaught, nioReadConsumer, + new InboundChannelBuffer(pageSupplier)); + nioChannel.setContext(context); return nioChannel; } @Override public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector); - nioServerChannel.setAcceptContext(MockNioTransport.this::acceptChannel); + ServerChannelContext context = new ServerChannelContext(nioServerChannel, MockNioTransport.this::acceptChannel, (c, e) -> {}); + 
nioServerChannel.setContext(context); return nioServerChannel; } } @@ -188,6 +189,11 @@ public class MockNioTransport extends TcpTransport { this.profile = profile; } + @Override + public void close() { + getSelector().queueChannelClose(this); + } + @Override public String getProfile() { return profile; @@ -224,6 +230,11 @@ public class MockNioTransport extends TcpTransport { this.profile = profile; } + @Override + public void close() { + getContext().closeChannel(); + } + @Override public String getProfile() { return profile; @@ -243,7 +254,7 @@ public class MockNioTransport extends TcpTransport { @Override public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } }