diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 6b14e7f4f7f..ef2372c003d 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -325,7 +325,7 @@ vagrant plugin install vagrant-cachier
 . Validate your installed dependencies:
 
 -------------------------------------
-gradle :qa:vagrant:checkVagrantVersion
+gradle :qa:vagrant:vagrantCheckVersion
 -------------------------------------
 
 . Download and smoke test the VMs with `gradle vagrantSmokeTest` or
@@ -417,17 +417,26 @@ and in another window:
 ----------------------------------------------------
 vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
-cd $TESTROOT
-sudo bats $BATS/*rpm*.bats
+cd $BATS_ARCHIVES
+sudo -E bats $BATS_TESTS/*rpm*.bats
 ----------------------------------------------------
 
 If you wanted to retest all the release artifacts on a single VM you could:
 
 -------------------------------------------------
-gradle prepareTestRoot
+gradle vagrantSetUp
 vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
-cd $TESTROOT
-sudo bats $BATS/*.bats
+cd $BATS_ARCHIVES
+sudo -E bats $BATS_TESTS/*.bats
+-------------------------------------------------
+
+Note: Starting a vagrant VM outside of the elasticsearch folder requires you to
+indicate the folder that contains the Vagrantfile using the VAGRANT_CWD
+environment variable:
+
+-------------------------------------------------
+gradle vagrantSetUp
+VAGRANT_CWD=/path/to/elasticsearch vagrant up centos-7 --provider virtualbox
 -------------------------------------------------
 
 == Coverage analysis
diff --git a/Vagrantfile b/Vagrantfile
index 96151724d13..592f0fdc4a5 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -77,6 +77,9 @@ Vagrant.configure(2) do |config|
   # the elasticsearch project called vagrant....
   config.vm.synced_folder ".", "/vagrant", disabled: true
   config.vm.synced_folder ".", "/elasticsearch"
+  # Expose project directory
+  PROJECT_DIR = ENV['VAGRANT_PROJECT_DIR'] || Dir.pwd
+  config.vm.synced_folder PROJECT_DIR, "/project"
   config.vm.provider "virtualbox" do |v|
     # Give the boxes 3GB because Elasticsearch defaults to using 2GB
     v.memory = 3072
@@ -272,8 +275,10 @@ export ZIP=/elasticsearch/distribution/zip/build/distributions
 export TAR=/elasticsearch/distribution/tar/build/distributions
 export RPM=/elasticsearch/distribution/rpm/build/distributions
 export DEB=/elasticsearch/distribution/deb/build/distributions
-export TESTROOT=/elasticsearch/qa/vagrant/build/testroot
-export BATS=/elasticsearch/qa/vagrant/src/test/resources/packaging/scripts
+export BATS=/project/build/bats
+export BATS_UTILS=/project/build/bats/utils
+export BATS_TESTS=/project/build/bats/tests
+export BATS_ARCHIVES=/project/build/bats/archives
 VARS
   SHELL
 end
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 65402290e01..628e59de1a6 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -495,6 +495,8 @@ class BuildPlugin implements Plugin<Project> {
             systemProperty 'tests.artifact', project.name
             systemProperty 'tests.task', path
             systemProperty 'tests.security.manager', 'true'
+            // Breaking change in JDK-9, revert to JDK-8 behavior for now, see https://github.com/elastic/elasticsearch/issues/21534
+            systemProperty 'jdk.io.permissionsUseCanonicalPath', 'true'
             systemProperty 'jna.nosys', 'true'
             // default test sysprop values
             systemProperty 'tests.ifNoTests', 'fail'
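The documented VAGRANT_CWD workflow can also be wired into a build script. A minimal Groovy sketch of a Gradle task that starts a box from outside the elasticsearch checkout; the task name and the path are illustrative only:

-------------------------------------------------
// build.gradle -- hypothetical helper task, names and paths are examples
task bringUpCentos7(type: Exec) {
    description 'Start the centos-7 box from outside the elasticsearch checkout'
    // Vagrant resolves the Vagrantfile relative to VAGRANT_CWD, not the working directory
    environment 'VAGRANT_CWD', '/path/to/elasticsearch'
    commandLine 'vagrant', 'up', 'centos-7', '--provider', 'virtualbox'
}
-------------------------------------------------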
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index 07306dd14ea..ca4957f7a6c 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -123,7 +123,7 @@ class ClusterConfiguration {
 
     Map<String, String> systemProperties = new HashMap<>()
 
-    Map<String, String> settings = new HashMap<>()
+    Map<String, Object> settings = new HashMap<>()
 
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
@@ -140,7 +140,7 @@ class ClusterConfiguration {
     }
 
     @Input
-    void setting(String name, String value) {
+    void setting(String name, Object value) {
         settings.put(name, value)
     }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
index c68e0528c9b..65b90c4d9a0 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
@@ -18,14 +18,7 @@
  */
 package org.elasticsearch.gradle.vagrant
 
-import org.gradle.api.DefaultTask
 import org.gradle.api.tasks.Input
-import org.gradle.api.tasks.TaskAction
-import org.gradle.logging.ProgressLoggerFactory
-import org.gradle.process.internal.ExecAction
-import org.gradle.process.internal.ExecActionFactory
-
-import javax.inject.Inject
 
 /**
  * Runs bats over vagrant. Pretty much like running it using Exec but with a
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
index d79c2533fab..ecba08d7d4c 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
@@ -34,11 +34,18 @@ public class VagrantCommandTask extends LoggedExec {
     @Input
     String boxName
 
+    @Input
+    Map<String, String> environmentVars
+
     public VagrantCommandTask() {
         executable = 'vagrant'
+
+        project.afterEvaluate {
             // It'd be nice if --machine-readable were, well, nice
             standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
+            if (environmentVars != null) {
+                environment environmentVars
+            }
+        }
     }
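Widening `setting(String, Object)` means cluster settings are no longer limited to plain strings. A hedged usage sketch; the `integTest { cluster { ... } }` block shape and the setting names are illustrative:

-------------------------------------------------
// build.gradle of a test project -- illustrative only; the value can now be
// any Object, so a Closure can defer evaluation until the cluster is started
integTest {
    cluster {
        setting 'cluster.name', 'my-it-cluster'                   // plain String, as before
        setting 'node.attr.os', { -> System.getProperty('os.name') } // lazily evaluated value
    }
}
-------------------------------------------------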
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
new file mode 100644
index 00000000000..f16913d5be6
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.vagrant
+
+import org.gradle.api.tasks.Input
+
+class VagrantPropertiesExtension {
+
+    @Input
+    List<String> boxes
+
+    @Input
+    Long testSeed
+
+    @Input
+    String formattedTestSeed
+
+    @Input
+    String upgradeFromVersion
+
+    @Input
+    List<String> upgradeFromVersions
+
+    @Input
+    String batsDir
+
+    @Input
+    Boolean inheritTests
+
+    @Input
+    Boolean inheritTestArchives
+
+    @Input
+    Boolean inheritTestUtils
+
+    VagrantPropertiesExtension(List<String> availableBoxes) {
+        this.boxes = availableBoxes
+        this.batsDir = 'src/test/resources/packaging'
+    }
+
+    void boxes(String... boxes) {
+        this.boxes = Arrays.asList(boxes)
+    }
+
+    void setBatsDir(String batsDir) {
+        this.batsDir = batsDir
+    }
+
+    void setInheritTests(Boolean inheritTests) {
+        this.inheritTests = inheritTests
+    }
+
+    void setInheritTestArchives(Boolean inheritTestArchives) {
+        this.inheritTestArchives = inheritTestArchives
+    }
+
+    void setInheritTestUtils(Boolean inheritTestUtils) {
+        this.inheritTestUtils = inheritTestUtils
+    }
+}
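A sketch of how a consuming build.gradle might configure the extension above; all values are illustrative:

-------------------------------------------------
// build.gradle -- hypothetical configuration; only 'boxes', 'batsDir' and the
// 'inherit*' flags exist on the extension defined above
esvagrant {
    boxes 'centos-7', 'ubuntu-1404'
    batsDir 'src/test/resources/packaging'
    inheritTests true
    inheritTestUtils true
}
-------------------------------------------------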
are ${BOXES}") + } + } + + // Creates all tasks related to the Vagrant boxes + createVagrantBoxesTasks(project) + } + + private List listVagrantBoxes(Project project) { + String vagrantBoxes = project.getProperties().get('vagrant.boxes', 'sample') + if (vagrantBoxes == 'sample') { + return SAMPLE + } else if (vagrantBoxes == 'all') { + return BOXES + } else { + return vagrantBoxes.split(',') + } + } + + private static Set listVersions(Project project) { + Node xml + new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> + xml = new XmlParser().parse(s) + } + Set versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /[5]\.\d\.\d/ }) + if (versions.isEmpty() == false) { + return versions; + } + + // If no version is found, we run the tests with the current version + return Collections.singleton(project.version); + } + + private static File getVersionsFile(Project project) { + File versions = new File(project.projectDir, 'versions'); + if (versions.exists() == false) { + // Use the elasticsearch's versions file from project :qa:vagrant + versions = project.project(":qa:vagrant").file('versions') + } + return versions + } + + private static void configureBatsRepositories(Project project) { + RepositoryHandler repos = project.repositories + + // Try maven central first, it'll have releases before 5.0.0 + repos.mavenCentral() + + /* Setup a repository that tries to download from + https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext] + which should work for 5.0.0+. This isn't a real ivy repository but gradle + is fine with that */ + repos.ivy { + artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]" + } + } + + private static void createBatsConfiguration(Project project) { + project.configurations.create(BATS) + + Long seed + String formattedSeed = null + String[] upgradeFromVersions + + String maybeTestsSeed = System.getProperty("tests.seed", null); + if (maybeTestsSeed != null) { + List seeds = maybeTestsSeed.tokenize(':') + if (seeds.size() != 0) { + String masterSeed = seeds.get(0) + seed = new BigInteger(masterSeed, 16).longValue() + formattedSeed = maybeTestsSeed + } + } + if (formattedSeed == null) { + seed = new Random().nextLong() + formattedSeed = String.format("%016X", seed) + } + + String maybeUpdradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null) + if (maybeUpdradeFromVersions != null) { + upgradeFromVersions = maybeUpdradeFromVersions.split(",") + } else { + upgradeFromVersions = getVersionsFile(project) + } + + String upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)] + + DISTRIBUTION_ARCHIVES.each { + // Adds a dependency for the current version + project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives')) + } + + UPGRADE_FROM_ARCHIVES.each { + // The version of elasticsearch that we upgrade *from* + project.dependencies.add(BATS, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") + } + + project.extensions.esvagrant.testSeed = seed + project.extensions.esvagrant.formattedTestSeed = formattedSeed + project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion + project.extensions.esvagrant.upgradeFromVersions = upgradeFromVersions + } + + private static void createCleanTask(Project project) { + project.tasks.create('clean', 
+    private static void createCleanTask(Project project) {
+        project.tasks.create('clean', Delete.class) {
+            description 'Clean the project build directory'
+            group 'Build'
+            delete project.buildDir
+        }
+    }
+
+    private static void createStopTask(Project project) {
+        project.tasks.create('stop') {
+            description 'Stop any tasks from tests that still may be running'
+            group 'Verification'
+        }
+    }
+
+    private static void createSmokeTestTask(Project project) {
+        project.tasks.create('vagrantSmokeTest') {
+            description 'Smoke test the specified vagrant boxes'
+            group 'Verification'
+        }
+    }
+
+    private static void createPrepareVagrantTestEnvTask(Project project) {
+        File batsDir = new File("${project.buildDir}/${BATS}")
+
+        Task createBatsDirsTask = project.tasks.create('createBatsDirs')
+        createBatsDirsTask.outputs.dir batsDir
+        createBatsDirsTask.dependsOn project.tasks.vagrantVerifyVersions
+        createBatsDirsTask.doLast {
+            batsDir.mkdirs()
+        }
+
+        Copy copyBatsArchives = project.tasks.create('copyBatsArchives', Copy) {
+            dependsOn createBatsDirsTask
+            into "${batsDir}/archives"
+            from project.configurations[BATS]
+        }
+
+        Copy copyBatsTests = project.tasks.create('copyBatsTests', Copy) {
+            dependsOn createBatsDirsTask
+            into "${batsDir}/tests"
+            from {
+                "${project.extensions.esvagrant.batsDir}/tests"
+            }
+        }
+
+        Copy copyBatsUtils = project.tasks.create('copyBatsUtils', Copy) {
+            dependsOn createBatsDirsTask
+            into "${batsDir}/utils"
+            from {
+                "${project.extensions.esvagrant.batsDir}/utils"
+            }
+        }
+
+        // Now we iterate over dependencies of the bats configuration. When a project dependency is found,
+        // we bring back its own archives, test files or test utils.
+        project.afterEvaluate {
+            project.configurations.bats.dependencies.findAll { it.configuration == BATS }.each { d ->
+                if (d instanceof DefaultProjectDependency) {
+                    DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
+                    Project externalBatsProject = externalBatsDependency.dependencyProject
+                    String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir
+
+                    if (project.extensions.esvagrant.inheritTests) {
+                        copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests"))
+                    }
+                    if (project.extensions.esvagrant.inheritTestArchives) {
+                        copyBatsArchives.from(externalBatsDependency.projectConfiguration.files)
+                    }
+                    if (project.extensions.esvagrant.inheritTestUtils) {
+                        copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils"))
+                    }
+                }
+            }
+        }
+
+        Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
+            dependsOn createBatsDirsTask
+            file "${batsDir}/archives/version"
+            contents project.version
+        }
+
+        Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) {
+            dependsOn createBatsDirsTask
+            file "${batsDir}/archives/upgrade_from_version"
+            contents project.extensions.esvagrant.upgradeFromVersion
+        }
+
+        Task vagrantSetUpTask = project.tasks.create('vagrantSetUp')
+        vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
+        vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
+        vagrantSetUpTask.doFirst {
+            project.gradle.addBuildListener new BuildAdapter() {
+                @Override
+                void buildFinished(BuildResult result) {
+                    if (result.failure) {
+                        println "Reproduce with: gradle packagingTest " +
+                                "-Pvagrant.boxes=${project.extensions.esvagrant.boxes} " +
+                                "-Dtests.seed=${project.extensions.esvagrant.formattedSeed} " +
+                                "-Dtests.packaging.upgrade.from.versions=${project.extensions.esvagrant.upgradeFromVersions.join(",")}"
+                    }
+                }
+            }
+        }
+    }
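A hedged sketch of a downstream packaging-test project wiring the `bats` configuration so that the `inherit*` flags above have something to copy from; the project path is illustrative:

-------------------------------------------------
// build.gradle of a hypothetical downstream project
apply plugin: 'elasticsearch.vagrant'

esvagrant {
    inheritTests true
    inheritTestUtils true
}

dependencies {
    // project dependencies on the 'bats' configuration are what the
    // afterEvaluate block above walks to pull in tests, utils and archives
    bats project(path: ':qa:vagrant', configuration: 'bats')
}
-------------------------------------------------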
+    private static void createUpdateVersionsTask(Project project) {
+        project.tasks.create('vagrantUpdateVersions') {
+            description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
+            group 'Verification'
+            doLast {
+                File versions = getVersionsFile(project)
+                versions.text = listVersions(project).join('\n') + '\n'
+            }
+        }
+    }
+
+    private static void createVerifyVersionsTask(Project project) {
+        project.tasks.create('vagrantVerifyVersions') {
+            description 'Verify the file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
+            group 'Verification'
+            doLast {
+                String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
+                if (maybeUpdateFromVersions == null) {
+                    Set<String> versions = listVersions(project)
+                    Set<String> actualVersions = new TreeSet<>(project.extensions.esvagrant.upgradeFromVersions)
+                    if (!versions.equals(actualVersions)) {
+                        throw new GradleException("out-of-date versions " + actualVersions +
+                                ", expected " + versions + "; run gradle vagrantUpdateVersions")
+                    }
+                }
+            }
+        }
+    }
+
+    private static void createCheckVagrantVersionTask(Project project) {
+        project.tasks.create('vagrantCheckVersion', Exec) {
+            description 'Check the Vagrant version'
+            group 'Verification'
+            commandLine 'vagrant', '--version'
+            standardOutput = new ByteArrayOutputStream()
+            doLast {
+                String version = standardOutput.toString().trim()
+                if ((version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) == false) {
+                    throw new InvalidUserDataException("Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]")
+                }
+            }
+        }
+    }
+
+    private static void createCheckVirtualBoxVersionTask(Project project) {
+        project.tasks.create('virtualboxCheckVersion', Exec) {
+            description 'Check the Virtualbox version'
+            group 'Verification'
+            commandLine 'vboxmanage', '--version'
+            standardOutput = new ByteArrayOutputStream()
+            doLast {
+                String version = standardOutput.toString().trim()
+                try {
+                    String[] versions = version.split('\\.')
+                    int major = Integer.parseInt(versions[0])
+                    int minor = Integer.parseInt(versions[1])
+                    if ((major < 5) || (major == 5 && minor < 1)) {
+                        throw new InvalidUserDataException("Illegal version of virtualbox [${version}]. Need [5.1+]")
+                    }
+                } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
+                    throw new InvalidUserDataException("Unable to parse version of virtualbox [${version}]. Required [5.1+]", e)
+                }
+            }
+        }
+    }
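The Vagrant version gate can be sanity-checked in isolation. A runnable Groovy sketch exercising the same regex used above:

-------------------------------------------------
// Plain Groovy script; accepts Vagrant 1.8.6+ and 1.9.x, rejects older releases
def pattern = /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/
assert ('Vagrant 1.8.6' ==~ pattern)
assert ('Vagrant 1.9.1' ==~ pattern)
assert ('Vagrant 1.8.5' ==~ pattern) == false
-------------------------------------------------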
+    private static void createPackagingTestTask(Project project) {
+        project.tasks.create('packagingTest') {
+            group 'Verification'
+            description "Tests yum/apt packages using vagrant and bats.\n" +
+                    "    Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" +
+                    "    'sample' can be used to test a single yum and apt box. 'all' can be used to\n" +
+                    "    test all available boxes. The available boxes are: \n" +
+                    "    ${BOXES}"
+            dependsOn 'vagrantCheckVersion'
+        }
+    }
+
+    private static void createVagrantTasks(Project project) {
+        createCleanTask(project)
+        createStopTask(project)
+        createSmokeTestTask(project)
+        createUpdateVersionsTask(project)
+        createVerifyVersionsTask(project)
+        createCheckVagrantVersionTask(project)
+        createCheckVirtualBoxVersionTask(project)
+        createPrepareVagrantTestEnvTask(project)
+        createPackagingTestTask(project)
+    }
+
+    private static void createVagrantBoxesTasks(Project project) {
+        assert project.extensions.esvagrant.boxes != null
+
+        assert project.tasks.stop != null
+        Task stop = project.tasks.stop
+
+        assert project.tasks.vagrantSmokeTest != null
+        Task vagrantSmokeTest = project.tasks.vagrantSmokeTest
+
+        assert project.tasks.vagrantCheckVersion != null
+        Task vagrantCheckVersion = project.tasks.vagrantCheckVersion
+
+        assert project.tasks.virtualboxCheckVersion != null
+        Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
+
+        assert project.tasks.vagrantSetUp != null
+        Task vagrantSetUp = project.tasks.vagrantSetUp
+
+        assert project.tasks.packagingTest != null
+        Task packagingTest = project.tasks.packagingTest
+
+        /*
+         * We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD)
+         * so that boxes are not duplicated for every Gradle project that uses this VagrantTestPlugin.
+         */
+        def vagrantEnvVars = [
+                'VAGRANT_CWD'         : "${project.rootDir.absolutePath}",
+                'VAGRANT_VAGRANTFILE' : 'Vagrantfile',
+                'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}"
+        ]
+
+        // Each box gets its own set of tasks
+        for (String box : BOXES) {
+            String boxTask = box.capitalize().replace('-', '')
+
+            // always add a halt task for all boxes, so clean makes sure they are all shutdown
+            Task halt = project.tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) {
+                boxName box
+                environmentVars vagrantEnvVars
+                args 'halt', box
+            }
+            stop.dependsOn(halt)
+            if (project.extensions.esvagrant.boxes.contains(box) == false) {
+                // we only need a halt task if this box was not specified
+                continue;
+            }
+
+            Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) {
+                boxName box
+                environmentVars vagrantEnvVars
+                args 'box', 'update', box
+                dependsOn vagrantCheckVersion, virtualboxCheckVersion, vagrantSetUp
+            }
+
+            Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
+                boxName box
+                environmentVars vagrantEnvVars
+                /* It's important that we try to reprovision the box even if it already
+                  exists. That way updates to the vagrant configuration take automatically.
+                  That isn't to say that the updates will always be compatible. It's ok to
+                  just destroy the boxes if they get busted but that is a manual step
+                  because it's slow-ish. */
+                /* We lock the provider to virtualbox because the Vagrantfile specifies
+                  lots of boxes that only work properly in virtualbox. Virtualbox is
+                  vagrant's default but it's possible to change that default and folks do.
+                  But the boxes that we use are unlikely to work properly with other
+                  virtualization providers. Thus the lock. */
+                args 'up', box, '--provision', '--provider', 'virtualbox'
+                /* It'd be possible to check if the box is already up here and output
+                  SKIPPED but that would require running vagrant status which is slow! */
+                dependsOn update
+            }
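The per-box task names are derived mechanically from the box id, so `centos-7` yields tasks such as `vagrantCentos7#up`. A runnable check of the derivation used above:

-------------------------------------------------
// Plain Groovy script verifying the boxTask naming scheme
assert 'centos-7'.capitalize().replace('-', '') == 'Centos7'
assert 'ubuntu-1404'.capitalize().replace('-', '') == 'Ubuntu1404'
-------------------------------------------------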
+            Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) {
+                environment vagrantEnvVars
+                dependsOn up
+                finalizedBy halt
+                commandLine 'vagrant', 'ssh', box, '--command',
+                        "set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/    ${box}: /'"
+            }
+            vagrantSmokeTest.dependsOn(smoke)
+
+            Task packaging = project.tasks.create("vagrant${boxTask}#packagingtest", BatsOverVagrantTask) {
+                boxName box
+                environmentVars vagrantEnvVars
+                dependsOn up
+                finalizedBy halt
+                command BATS_TEST_COMMAND
+            }
+            packagingTest.dependsOn(packaging)
+        }
+    }
+}
diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.vagrant.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.vagrant.properties
new file mode 100644
index 00000000000..844310fa9d7
--- /dev/null
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.vagrant.properties
@@ -0,0 +1 @@
+implementation-class=org.elasticsearch.gradle.vagrant.VagrantTestPlugin
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index f1cdf9749de..7bede9f390e 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
 elasticsearch = 6.0.0-alpha1
-lucene = 6.3.0-snapshot-a66a445
+lucene = 6.3.0
 
 # optional dependencies
 spatial4j = 0.6
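The one-line properties file above is what binds the plugin id to its implementation class, so a consuming build only needs the id:

-------------------------------------------------
// build.gradle -- Gradle resolves 'elasticsearch.vagrant' through
// META-INF/gradle-plugins/elasticsearch.vagrant.properties on the buildSrc classpath
apply plugin: 'elasticsearch.vagrant'
-------------------------------------------------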
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java
index 343d3cf613a..ac45f20dc25 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/NoopPlugin.java
@@ -35,7 +35,7 @@ import java.util.List;
 public class NoopPlugin extends Plugin implements ActionPlugin {
 
     @Override
-    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
         return Arrays.asList(
                 new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
                 new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)
diff --git a/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 1626a88f4a2..00000000000
--- a/core/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-61aacb657e44a9beabf95834e106bbb96373a703
\ No newline at end of file
diff --git a/core/licenses/lucene-analyzers-common-6.3.0.jar.sha1 b/core/licenses/lucene-analyzers-common-6.3.0.jar.sha1
new file mode 100644
index 00000000000..77d6e83314f
--- /dev/null
+++ b/core/licenses/lucene-analyzers-common-6.3.0.jar.sha1
@@ -0,0 +1 @@
+494aed699af238c3872a6b65e17939e9cb7ddbe0
\ No newline at end of file
diff --git a/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 2f45d50eeee..00000000000
--- a/core/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-600de75a81e259cab0384e546d9a1d527ddba6d6
\ No newline at end of file
diff --git a/core/licenses/lucene-backward-codecs-6.3.0.jar.sha1 b/core/licenses/lucene-backward-codecs-6.3.0.jar.sha1
new file mode 100644
index 00000000000..8d1640eecf8
--- /dev/null
+++ b/core/licenses/lucene-backward-codecs-6.3.0.jar.sha1
@@ -0,0 +1 @@
+77dede7dff1b833ca2e92d8ab137edb209354d9b
\ No newline at end of file
diff --git a/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 9dcdbeb40e9..00000000000
--- a/core/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-188774468a56a8731ca639527d721060d26ffebd
\ No newline at end of file
diff --git a/core/licenses/lucene-core-6.3.0.jar.sha1 b/core/licenses/lucene-core-6.3.0.jar.sha1
new file mode 100644
index 00000000000..b9f5ccfb8d8
--- /dev/null
+++ b/core/licenses/lucene-core-6.3.0.jar.sha1
@@ -0,0 +1 @@
+d3c87ea89e2f83e401f9cc7f14e4c43945f7f1e1
\ No newline at end of file
diff --git a/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 14c8d7aa2b7..00000000000
--- a/core/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5afd9271e3d8f645440f48ff2487545ae5573e7e
\ No newline at end of file
diff --git a/core/licenses/lucene-grouping-6.3.0.jar.sha1 b/core/licenses/lucene-grouping-6.3.0.jar.sha1
new file mode 100644
index 00000000000..003c3801acd
--- /dev/null
+++ b/core/licenses/lucene-grouping-6.3.0.jar.sha1
@@ -0,0 +1 @@
+2c96d59e318ea66838aeb9c5cfb8b4d27b40953c
\ No newline at end of file
diff --git a/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index e695284756d..00000000000
--- a/core/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0f575175e26d4d3b1095f6300cbefbbb3ee994cd
\ No newline at end of file
diff --git a/core/licenses/lucene-highlighter-6.3.0.jar.sha1 b/core/licenses/lucene-highlighter-6.3.0.jar.sha1
new file mode 100644
index 00000000000..0a7d5deac0c
--- /dev/null
+++ b/core/licenses/lucene-highlighter-6.3.0.jar.sha1
@@ -0,0 +1 @@
+4f154d8badfe47fe45503c18fb30f2177f758794
\ No newline at end of file
diff --git a/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index ad02b0cac3b..00000000000
--- a/core/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ee898c3d318681c9f29c56e6d9b52876be96d814
\ No newline at end of file
diff --git a/core/licenses/lucene-join-6.3.0.jar.sha1 b/core/licenses/lucene-join-6.3.0.jar.sha1
new file mode 100644
index 00000000000..df43f249d16
--- /dev/null
+++ b/core/licenses/lucene-join-6.3.0.jar.sha1
@@ -0,0 +1 @@
+79b898117dcfde2981ec6806e420ff218842eca8
\ No newline at end of file
diff --git a/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 0e36d650670..00000000000
--- a/core/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ea6defd322456711394b4dabcda70a217e3caacd
\ No newline at end of file
diff --git a/core/licenses/lucene-memory-6.3.0.jar.sha1 b/core/licenses/lucene-memory-6.3.0.jar.sha1
new file mode 100644
index 00000000000..a8a4e5f1dd9
--- /dev/null
+++ b/core/licenses/lucene-memory-6.3.0.jar.sha1
@@ -0,0 +1 @@
+89edeb404e507d640cb13903acff6953199704a2
\ No newline at end of file
diff --git a/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index e458570651a..00000000000
--- a/core/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ea2de7f9753a8e19a1ec9f25a3ea65d7ce909a0e
\ No newline at end of file
diff --git a/core/licenses/lucene-misc-6.3.0.jar.sha1 b/core/licenses/lucene-misc-6.3.0.jar.sha1
new file mode 100644
index 00000000000..de4685d9564
--- /dev/null
+++ b/core/licenses/lucene-misc-6.3.0.jar.sha1
@@ -0,0 +1 @@
+02d0e1f5a9df15ac911ad495bad5ea253ab50a9f
\ No newline at end of file
diff --git a/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 1231424e3be..00000000000
--- a/core/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0b15c6f29bfb9ec14a4615013a94bfa43a63793d
\ No newline at end of file
diff --git a/core/licenses/lucene-queries-6.3.0.jar.sha1 b/core/licenses/lucene-queries-6.3.0.jar.sha1
new file mode 100644
index 00000000000..8bf5b45a4ea
--- /dev/null
+++ b/core/licenses/lucene-queries-6.3.0.jar.sha1
@@ -0,0 +1 @@
+eb7938233c8103223069c7b5b5f785b4d20ddafa
\ No newline at end of file
diff --git a/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index a367f4e45cf..00000000000
--- a/core/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d89d9fa1036c38144e0b8db079ae959353847c86
\ No newline at end of file
diff --git a/core/licenses/lucene-queryparser-6.3.0.jar.sha1 b/core/licenses/lucene-queryparser-6.3.0.jar.sha1
new file mode 100644
index 00000000000..e2dae1cc8b0
--- /dev/null
+++ b/core/licenses/lucene-queryparser-6.3.0.jar.sha1
@@ -0,0 +1 @@
+e979fb02155cbe81a8d335d6dc41d2ef06be68b6
\ No newline at end of file
diff --git a/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 4c8874c0b4b..00000000000
--- a/core/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c003c1ab0a19a02b30156ce13372cff1001d6a7d
\ No newline at end of file
diff --git a/core/licenses/lucene-sandbox-6.3.0.jar.sha1 b/core/licenses/lucene-sandbox-6.3.0.jar.sha1
new file mode 100644
index 00000000000..6baf6baabfe
--- /dev/null
+++ b/core/licenses/lucene-sandbox-6.3.0.jar.sha1
@@ -0,0 +1 @@
+257387c45c6fa2b77fd6931751f93fdcd798ced4
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 75dd8263828..00000000000
--- a/core/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a3c570bf588d7c9ca43d074db9ce9c9b8408b930
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial-6.3.0.jar.sha1 b/core/licenses/lucene-spatial-6.3.0.jar.sha1
new file mode 100644
index 00000000000..ff35a066ffd
--- /dev/null
+++ b/core/licenses/lucene-spatial-6.3.0.jar.sha1
@@ -0,0 +1 @@
+3cf5fe5402b5e34b240b73501c9e97a82428259e
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index debd8e0b873..00000000000
--- a/core/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-de54ca61f5892cf2c88ac083b3332a827beca7ff
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial-extras-6.3.0.jar.sha1 b/core/licenses/lucene-spatial-extras-6.3.0.jar.sha1
new file mode 100644
index 00000000000..0c52cf09377
--- /dev/null
+++ b/core/licenses/lucene-spatial-extras-6.3.0.jar.sha1
@@ -0,0 +1 @@
+1b77ef3740dc885c62d5966fbe9aea1199d344fb
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index b9eb9a0c270..00000000000
--- a/core/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cacdf81b324acd335be63798d5a3dd16e7dff9a3
\ No newline at end of file
diff --git a/core/licenses/lucene-spatial3d-6.3.0.jar.sha1 b/core/licenses/lucene-spatial3d-6.3.0.jar.sha1
new file mode 100644
index 00000000000..c23003146af
--- /dev/null
+++ b/core/licenses/lucene-spatial3d-6.3.0.jar.sha1
@@ -0,0 +1 @@
+aa94b4a8636b3633008640cc5155ad354aebcea5
\ No newline at end of file
diff --git a/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 b/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index a6517bc7d42..00000000000
--- a/core/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a5cb3723bc8e0db185fc43e57b648145de27fde8
\ No newline at end of file
diff --git a/core/licenses/lucene-suggest-6.3.0.jar.sha1 b/core/licenses/lucene-suggest-6.3.0.jar.sha1
new file mode 100644
index 00000000000..137b8976536
--- /dev/null
+++ b/core/licenses/lucene-suggest-6.3.0.jar.sha1
@@ -0,0 +1 @@
+ed5d8ee5cd7edcad5d4ffca2b4540ccc844e9bb0
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/lucene/index/XPointValues.java b/core/src/main/java/org/apache/lucene/index/XPointValues.java
deleted file mode 100644
index c4fa0b4d623..00000000000
--- a/core/src/main/java/org/apache/lucene/index/XPointValues.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.index;
-import org.apache.lucene.util.StringHelper;
-
-import java.io.IOException;
-
-/**
- * Forked utility methods from Lucene's PointValues until LUCENE-7257 is released.
- */
-public class XPointValues {
-    /** Return the cumulated number of points across all leaves of the given
-     * {@link IndexReader}. Leaves that do not have points for the given field
-     * are ignored.
-     * @see PointValues#size(String) */
-    public static long size(IndexReader reader, String field) throws IOException {
-        long size = 0;
-        for (LeafReaderContext ctx : reader.leaves()) {
-            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
-            if (info == null || info.getPointDimensionCount() == 0) {
-                continue;
-            }
-            PointValues values = ctx.reader().getPointValues();
-            size += values.size(field);
-        }
-        return size;
-    }
-
-    /** Return the cumulated number of docs that have points across all leaves
-     * of the given {@link IndexReader}. Leaves that do not have points for the
-     * given field are ignored.
-     * @see PointValues#getDocCount(String) */
-    public static int getDocCount(IndexReader reader, String field) throws IOException {
-        int count = 0;
-        for (LeafReaderContext ctx : reader.leaves()) {
-            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
-            if (info == null || info.getPointDimensionCount() == 0) {
-                continue;
-            }
-            PointValues values = ctx.reader().getPointValues();
-            count += values.getDocCount(field);
-        }
-        return count;
-    }
-
-    /** Return the minimum packed values across all leaves of the given
-     * {@link IndexReader}. Leaves that do not have points for the given field
-     * are ignored.
-     * @see PointValues#getMinPackedValue(String) */
-    public static byte[] getMinPackedValue(IndexReader reader, String field) throws IOException {
-        byte[] minValue = null;
-        for (LeafReaderContext ctx : reader.leaves()) {
-            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
-            if (info == null || info.getPointDimensionCount() == 0) {
-                continue;
-            }
-            PointValues values = ctx.reader().getPointValues();
-            byte[] leafMinValue = values.getMinPackedValue(field);
-            if (leafMinValue == null) {
-                continue;
-            }
-            if (minValue == null) {
-                minValue = leafMinValue.clone();
-            } else {
-                final int numDimensions = values.getNumDimensions(field);
-                final int numBytesPerDimension = values.getBytesPerDimension(field);
-                for (int i = 0; i < numDimensions; ++i) {
-                    int offset = i * numBytesPerDimension;
-                    if (StringHelper.compare(numBytesPerDimension, leafMinValue, offset, minValue, offset) < 0) {
-                        System.arraycopy(leafMinValue, offset, minValue, offset, numBytesPerDimension);
-                    }
-                }
-            }
-        }
-        return minValue;
-    }
-
-    /** Return the maximum packed values across all leaves of the given
-     * {@link IndexReader}. Leaves that do not have points for the given field
-     * are ignored.
-     * @see PointValues#getMaxPackedValue(String) */
-    public static byte[] getMaxPackedValue(IndexReader reader, String field) throws IOException {
-        byte[] maxValue = null;
-        for (LeafReaderContext ctx : reader.leaves()) {
-            FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
-            if (info == null || info.getPointDimensionCount() == 0) {
-                continue;
-            }
-            PointValues values = ctx.reader().getPointValues();
-            byte[] leafMaxValue = values.getMaxPackedValue(field);
-            if (leafMaxValue == null) {
-                continue;
-            }
-            if (maxValue == null) {
-                maxValue = leafMaxValue.clone();
-            } else {
-                final int numDimensions = values.getNumDimensions(field);
-                final int numBytesPerDimension = values.getBytesPerDimension(field);
-                for (int i = 0; i < numDimensions; ++i) {
-                    int offset = i * numBytesPerDimension;
-                    if (StringHelper.compare(numBytesPerDimension, leafMaxValue, offset, maxValue, offset) > 0) {
-                        System.arraycopy(leafMaxValue, offset, maxValue, offset, numBytesPerDimension);
-                    }
-                }
-            }
-        }
-        return maxValue;
-    }
-
-    /** Default constructor */
-    private XPointValues() {
-    }
-}
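XPointValues existed only until LUCENE-7257 shipped; with Lucene 6.3 on the classpath the equivalent statics live on PointValues itself, which is why the fork can be deleted. A hedged Groovy sketch, assuming an existing index at an illustrative path and a point field named 'point':

-------------------------------------------------
import org.apache.lucene.index.DirectoryReader
import org.apache.lucene.index.PointValues
import org.apache.lucene.store.FSDirectory
import java.nio.file.Paths

// '/tmp/index' and the field name are examples only
def reader = DirectoryReader.open(FSDirectory.open(Paths.get('/tmp/index')))
long points = PointValues.size(reader, 'point')      // replaces XPointValues.size
int docs = PointValues.getDocCount(reader, 'point')  // replaces XPointValues.getDocCount
byte[] min = PointValues.getMinPackedValue(reader, 'point')
reader.close()
-------------------------------------------------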
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index eb33dbe4b18..8a0b4f4e00b 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -523,16 +523,14 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
                 org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
         NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
                 org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
-        INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
-                org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
+        // 47 used to be for IndexTemplateAlreadyExistsException which was deprecated in 5.1 and removed in 6.0
         TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
                 org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
         CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
                 org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
         FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
                 org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
-        INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
-                org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
+        // 51 used to be for IndexShardAlreadyExistsException which was deprecated in 5.1 and removed in 6.0
         VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
                 org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
@@ -553,7 +551,7 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
                 org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
         ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
                 org.elasticsearch.indices.AliasFilterParsingException::new, 63),
-        // 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
+        // 64 was DeleteByQueryFailedEngineException, which was removed in 5.0
        GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
         INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
                 org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index e9e950ce80a..2d61fb8194a 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch;
 
+import org.apache.lucene.util.MathUtil;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
@@ -298,7 +299,27 @@ public class Version {
      * is a beta or RC release then the version itself is returned.
      */
     public Version minimumCompatibilityVersion() {
-        return Version.smallest(this, fromId(major * 1000000 + 99));
+        final int bwcMajor;
+        final int bwcMinor;
+        if (this.onOrAfter(Version.V_6_0_0_alpha1)) {
+            bwcMajor = major - 1;
+            bwcMinor = 0; // TODO we have to move this to the latest released minor of the last major but for now we just keep
+        } else {
+            bwcMajor = major;
+            bwcMinor = 0;
+        }
+        return Version.smallest(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
+    }
+
+    /**
+     * Returns true iff both versions are compatible, otherwise false.
+     */
+    public boolean isCompatible(Version version) {
+        boolean compatible = onOrAfter(version.minimumCompatibilityVersion())
+            && version.onOrAfter(minimumCompatibilityVersion());
+
+        assert compatible == false || Math.max(major, version.major) - Math.min(major, version.major) <= 1;
+        return compatible;
     }
 
     @SuppressForbidden(reason = "System.out.*")
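Worked arithmetic for the new id computation. Elasticsearch version ids encode major*1000000 + minor*10000 + revision*100 + build, with build 99 marking a release, so for any 6.x version bwcMajor drops to 5 and the computed id resolves to 5.0.0:

-------------------------------------------------
// Plain Groovy script mirroring the expression in minimumCompatibilityVersion()
int bwcMajor = 6 - 1
int bwcMinor = 0
assert bwcMajor * 1000000 + bwcMinor * 10000 + 99 == 5000099 // the id of version 5.0.0
-------------------------------------------------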
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index a3797c3cb88..0097db4b7c4 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -356,7 +356,7 @@ public class ActionModule extends AbstractModule {
             register(handler.getAction().name(), handler);
         }
 
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse> void register(
+        public <Request extends ActionRequest, Response extends ActionResponse> void register(
                 GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction,
                 Class<?>... supportTransportActions) {
             register(new ActionHandler<>(action, transportAction, supportTransportActions));
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequest.java b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
index e8dd639c4df..769b2e7b573 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -25,7 +25,7 @@ import org.elasticsearch.transport.TransportRequest;
 
 import java.io.IOException;
 
-public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {
+public abstract class ActionRequest extends TransportRequest {
 
     public ActionRequest() {
         super();
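With the self-referential type parameter gone, request subclasses simply extend the raw base class. A minimal Groovy sketch against the new signature; the class name is invented:

-------------------------------------------------
import org.elasticsearch.action.ActionRequest
import org.elasticsearch.action.ActionRequestValidationException

// Hypothetical request type -- no more `extends ActionRequest<PingRequest>`
class PingRequest extends ActionRequest {
    @Override
    ActionRequestValidationException validate() {
        return null // nothing to validate
    }
}
-------------------------------------------------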
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java
index 033dd5957d9..d6441bb8e77 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/LivenessRequest.java
@@ -25,7 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
  * Transport level private response for the transport handler registered under
  * {@value org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction#NAME}
  */
-public final class LivenessRequest extends ActionRequest<LivenessRequest> {
+public final class LivenessRequest extends ActionRequest {
     @Override
     public ActionRequestValidationException validate() {
         return null;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java
index efbc9679e71..07d40b5ffca 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java
@@ -33,7 +33,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  * A request to get node tasks
  */
-public class GetTaskRequest extends ActionRequest<GetTaskRequest> {
+public class GetTaskRequest extends ActionRequest {
     private TaskId taskId = TaskId.EMPTY_TASK_ID;
     private boolean waitForCompletion = false;
     private TimeValue timeout = null;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
index 967ea31c84a..819d2de999c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java
@@ -30,7 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
 
 /** Request the mappings of specific fields */
-public class GetFieldMappingsRequest extends ActionRequest<GetFieldMappingsRequest> implements IndicesRequest.Replaceable {
+public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable {
 
     protected boolean local = false;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
index c8b10af9a8f..0f396afa551 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
@@ -32,7 +32,7 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<PutMappingClusterStateUpdateRequest> {
 
     private boolean updateAllTypes = false;
 
-    PutMappingClusterStateUpdateRequest() {
+    public PutMappingClusterStateUpdateRequest() {
 
     }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
index 896b78f7c26..ae9f2a38060 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -64,8 +64,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest {
      * Set to <code>true</code> to force only creation, not an update of an index template. If it already
-     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+     * exists, it will fail with an {@link IllegalArgumentException}.
      */
     public PutIndexTemplateRequest create(boolean create) {
         this.create = create;
@@ -473,7 +471,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest {
             ... indexPatterns.size() > 0 ? indexPatterns.get(0) : "");
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
index b7fc314a3f1..c1db96ae7ce 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
@@ -76,7 +76,7 @@ public class PutIndexTemplateRequestBuilder
 
     /**
      * Set to <code>true</code> to force only creation, not an update of an index template. If it already
-     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+     * exists, it will fail with an {@link IllegalArgumentException}.
      */
     public PutIndexTemplateRequestBuilder setCreate(boolean create) {
         request.create(create);
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index ee4f96d484a..48d163cdb26 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -61,7 +61,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
  * Note that we only support refresh on the bulk request not per item.
 * @see org.elasticsearch.client.Client#bulk(BulkRequest)
 */
-public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
+public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
 
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(BulkRequest.class));
diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
index 1308d56acaf..ee4ce570df2 100644
--- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java
@@ -48,7 +48,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
+public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
 
     /**
      * A single get item.
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
index 508ad90a9ea..5f7f32e1760 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java
@@ -59,7 +59,7 @@ public final class IngestActionFilter extends AbstractComponent implements ActionFilter {
     }
 
     @Override
-    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
+    public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
         switch (action) {
             case IndexAction.NAME:
                 IndexRequest indexRequest = (IndexRequest) request;
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java
index 5d2aea389dc..a3928c17fc7 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java
@@ -54,7 +54,7 @@ public final class IngestProxyActionFilter implements ActionFilter {
     }
 
     @Override
-    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
+    public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
         Action ingestAction;
         switch (action) {
             case IndexAction.NAME:
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
index a63f7a30dbe..c9761034418 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
@@ -37,7 +37,7 @@ import java.util.Map;
 
 import static org.elasticsearch.ingest.IngestDocument.MetaData;
 
-public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {
+public class SimulatePipelineRequest extends ActionRequest {
 
     private String id;
     private boolean verbose;
diff --git a/core/src/main/java/org/elasticsearch/action/main/MainRequest.java b/core/src/main/java/org/elasticsearch/action/main/MainRequest.java
index 1484bc2a3e9..1736e56a8dc 100644
--- a/core/src/main/java/org/elasticsearch/action/main/MainRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/main/MainRequest.java
@@ -22,7 +22,7 @@ package org.elasticsearch.action.main;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 
-public class MainRequest extends ActionRequest<MainRequest> {
+public class MainRequest extends ActionRequest {
 
     @Override
     public ActionRequestValidationException validate() {
diff --git a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java
index f2f26d655d3..23c5c3747fb 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java
@@ -31,7 +31,7 @@ import java.util.List;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 
-public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {
+public class ClearScrollRequest extends ActionRequest {
 
     private List<String> scrollIds;
*/ -public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { +public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index de27805b139..ae960dae984 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -49,7 +49,7 @@ import java.util.Objects; * @see org.elasticsearch.client.Client#search(SearchRequest) * @see SearchResponse */ -public final class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable { +public final class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable { private SearchType searchType = SearchType.DEFAULT; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 8a171e24a1e..317efe40314 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -33,7 +33,7 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class SearchScrollRequest extends ActionRequest { +public class SearchScrollRequest extends ActionRequest { private String scrollId; private Scroll scroll; diff --git a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java index f536d9e0ceb..4a2c88f75dc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java @@ -40,7 +40,7 @@ public interface ActionFilter { * Enables filtering the execution of an action on the request side, either by sending a response through the * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain} */ - , Response extends ActionResponse> void apply(Task task, String action, Request request, + void apply(Task task, String action, Request request, ActionListener listener, ActionFilterChain chain); /** @@ -62,7 +62,7 @@ public interface ActionFilter { } @Override - public final , Response extends ActionResponse> void apply(Task task, String action, Request request, + public final void apply(Task task, String action, Request request, ActionListener listener, ActionFilterChain chain) { if (apply(action, request, listener)) { chain.proceed(task, action, request, listener); @@ -73,7 +73,7 @@ public interface ActionFilter { * Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false} * if it should be aborted since the filter already handled the request and called the given listener. 
*/ - protected abstract boolean apply(String action, ActionRequest request, ActionListener listener); + protected abstract boolean apply(String action, ActionRequest request, ActionListener listener); @Override public final void apply(String action, Response response, ActionListener listener, diff --git a/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java b/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java index 54f55e187a9..29991451f2e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActionFilterChain.java @@ -27,7 +27,7 @@ import org.elasticsearch.tasks.Task; /** * A filter chain allowing to continue and process the transport action request */ -public interface ActionFilterChain, Response extends ActionResponse> { +public interface ActionFilterChain { /** * Continue processing the request. Should only be called if a response has not been sent through diff --git a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 0a53b63b662..68b699cb110 100644 --- a/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -35,7 +35,7 @@ import java.util.function.Supplier; /** * A TransportAction that self registers a handler into the transport service */ -public abstract class HandledTransportAction, Response extends ActionResponse> +public abstract class HandledTransportAction extends TransportAction { protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index b348d2ec317..dbd08aa376f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; -public abstract class TransportAction, Response extends ActionResponse> extends AbstractComponent { +public abstract class TransportAction extends AbstractComponent { protected final ThreadPool threadPool; protected final String actionName; @@ -148,7 +148,7 @@ public abstract class TransportAction, Re protected abstract void doExecute(Request request, ActionListener listener); - private static class RequestFilterChain, Response extends ActionResponse> + private static class RequestFilterChain implements ActionFilterChain { private final TransportAction action; @@ -184,7 +184,7 @@ public abstract class TransportAction, Re } } - private static class ResponseFilterChain, Response extends ActionResponse> + private static class ResponseFilterChain implements ActionFilterChain { private final ActionFilter[] filters; diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java index b6ab85c0b18..a04d2edc8dc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastRequest.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -public class BroadcastRequest> extends ActionRequest implements IndicesRequest.Replaceable { +public class BroadcastRequest> extends ActionRequest implements IndicesRequest.Replaceable { protected String[] indices; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); diff --git a/core/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/core/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index efbcadf445f..6f2ce6c4ef9 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -29,7 +29,7 @@ import java.io.IOException; /** * A based request for master based operation. */ -public abstract class MasterNodeRequest> extends ActionRequest { +public abstract class MasterNodeRequest> extends ActionRequest { public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java index 4523e8d339a..2f8490cc872 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -public abstract class BaseNodesRequest> extends ActionRequest { +public abstract class BaseNodesRequest> extends ActionRequest { /** * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index ac2166cd6d7..d520b3d4e70 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * Requests that are run on a particular replica, first on the primary and then on the replicas like {@link IndexRequest} or * {@link TransportShardRefreshAction}. 
*/ -public abstract class ReplicationRequest> extends ActionRequest +public abstract class ReplicationRequest> extends ActionRequest implements IndicesRequest { public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index a6bb0f8e0a1..791617231b5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.concurrent.TimeUnit; -public abstract class InstanceShardOperationRequest> extends ActionRequest +public abstract class InstanceShardOperationRequest> extends ActionRequest implements IndicesRequest { public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java index 4265fa98337..18f054bb82e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java @@ -31,7 +31,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -public abstract class SingleShardRequest> extends ActionRequest implements IndicesRequest { +public abstract class SingleShardRequest> extends ActionRequest implements IndicesRequest { public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java index 18eea411b63..e912eebb4fb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java @@ -36,7 +36,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A base class for task requests */ -public class BaseTasksRequest> extends ActionRequest { +public class BaseTasksRequest> extends ActionRequest { public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY; diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index 3cd73226e73..da9dae6759d 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -41,7 +41,7 @@ import java.util.Iterator; import java.util.List; import java.util.Set; -public class MultiTermVectorsRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { +public class MultiTermVectorsRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { String preference; List requests = new ArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java 
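All of the hunks above drop the self-referential type parameter from `ActionRequest`. For orientation, a minimal sketch of what a concrete request subclass looks like after this change; the class name is illustrative (not from the diff), and the shape mirrors `MainRequest` above:

[source,java]
----
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;

// before this change the declaration would have been:
//   public class PingRequest extends ActionRequest<PingRequest>
public class PingRequest extends ActionRequest {
    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }
}
----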
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
index e45e42757c2..2b8fd325d81 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
@@ -20,10 +20,10 @@
 package org.elasticsearch.bootstrap;
 
 import org.elasticsearch.SecureSM;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.http.HttpTransportSettings;
@@ -266,12 +266,14 @@ final class Security {
         }
     }
 
-    static void addBindPermissions(Permissions policy, Settings settings) throws IOException {
-        // http is simple
-        String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
-        // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
-        // see SocketPermission implies() code
-        policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));
+    /**
+     * Add dynamic {@link SocketPermission}s based on HTTP and transport settings.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
+     * @param settings the {@link Settings} instance to read the HTTP and transport settings from
+     */
+    static void addBindPermissions(Permissions policy, Settings settings) {
+        addSocketPermissionForHttp(policy, settings);
         // transport is waaaay overengineered
         Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
         if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
@@ -284,16 +286,76 @@ final class Security {
         for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
             Settings profileSettings = entry.getValue();
             String name = entry.getKey();
-            String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings));
 
             // a profile is only valid if its the default profile, or if it has an actual name and specifies a port
             boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
             if (valid) {
-                // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
-                // see SocketPermission implies() code
-                policy.add(new SocketPermission("*:" + transportRange, "listen,resolve"));
+                addSocketPermissionForTransportProfile(policy, profileSettings, settings);
             }
         }
+
+        for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) {
+            // tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting
+            if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {
+                addSocketPermissionForHttp(policy, tribeNodeSettings);
+            }
+            addSocketPermissionForTransport(policy, tribeNodeSettings);
+        }
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} based on HTTP settings.
+     *
+     * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
+     * @param settings the {@link Settings} instance to read the HTTP settings from
+     */
+    private static void addSocketPermissionForHttp(final Permissions policy, final Settings settings) {
+        // http is simple
+        final String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
+        addSocketPermissionForPortRange(policy, httpRange);
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in
+     * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.
+     *
+     * @param policy          the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
+     * @param profileSettings the {@link Settings} to read the transport profile from
+     * @param settings        the {@link Settings} instance to read the transport settings from
+     */
+    private static void addSocketPermissionForTransportProfile(
+            final Permissions policy,
+            final Settings profileSettings,
+            final Settings settings) {
+        final String transportRange = profileSettings.get("port");
+        if (transportRange != null) {
+            addSocketPermissionForPortRange(policy, transportRange);
+        } else {
+            addSocketPermissionForTransport(policy, settings);
+        }
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} based on transport settings.
+     *
+     * @param policy   the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
+     * @param settings the {@link Settings} instance to read the transport settings from
+     */
+    private static void addSocketPermissionForTransport(final Permissions policy, final Settings settings) {
+        final String transportRange = TransportSettings.PORT.get(settings);
+        addSocketPermissionForPortRange(policy, transportRange);
+    }
+
+    /**
+     * Add dynamic {@link SocketPermission} for the specified port range.
+     *
+     * @param policy    the {@link Permissions} instance to apply the dynamic {@link SocketPermission} to.
+     * @param portRange the port range
+     */
+    private static void addSocketPermissionForPortRange(final Permissions policy, final String portRange) {
+        // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
+        // see SocketPermission implies() code
+        policy.add(new SocketPermission("*:" + portRange, "listen,resolve"));
     }
 
     /**
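As a self-contained illustration (not part of the change itself) of the grant that `addSocketPermissionForPortRange` performs: the `*` host wildcard matches any host without consulting a name service, and `listen` is the action checked when a server socket binds.

[source,java]
----
import java.net.SocketPermission;
import java.security.Permissions;

public class PortRangePermissionDemo {
    public static void main(String[] args) {
        final Permissions policy = new Permissions();
        // the same wildcard grant the helper adds for, e.g., a transport port range
        policy.add(new SocketPermission("*:9300-9400", "listen,resolve"));
        // binding a port inside the range is implied by the grant...
        System.out.println(policy.implies(new SocketPermission("localhost:9301", "listen"))); // true
        // ...a port outside the range is not
        System.out.println(policy.implies(new SocketPermission("localhost:9500", "listen"))); // false
    }
}
----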
diff --git a/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java b/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
index d9ddc56d48a..84438ff6d1a 100644
--- a/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
+++ b/core/src/main/java/org/elasticsearch/client/ElasticsearchClient.java
@@ -40,7 +40,7 @@ public interface ElasticsearchClient {
      * @param <RequestBuilder> The request builder type.
      * @return A future allowing to get back the response.
      */
-    <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+    <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
             final Action<Request, Response, RequestBuilder> action, final Request request);
 
     /**
@@ -53,7 +53,7 @@ public interface ElasticsearchClient {
      * @param <Response> The response type.
      * @param <RequestBuilder> The request builder type.
      */
-    <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+    <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
             final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
 
     /**
@@ -65,7 +65,7 @@ public interface ElasticsearchClient {
      * @param <RequestBuilder> The request builder.
      * @return The request builder, that can, at a later stage, execute the request.
      */
-    <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+    <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
             final Action<Request, Response, RequestBuilder> action);
 
     /**
diff --git a/core/src/main/java/org/elasticsearch/client/FilterClient.java b/core/src/main/java/org/elasticsearch/client/FilterClient.java
index d0f52282c76..23d3c2c3d0c 100644
--- a/core/src/main/java/org/elasticsearch/client/FilterClient.java
+++ b/core/src/main/java/org/elasticsearch/client/FilterClient.java
@@ -62,7 +62,7 @@ public abstract class FilterClient extends AbstractClient {
     }
 
     @Override
-    protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
+    protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
             Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
         in().execute(action, request, listener);
     }
diff --git a/core/src/main/java/org/elasticsearch/client/ParentTaskAssigningClient.java b/core/src/main/java/org/elasticsearch/client/ParentTaskAssigningClient.java
index 44ba2b76e43..62843c41b70 100644
--- a/core/src/main/java/org/elasticsearch/client/ParentTaskAssigningClient.java
+++ b/core/src/main/java/org/elasticsearch/client/ParentTaskAssigningClient.java
@@ -58,7 +58,7 @@ public class ParentTaskAssigningClient extends FilterClient {
     }
 
     @Override
-    protected < Request extends ActionRequest<Request>,
+    protected < Request extends ActionRequest,
                 Response extends ActionResponse,
                 RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
               > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
diff --git a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
index 5fc2319284d..6c3aa071ba3 100644
--- a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
+++ b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
@@ -56,7 +56,7 @@ public class NodeClient extends AbstractClient {
     }
 
     @Override
-    public < Request extends ActionRequest<Request>,
+    public < Request extends ActionRequest,
              Response extends ActionResponse,
              RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
            > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
@@ -69,7 +69,7 @@ public class NodeClient extends AbstractClient {
     * method if you don't need access to the task when listening for the response. This is the method used to implement the {@link Client}
     * interface.
     */
-    public < Request extends ActionRequest<Request>,
+    public < Request extends ActionRequest,
              Response extends ActionResponse
            > Task executeLocally(GenericAction<Request, Response> action, Request request, ActionListener<Response> listener) {
        return transportAction(action).execute(request, listener);
@@ -79,7 +79,7 @@ public class NodeClient extends AbstractClient {
     * Execute an {@link Action} locally, returning that {@link Task} used to track it, and linking an {@link TaskListener}. Prefer this
     * method if you need access to the task when listening for the response.
     */
-    public < Request extends ActionRequest<Request>,
+    public < Request extends ActionRequest,
             Response extends ActionResponse
           > Task executeLocally(GenericAction<Request, Response> action, Request request, TaskListener<Response> listener) {
        return transportAction(action).execute(request, listener);
@@ -89,7 +89,7 @@ public class NodeClient extends AbstractClient {
     * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available.
     */
    @SuppressWarnings("unchecked")
-    private < Request extends ActionRequest<Request>,
+    private < Request extends ActionRequest,
             Response extends ActionResponse
           > TransportAction<Request, Response> transportAction(GenericAction<Request, Response> action) {
        if (actions == null) {
diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index 006040b8e16..075fbf1fad6 100644
--- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -377,13 +377,13 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
     }
 
     @Override
-    public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+    public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
             final Action<Request, Response, RequestBuilder> action) {
         return action.newRequestBuilder(this);
     }
 
     @Override
-    public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+    public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
            Action<Request, Response, RequestBuilder> action, Request request) {
         PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture();
         execute(action, request, actionFuture);
@@ -394,13 +394,13 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
     * This is the single execution point of *all* clients.
     */
    @Override
-    public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+    public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        listener = threadedWrapper.wrap(listener);
        doExecute(action, request, listener);
    }
 
-    protected abstract <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
+    protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
 
    @Override
    public ActionFuture<IndexResponse> index(final IndexRequest request) {
@@ -696,19 +696,19 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
                Action<Request, Response, RequestBuilder> action, Request request) {
            return client.execute(action, request);
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
                Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
            client.execute(action, request, listener);
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
                Action<Request, Response, RequestBuilder> action) {
            return client.prepareExecute(action);
        }
@@ -1212,19 +1212,19 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
                Action<Request, Response, RequestBuilder> action, Request request) {
            return client.execute(action, request);
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
                Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
            client.execute(action, request, listener);
        }
 
        @Override
-        public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
+        public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
                Action<Request, Response, RequestBuilder> action) {
            return client.prepareExecute(action);
        }
@@ -1745,7 +1745,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
    public Client filterWithHeader(Map<String, String> headers) {
        return new FilterClient(this) {
            @Override
-            protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+            protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
                ThreadContext threadContext = threadPool().getThreadContext();
                try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) {
                    super.doExecute(action, request, listener);
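The loosened bound is source-compatible for callers. A hedged sketch of a call site after the change (`MainAction`/`MainRequest` are the types touched earlier in this diff; the surrounding class is illustrative):

[source,java]
----
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.client.Client;

class ExecuteDemo {
    static void ping(Client client) {
        // Request no longer needs to satisfy ActionRequest<Request>; inference
        // of Response and RequestBuilder works exactly as before.
        client.execute(MainAction.INSTANCE, new MainRequest(), new ActionListener<MainResponse>() {
            @Override public void onResponse(MainResponse response) { /* handle */ }
            @Override public void onFailure(Exception e) { /* handle */ }
        });
    }
}
----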
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index 7ef7f400a53..673693c7c38 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -329,7 +329,7 @@ public abstract class TransportClient extends AbstractClient {
     }
 
     @Override
-    protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+    protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
         proxy.execute(action, request, listener);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
index 8d4948ebb8b..faf0a880579 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java
@@ -49,8 +49,6 @@ import java.util.Set;
 
 public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaData> {
 
-    public static final Version V_5_1_0 = Version.fromId(5010099);
-
     public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build();
 
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class));
@@ -210,7 +208,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaData>
             0 ? patterns.get(0) : "");
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
index 5516c7eca06..08ba1dea67e 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java
@@ -40,7 +40,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
 import org.elasticsearch.indices.IndexTemplateMissingException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -161,7 +160,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
                 if (request.create && currentState.metaData().templates().containsKey(request.name)) {
-                    throw new IndexTemplateAlreadyExistsException(request.name);
+                    throw new IllegalArgumentException("index_template [" + request.name + "] already exists");
                 }
 
                 validateAndAddTemplate(request, templateBuilder, indicesService);
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index c6f5ddc1fa6..15e384df3eb 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -20,9 +20,9 @@
 package org.elasticsearch.cluster.metadata;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -34,7 +34,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
@@ -51,10 +50,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 /**
  * Service responsible for submitting mapping changes
 */
@@ -215,28 +212,24 @@ public class MetaDataMappingService extends AbstractComponent {
 
         @Override
         public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
-            Set<Index> indicesToClose = new HashSet<>();
+            Map<Index, MapperService> indexMapperServices = new HashMap<>();
             BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
             try {
-                // precreate incoming indices;
                 for (PutMappingClusterStateUpdateRequest request : tasks) {
                     try {
                         for (Index index : request.indices()) {
                             final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
-                            if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
-                                // if the index does not exists we create it once, add all types to the mapper service and
-                                // close it later once we are done with mapping update
-                                indicesToClose.add(indexMetaData.getIndex());
-                                IndexService indexService =
-                                    indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {});
+                            if (indexMapperServices.containsKey(indexMetaData.getIndex()) == false) {
+                                MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
+                                indexMapperServices.put(index, mapperService);
                                 // add mappings for all types, we need them for cross-type validation
                                 for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                                    indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
+                                    mapperService.merge(mapping.value.type(), mapping.value.source(),
                                         MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
                                 }
                             }
                         }
-                        currentState = applyRequest(currentState, request);
+                        currentState = applyRequest(currentState, request, indexMapperServices);
                         builder.success(request);
                     } catch (Exception e) {
                         builder.failure(request, e);
@@ -244,34 +237,33 @@ public class MetaDataMappingService extends AbstractComponent {
                 }
                 return builder.build(currentState);
             } finally {
-                for (Index index : indicesToClose) {
-                    indicesService.removeIndex(index, "created for mapping processing");
-                }
+                IOUtils.close(indexMapperServices.values());
             }
         }
 
-        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
+        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
+                                          Map<Index, MapperService> indexMapperServices) throws IOException {
             String mappingType = request.type();
             CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
             final MetaData metaData = currentState.metaData();
-            final List<Tuple<IndexService, IndexMetaData>> updateList = new ArrayList<>();
+            final List<IndexMetaData> updateList = new ArrayList<>();
             for (Index index : request.indices()) {
-                IndexService indexService = indicesService.indexServiceSafe(index);
+                MapperService mapperService = indexMapperServices.get(index);
                 // IMPORTANT: always get the metadata from the state since it get's batched
                 // and if we pull it from the indexService we might miss an update etc.
                 final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index);
 
-                // this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that
+                // this is paranoia... just to be sure we use the exact same metadata tuple on the update that
                 // we used for the validation, it makes this mechanism little less scary (a little)
-                updateList.add(new Tuple<>(indexService, indexMetaData));
+                updateList.add(indexMetaData);
                 // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                 DocumentMapper newMapper;
-                DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
+                DocumentMapper existingMapper = mapperService.documentMapper(request.type());
                 if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                     // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-                    newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
+                    newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);
                 } else {
-                    newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
+                    newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null);
                     if (existingMapper != null) {
                         // first, simulate: just call merge and ignore the result
                         existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
@@ -287,9 +279,9 @@ public class MetaDataMappingService extends AbstractComponent {
                         for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                             String parentType = newMapper.parentFieldMapper().type();
                             if (parentType.equals(mapping.value.type()) &&
-                                    indexService.mapperService().getParentTypes().contains(parentType) == false) {
+                                    mapperService.getParentTypes().contains(parentType) == false) {
                                 throw new IllegalArgumentException("can't add a _parent field that points to an " +
-                                        "already existing type, that isn't already a parent");
+                                    "already existing type, that isn't already a parent");
                             }
                         }
                     }
@@ -307,24 +299,25 @@ public class MetaDataMappingService extends AbstractComponent {
                 throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]");
             }
             MetaData.Builder builder = MetaData.builder(metaData);
-            for (Tuple<IndexService, IndexMetaData> toUpdate : updateList) {
+            boolean updated = false;
+            for (IndexMetaData indexMetaData : updateList) {
                 // do the actual merge here on the master, and update the mapping source
                 // we use the exact same indexService and metadata we used to validate above here to actually apply the update
-                final IndexService indexService = toUpdate.v1();
-                final IndexMetaData indexMetaData = toUpdate.v2();
                 final Index index = indexMetaData.getIndex();
+                final MapperService mapperService = indexMapperServices.get(index);
                 CompressedXContent existingSource = null;
-                DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
+                DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
                 if (existingMapper != null) {
                     existingSource = existingMapper.mappingSource();
                 }
-                DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
+                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                 CompressedXContent updatedSource = mergedMapper.mappingSource();
 
                 if (existingSource != null) {
                     if (existingSource.equals(updatedSource)) {
                         // same source, no changes, ignore it
                     } else {
+                        updated = true;
                         // use the merged mapping source
                         if (logger.isDebugEnabled()) {
                             logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
@@ -334,6 +327,7 @@ public class MetaDataMappingService extends AbstractComponent {
                     }
                 } else {
+                    updated = true;
                     if (logger.isDebugEnabled()) {
                         logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                     } else if (logger.isInfoEnabled()) {
@@ -344,13 +338,16 @@ public class MetaDataMappingService extends AbstractComponent {
                 IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
                 // Mapping updates on a single type may have side-effects on other types so we need to
                 // update mapping metadata on all types
-                for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
+                for (DocumentMapper mapper : mapperService.docMappers(true)) {
                     indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
                 }
                 builder.put(indexMetaDataBuilder);
             }
-
-            return ClusterState.builder(currentState).metaData(builder).build();
+            if (updated) {
+                return ClusterState.builder(currentState).metaData(builder).build();
+            } else {
+                return currentState;
+            }
         }
 
         @Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
index e252845be8b..2028cc5b8c9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
@@ -33,6 +33,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -581,14 +582,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
         }
 
         public Builder addShard(ShardRouting shardEntry) {
-            for (ShardRouting shard : shards) {
-                // don't add two that map to the same node id
-                // we rely on the fact that a node does not have primary and backup of the same shard
-                if (shard.assignedToNode() && shardEntry.assignedToNode()
-                        && shard.currentNodeId().equals(shardEntry.currentNodeId())) {
-                    return this;
-                }
-            }
             shards.add(shardEntry);
             return this;
         }
@@ -599,9 +592,28 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
         }
 
         public IndexShardRoutingTable build() {
+            // don't allow more than one shard copy with same id to be allocated to same node
+            assert distinctNodes(shards) : "more than one shard with same id assigned to same node (shards: " + shards + ")";
             return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards)));
         }
 
+        static boolean distinctNodes(List<ShardRouting> shards) {
+            Set<String> nodes = new HashSet<>();
+            for (ShardRouting shard : shards) {
+                if (shard.assignedToNode()) {
+                    if (nodes.add(shard.currentNodeId()) == false) {
+                        return false;
+                    }
+                    if (shard.relocating()) {
+                        if (nodes.add(shard.relocatingNodeId()) == false) {
+                            return false;
+                        }
+                    }
+                }
+            }
+            return true;
+        }
+
         public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
             Index index = new Index(in);
             return readFromThin(in, index);
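The invariant `distinctNodes` enforces is small enough to restate outside the routing-table types. A stand-alone sketch using plain strings in place of `ShardRouting` (purely illustrative):

[source,java]
----
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class DistinctNodesDemo {
    // each entry is {currentNodeId, optional relocatingNodeId};
    // returns false as soon as any node id repeats across copies
    static boolean distinctNodes(List<String[]> shards) {
        Set<String> nodes = new HashSet<>();
        for (String[] shard : shards) {
            if (nodes.add(shard[0]) == false) {
                return false;
            }
            if (shard.length > 1 && nodes.add(shard[1]) == false) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(distinctNodes(Arrays.asList(
            new String[]{"node1", "node2"}, // primary relocating node1 -> node2
            new String[]{"node1"}           // replica also on node1: violation
        ))); // false
    }
}
----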
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index af064bd42d0..a5e1cc04fdc 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -83,6 +83,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
+import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@@ -123,7 +124,7 @@ public class ClusterService extends AbstractLifecycleComponent {
 
     private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
 
-    private final AtomicReference<ClusterServiceState> state = new AtomicReference<>();
+    private final AtomicReference<ClusterServiceState> state;
 
     private final ClusterBlocks.Builder initialBlocks;
 
@@ -137,7 +138,7 @@ public class ClusterService extends AbstractLifecycleComponent {
         this.clusterSettings = clusterSettings;
         this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
         // will be replaced on doStart.
-        this.state.set(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN));
+        this.state = new AtomicReference<>(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN));
 
         this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
                                                        this::setSlowTaskLoggingThreshold);
@@ -158,14 +159,45 @@ public class ClusterService extends AbstractLifecycleComponent {
     }
 
     public synchronized void setLocalNode(DiscoveryNode localNode) {
-        assert state.get().getClusterState().nodes().getLocalNodeId() == null : "local node is already set";
-        this.state.getAndUpdate(css -> {
+        assert clusterServiceState().getClusterState().nodes().getLocalNodeId() == null : "local node is already set";
+        updateState(css -> {
             ClusterState clusterState = css.getClusterState();
             DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build();
             return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus());
         });
     }
 
+    private void updateState(UnaryOperator<ClusterServiceState> updateFunction) {
+        this.state.getAndUpdate(oldClusterServiceState -> {
+            ClusterServiceState newClusterServiceState = updateFunction.apply(oldClusterServiceState);
+            assert validStateTransition(oldClusterServiceState, newClusterServiceState) :
+                "Invalid cluster service state transition from " + oldClusterServiceState + " to " + newClusterServiceState;
+            return newClusterServiceState;
+        });
+    }
+
+    private static boolean validStateTransition(ClusterServiceState oldClusterServiceState, ClusterServiceState newClusterServiceState) {
+        if (oldClusterServiceState == null || newClusterServiceState == null) {
+            return false;
+        }
+        ClusterStateStatus oldStatus = oldClusterServiceState.getClusterStateStatus();
+        ClusterStateStatus newStatus = newClusterServiceState.getClusterStateStatus();
+        // only go from UNKNOWN to UNKNOWN or BEING_APPLIED
+        if (oldStatus == ClusterStateStatus.UNKNOWN && newStatus == ClusterStateStatus.APPLIED) {
+            return false;
+        }
+        // only go from BEING_APPLIED to APPLIED
+        if (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus != ClusterStateStatus.APPLIED) {
+            return false;
+        }
+        // only go from APPLIED to BEING_APPLIED
+        if (oldStatus == ClusterStateStatus.APPLIED && newStatus != ClusterStateStatus.BEING_APPLIED) {
+            return false;
+        }
+        boolean identicalClusterState = oldClusterServiceState.getClusterState() == newClusterServiceState.getClusterState();
+        return identicalClusterState == (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus == ClusterStateStatus.APPLIED);
+    }
+
     public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
         assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
         this.nodeConnectionsService = nodeConnectionsService;
@@ -201,10 +233,10 @@ public class ClusterService extends AbstractLifecycleComponent {
     @Override
     protected synchronized void doStart() {
         Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
-        Objects.requireNonNull(state.get().getClusterState().nodes().getLocalNode(), "please set the local node before starting");
+        Objects.requireNonNull(clusterServiceState().getClusterState().nodes().getLocalNode(), "please set the local node before starting");
         Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
         add(localNodeMasterListeners);
-        this.state.getAndUpdate(css -> new ClusterServiceState(
+        updateState(css -> new ClusterServiceState(
             ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(),
             css.getClusterStateStatus()));
         this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME),
@@ -240,7 +272,7 @@ public class ClusterService extends AbstractLifecycleComponent {
      * The local node.
     */
    public DiscoveryNode localNode() {
-        DiscoveryNode localNode = state.get().getClusterState().getNodes().getLocalNode();
+        DiscoveryNode localNode = state().getNodes().getLocalNode();
        if (localNode == null) {
            throw new IllegalStateException("No local node found. Is the node started?");
        }
@@ -255,7 +287,7 @@ public class ClusterService extends AbstractLifecycleComponent {
     * The current cluster state.
     */
    public ClusterState state() {
-        return this.state.get().getClusterState();
+        return clusterServiceState().getClusterState();
    }
 
    /**
@@ -507,6 +539,13 @@ public class ClusterService extends AbstractLifecycleComponent {
        return true;
    }
 
+    /** asserts that the current thread is NOT the cluster state update thread */
+    public static boolean assertNotClusterStateUpdateThread(String reason) {
+        assert Thread.currentThread().getName().contains(UPDATE_THREAD_NAME) == false :
+            "Expected current thread [" + Thread.currentThread() + "] to not be the cluster state update thread. Reason: [" + reason + "]";
+        return true;
+    }
+
    public ClusterName getClusterName() {
        return clusterName;
    }
@@ -554,7 +593,7 @@ public class ClusterService extends AbstractLifecycleComponent {
            return;
        }
        logger.debug("processing [{}]: execute", tasksSummary);
-        ClusterState previousClusterState = state.get().getClusterState();
+        ClusterState previousClusterState = clusterServiceState().getClusterState();
        if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) {
            logger.debug("failing [{}]: local node is no longer master", tasksSummary);
            toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
@@ -704,7 +743,8 @@ public class ClusterService extends AbstractLifecycleComponent {
        }
 
        // update the current cluster state
-        state.set(new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED));
+        ClusterState finalNewClusterState = newClusterState;
+        updateState(css -> new ClusterServiceState(finalNewClusterState, ClusterStateStatus.BEING_APPLIED));
        logger.debug("set local cluster state to version {}", newClusterState.version());
        try {
            // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
@@ -726,7 +766,7 @@ public class ClusterService extends AbstractLifecycleComponent {
 
        nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());
 
-        state.getAndUpdate(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
+        updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
 
        for (ClusterStateListener listener : postAppliedListeners) {
            try {
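A compact toy model (illustrative only, omitting the identical-cluster-state clause of the real check) of the status cycle `validStateTransition` permits: UNKNOWN -> BEING_APPLIED -> APPLIED -> BEING_APPLIED -> ...

[source,java]
----
enum Status { UNKNOWN, BEING_APPLIED, APPLIED }

class TransitionDemo {
    static boolean valid(Status oldS, Status newS) {
        if (oldS == Status.UNKNOWN) {
            return newS != Status.APPLIED;        // UNKNOWN -> UNKNOWN or BEING_APPLIED
        }
        if (oldS == Status.BEING_APPLIED) {
            return newS == Status.APPLIED;        // BEING_APPLIED -> APPLIED only
        }
        return newS == Status.BEING_APPLIED;      // APPLIED -> BEING_APPLIED only
    }

    public static void main(String[] args) {
        System.out.println(valid(Status.UNKNOWN, Status.BEING_APPLIED)); // true
        System.out.println(valid(Status.APPLIED, Status.APPLIED));       // false
    }
}
----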
diff --git a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
index 9c55f2d2f18..b6ab1892f38 100644
--- a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
+++ b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.common.transport;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -68,6 +69,12 @@ public final class TransportAddress implements Writeable {
      * Read from a stream.
      */
     public TransportAddress(StreamInput in) throws IOException {
+        if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
+            final short i = in.readShort();
+            if (i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
+                throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
+            }
+        }
         final int len = in.readByte();
         final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
         in.readFully(a);
@@ -78,6 +85,9 @@ public final class TransportAddress implements Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
+        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
+            out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
+        }
         byte[] bytes = address.getAddress().getAddress();  // 4 bytes (IPv4) or 16 bytes (IPv6)
         out.writeByte((byte) bytes.length); // 1 byte
         out.write(bytes, 0, bytes.length);
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java
index c3e60ec5be3..ee9aea9ed70 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/BaseFuture.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.common.util.concurrent;
 
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transports;
@@ -60,7 +61,9 @@ public abstract class BaseFuture<V> implements Future<V> {
     public V get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException, ExecutionException {
         assert timeout <= 0 ||
-            (Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON));
+            (Transports.assertNotTransportThread(BLOCKING_OP_REASON) &&
+                ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) &&
+                ClusterService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON));
         return sync.get(unit.toNanos(timeout));
     }
 
@@ -82,7 +85,9 @@ public abstract class BaseFuture<V> implements Future<V> {
     */
    @Override
    public V get() throws InterruptedException, ExecutionException {
-        assert Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON);
+        assert Transports.assertNotTransportThread(BLOCKING_OP_REASON) &&
+            ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) &&
+            ClusterService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON);
        return sync.get();
    }
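A generic sketch of the backward-compatibility framing above, written against plain `java.io` rather than the Elasticsearch stream API (all names here are illustrative): pre-6.0 readers expect a leading short "address type id" of 1, so a writer emits it only when the peer is older.

[source,java]
----
import java.io.DataOutputStream;
import java.io.IOException;

class BwcFramingDemo {
    static void writeAddress(DataOutputStream out, boolean peerBefore6x, byte[] ip) throws IOException {
        if (peerBefore6x) {
            out.writeShort(1);        // maps to the single 5.x address type
        }
        out.writeByte(ip.length);     // 4 (IPv4) or 16 (IPv6)
        out.write(ip);
        // host string and port would follow in a real implementation
    }
}
----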
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
index df68a9fe648..98ce54428c7 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -53,11 +53,9 @@ public class DiscoveryModule {
         new Setting<>("discovery.zen.hosts_provider", (String)null, Optional::ofNullable, Property.NodeScope);
 
     private final Discovery discovery;
-    private final ZenPing zenPing;
 
     public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NetworkService networkService,
-                           ClusterService clusterService, Function<UnicastHostsProvider, ZenPing> createZenPing,
-                           List<DiscoveryPlugin> plugins) {
+                           ClusterService clusterService, List<DiscoveryPlugin> plugins) {
         final UnicastHostsProvider hostsProvider;
 
         Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
@@ -79,14 +77,11 @@ public class DiscoveryModule {
             hostsProvider = Collections::emptyList;
         }
 
-        zenPing = createZenPing.apply(hostsProvider);
-
         Map<String, Supplier<Discovery>> discoveryTypes = new HashMap<>();
-        discoveryTypes.put("zen",
-            () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+        discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
         discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings()));
         for (DiscoveryPlugin plugin : plugins) {
-            plugin.getDiscoveryTypes(threadPool, transportService, clusterService, zenPing).entrySet().forEach(entry -> {
+            plugin.getDiscoveryTypes(threadPool, transportService, clusterService, hostsProvider).entrySet().forEach(entry -> {
                 if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) {
                     throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice");
                 }
@@ -103,9 +98,4 @@ public class DiscoveryModule {
     public Discovery getDiscovery() {
         return discovery;
     }
-
-    // TODO: remove this, it should be completely local to discovery, but service disruption tests want to mess with it
-    public ZenPing getZenPing() {
-        return zenPing;
-    }
 }
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index 8f02b037c20..f9a16243e00 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -107,7 +107,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
     private AllocationService allocationService;
     private final ClusterName clusterName;
     private final DiscoverySettings discoverySettings;
-    private final ZenPing zenPing;
+    protected final ZenPing zenPing; // protected to allow tests access
     private final MasterFaultDetection masterFD;
     private final NodesFaultDetection nodesFD;
     private final PublishClusterStateAction publishClusterState;
@@ -139,13 +139,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
     private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
 
     public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
-                        ClusterService clusterService, ClusterSettings clusterSettings, ZenPing zenPing) {
+                        ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         super(settings);
         this.clusterService = clusterService;
         this.clusterName = clusterService.getClusterName();
         this.transportService = transportService;
-        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
-        this.zenPing = zenPing;
+        this.discoverySettings = new DiscoverySettings(settings, clusterService.getClusterSettings());
+        this.zenPing = newZenPing(settings, threadPool, transportService, hostsProvider);
         this.electMaster = new ElectMasterService(settings);
         this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);
         this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
@@ -160,12 +160,15 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.ignore_non_master [{}]",
                 this.pingTimeout, joinTimeout, masterElectionIgnoreNonMasters);
 
-        clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
-            final ClusterState clusterState = clusterService.state();
-            int masterNodes = clusterState.nodes().getMasterNodes().size();
-            if (value > masterNodes) {
-                throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
-            }
+        clusterService.getClusterSettings().addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
+            this::handleMinimumMasterNodesChanged, (value) -> {
+                final ClusterState clusterState = clusterService.state();
+                int masterNodes = clusterState.nodes().getMasterNodes().size();
+                if (value > masterNodes) {
+                    throw new IllegalArgumentException("cannot set "
+                        + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current"
+                        + " master nodes count [" + masterNodes + "]");
+                }
         });
 
         this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterService);
@@ -188,6 +191,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
             DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
     }
 
+    // protected to allow overriding in tests
+    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                 UnicastHostsProvider hostsProvider) {
+        return new UnicastZenPing(settings, threadPool, transportService, hostsProvider);
+    }
+
     @Override
     public void setAllocationService(AllocationService allocationService) {
         this.allocationService = allocationService;
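The validator the constructor now registers is easy to restate on its own. A stand-alone illustration (not from the diff) of the rule it enforces for `discovery.zen.minimum_master_nodes` updates:

[source,java]
----
class MinMasterNodesValidatorDemo {
    // reject a minimum_master_nodes value larger than the current number of master-eligible nodes
    static void validate(int newMinimumMasterNodes, int currentMasterNodes) {
        if (newMinimumMasterNodes > currentMasterNodes) {
            throw new IllegalArgumentException("cannot set discovery.zen.minimum_master_nodes to more than the current"
                + " master nodes count [" + currentMasterNodes + "]");
        }
    }

    public static void main(String[] args) {
        validate(2, 3); // fine
        validate(4, 3); // throws IllegalArgumentException
    }
}
----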
+ */ + public MapperService newIndexMapperService(MapperRegistry mapperRegistry) throws IOException { + return new MapperService(indexSettings, analysisRegistry.build(indexSettings), + new SimilarityService(indexSettings, similarities), mapperRegistry, + () -> { throw new UnsupportedOperationException("no index query shard context available"); }); + } + /** * Forces a certain query cache to use instead of the default one. If this is set * and query caching is not disabled with {@code index.queries.cache.enabled}, then diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 90b400d70d9..dce125cbdf4 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -94,7 +94,6 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex { private final IndexEventListener eventListener; - private final IndexAnalyzers indexAnalyzers; private final IndexFieldDataService indexFieldData; private final BitsetFilterCache bitsetFilterCache; private final NodeEnvironment nodeEnv; @@ -147,12 +146,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust super(indexSettings); this.indexSettings = indexSettings; this.globalCheckpointSyncer = globalCheckpointSyncer; - this.indexAnalyzers = registry.build(indexSettings); this.similarityService = similarityService; - this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, + this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), similarityService, mapperRegistry, // we parse all percolator queries as they would be parsed on shard 0 () -> newQueryShardContext(0, null, () -> { - throw new IllegalArgumentException("Percolator queries are not allowed to use the curent timestamp"); + throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp"); })); this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); this.shardStoreDeleter = shardStoreDeleter; @@ -231,7 +229,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } public IndexAnalyzers getIndexAnalyzers() { - return this.indexAnalyzers; + return this.mapperService.getIndexAnalyzers(); } public MapperService mapperService() { @@ -255,7 +253,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } } finally { - IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, indexAnalyzers, refreshTask, fsyncTask, globalCheckpointTask); + IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, mapperService, refreshTask, fsyncTask, globalCheckpointTask); } } } @@ -336,7 +334,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } if (shards.containsKey(shardId.id())) { - throw new IndexShardAlreadyExistsException(shardId + " already exists"); + throw new IllegalStateException(shardId + " already exists"); } logger.debug("creating shard_id {}", shardId); diff --git a/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java deleted file mode 100644 index 7cdd869821e..00000000000 --- 
a/core/src/main/java/org/elasticsearch/index/IndexShardAlreadyExistsException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -public class IndexShardAlreadyExistsException extends ElasticsearchException { - - public IndexShardAlreadyExistsException(String message) { - super(message); - } - - public IndexShardAlreadyExistsException(StreamInput in) throws IOException { - super(in); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 1915efcb214..cb2a1af9539 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -23,7 +23,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.LongPoint; -import org.apache.lucene.index.XPointValues; +import org.apache.lucene.index.PointValues; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BoostQuery; @@ -300,13 +300,13 @@ public class DateFieldMapper extends FieldMapper { @Override public FieldStats.Date stats(IndexReader reader) throws IOException { String field = name(); - long size = XPointValues.size(reader, field); + long size = PointValues.size(reader, field); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, field); - byte[] min = XPointValues.getMinPackedValue(reader, field); - byte[] max = XPointValues.getMaxPackedValue(reader, field); + int docCount = PointValues.getDocCount(reader, field); + byte[] min = PointValues.getMinPackedValue(reader, field); + byte[] max = PointValues.getMaxPackedValue(reader, field); return new FieldStats.Date(reader.maxDoc(),docCount, -1L, size, isSearchable(), isAggregatable(), dateTimeFormatter(), LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0)); @@ -321,13 +321,13 @@ public class DateFieldMapper extends FieldMapper { dateParser = this.dateMathParser; } - if (XPointValues.size(reader, name()) == 0) { + if (PointValues.size(reader, name()) == 0) { // no points, so nothing matches return Relation.DISJOINT; } - long minValue = LongPoint.decodeDimension(XPointValues.getMinPackedValue(reader, name()), 0); - long maxValue = LongPoint.decodeDimension(XPointValues.getMaxPackedValue(reader, name()), 0); + long minValue = 
LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0); + long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0); long fromInclusive = Long.MIN_VALUE; if (from != null) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 90740b794a8..4be8de2056f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -25,7 +25,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.XPointValues; +import org.apache.lucene.index.PointValues; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; @@ -212,13 +212,13 @@ public class IpFieldMapper extends FieldMapper { @Override public FieldStats.Ip stats(IndexReader reader) throws IOException { String field = name(); - long size = XPointValues.size(reader, field); + long size = PointValues.size(reader, field); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, field); - byte[] min = XPointValues.getMinPackedValue(reader, field); - byte[] max = XPointValues.getMaxPackedValue(reader, field); + int docCount = PointValues.getDocCount(reader, field); + byte[] min = PointValues.getMinPackedValue(reader, field); + byte[] max = PointValues.getMaxPackedValue(reader, field); return new FieldStats.Ip(reader.maxDoc(), docCount, -1L, size, isSearchable(), isAggregatable(), InetAddressPoint.decode(min), InetAddressPoint.decode(max)); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index f2a958f6fcf..d848ce15331 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -44,6 +44,7 @@ import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.mapper.MapperRegistry; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -62,7 +63,7 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; -public class MapperService extends AbstractIndexComponent { +public class MapperService extends AbstractIndexComponent implements Closeable { /** * The reason why a mapping is being merged. @@ -624,6 +625,11 @@ public class MapperService extends AbstractIndexComponent { return parentTypes; } + @Override + public void close() throws IOException { + indexAnalyzers.close(); + } + /** * @return Whether a field is a metadata field. 
*/ diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index b9bc3a2860a..afdb6c83d50 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -29,7 +29,7 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.XPointValues; +import org.apache.lucene.index.PointValues; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -241,13 +241,13 @@ public class NumberFieldMapper extends FieldMapper { @Override FieldStats.Double stats(IndexReader reader, String fieldName, boolean isSearchable, boolean isAggregatable) throws IOException { - long size = XPointValues.size(reader, fieldName); + long size = PointValues.size(reader, fieldName); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, fieldName); - byte[] min = XPointValues.getMinPackedValue(reader, fieldName); - byte[] max = XPointValues.getMaxPackedValue(reader, fieldName); + int docCount = PointValues.getDocCount(reader, fieldName); + byte[] min = PointValues.getMinPackedValue(reader, fieldName); + byte[] max = PointValues.getMaxPackedValue(reader, fieldName); return new FieldStats.Double(reader.maxDoc(),docCount, -1L, size, isSearchable, isAggregatable, HalfFloatPoint.decodeDimension(min, 0), HalfFloatPoint.decodeDimension(max, 0)); @@ -325,13 +325,13 @@ public class NumberFieldMapper extends FieldMapper { @Override FieldStats.Double stats(IndexReader reader, String fieldName, boolean isSearchable, boolean isAggregatable) throws IOException { - long size = XPointValues.size(reader, fieldName); + long size = PointValues.size(reader, fieldName); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, fieldName); - byte[] min = XPointValues.getMinPackedValue(reader, fieldName); - byte[] max = XPointValues.getMaxPackedValue(reader, fieldName); + int docCount = PointValues.getDocCount(reader, fieldName); + byte[] min = PointValues.getMinPackedValue(reader, fieldName); + byte[] max = PointValues.getMaxPackedValue(reader, fieldName); return new FieldStats.Double(reader.maxDoc(),docCount, -1L, size, isSearchable, isAggregatable, FloatPoint.decodeDimension(min, 0), FloatPoint.decodeDimension(max, 0)); @@ -409,13 +409,13 @@ public class NumberFieldMapper extends FieldMapper { @Override FieldStats.Double stats(IndexReader reader, String fieldName, boolean isSearchable, boolean isAggregatable) throws IOException { - long size = XPointValues.size(reader, fieldName); + long size = PointValues.size(reader, fieldName); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, fieldName); - byte[] min = XPointValues.getMinPackedValue(reader, fieldName); - byte[] max = XPointValues.getMaxPackedValue(reader, fieldName); + int docCount = PointValues.getDocCount(reader, fieldName); + byte[] min = PointValues.getMinPackedValue(reader, fieldName); + byte[] max = PointValues.getMaxPackedValue(reader, fieldName); return new FieldStats.Double(reader.maxDoc(),docCount, -1L, size, isSearchable, isAggregatable, DoublePoint.decodeDimension(min, 0), DoublePoint.decodeDimension(max, 0)); @@ -627,13 +627,13 @@ public class 
NumberFieldMapper extends FieldMapper { @Override FieldStats.Long stats(IndexReader reader, String fieldName, boolean isSearchable, boolean isAggregatable) throws IOException { - long size = XPointValues.size(reader, fieldName); + long size = PointValues.size(reader, fieldName); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, fieldName); - byte[] min = XPointValues.getMinPackedValue(reader, fieldName); - byte[] max = XPointValues.getMaxPackedValue(reader, fieldName); + int docCount = PointValues.getDocCount(reader, fieldName); + byte[] min = PointValues.getMinPackedValue(reader, fieldName); + byte[] max = PointValues.getMaxPackedValue(reader, fieldName); return new FieldStats.Long(reader.maxDoc(),docCount, -1L, size, isSearchable, isAggregatable, IntPoint.decodeDimension(min, 0), IntPoint.decodeDimension(max, 0)); @@ -723,13 +723,13 @@ public class NumberFieldMapper extends FieldMapper { @Override FieldStats.Long stats(IndexReader reader, String fieldName, boolean isSearchable, boolean isAggregatable) throws IOException { - long size = XPointValues.size(reader, fieldName); + long size = PointValues.size(reader, fieldName); if (size == 0) { return null; } - int docCount = XPointValues.getDocCount(reader, fieldName); - byte[] min = XPointValues.getMinPackedValue(reader, fieldName); - byte[] max = XPointValues.getMaxPackedValue(reader, fieldName); + int docCount = PointValues.getDocCount(reader, fieldName); + byte[] min = PointValues.getMinPackedValue(reader, fieldName); + byte[] max = PointValues.getMaxPackedValue(reader, fieldName); return new FieldStats.Long(reader.maxDoc(),docCount, -1L, size, isSearchable, isAggregatable, LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0)); diff --git a/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java b/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java deleted file mode 100644 index 920d2ea51b7..00000000000 --- a/core/src/main/java/org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.indices; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; - -public class IndexTemplateAlreadyExistsException extends ElasticsearchException { - - private final String name; - - public IndexTemplateAlreadyExistsException(String name) { - super("index_template [" + name + "] already exists"); - this.name = name; - } - - public IndexTemplateAlreadyExistsException(StreamInput in) throws IOException { - super(in); - name = in.readOptionalString(); - } - - public String name() { - return this.name; - } - - @Override - public RestStatus status() { - return RestStatus.BAD_REQUEST; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalString(name); - } -} diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index bbbaec9a7e3..befecd8df52 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; @@ -464,6 +463,21 @@ public class IndicesService extends AbstractLifecycleComponent indicesFieldDataCache); } + /** + * creates a new mapper service for the given index, in order to do administrative work like mapping updates. + * This *should not* be used for document parsing. Doing so will result in an exception. + * + * Note: the returned {@link MapperService} should be closed when unneeded. + */ + public synchronized MapperService createIndexMapperService(IndexMetaData indexMetaData) throws IOException { + final Index index = indexMetaData.getIndex(); + final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); + final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); + final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry); + pluginsService.onIndexModule(indexModule); + return indexModule.newIndexMapperService(mapperRegistry); + } + /** * This method verifies that the given {@code metaData} holds sane values to create an {@link IndexService}. * This method tries to update the meta data of the created {@link IndexService} if the given {@code metaDataUpdate} is different from the given {@code metaData}. 
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 0ee93db4852..47ccd568b4d 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -51,7 +51,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexComponent; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexShardAlreadyExistsException; import org.elasticsearch.index.seqno.GlobalCheckpointService; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.shard.IndexEventListener; @@ -539,10 +538,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple RecoveryState recoveryState = new RecoveryState(shardRouting, nodes.getLocalNode(), sourceNode); indicesService.createShard(shardRouting, recoveryState, recoveryTargetService, new RecoveryListener(shardRouting), repositoriesService, failedShardHandler); - } catch (IndexShardAlreadyExistsException e) { - // ignore this, the method call can happen several times - logger.debug("Trying to create shard that already exists", e); - assert false; } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 57853824fb3..9eb7f9a0376 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -399,8 +399,7 @@ public class Node implements Closeable { } final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, - networkService, clusterService, hostsProvider -> newZenPing(settings, threadPool, transportService, hostsProvider), - pluginsService.filterPlugins(DiscoveryPlugin.class)); + networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class)); pluginsService.processModules(modules); modules.add(b -> { b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry()); @@ -434,7 +433,6 @@ public class Node implements Closeable { indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings())); b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(Discovery.class).toInstance(discoveryModule.getDiscovery()); - b.bind(ZenPing.class).toInstance(discoveryModule.getZenPing()); { RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings); @@ -873,12 +871,6 @@ public class Node implements Closeable { return customNameResolvers; } - /** Create a new ZenPing instance for use in zen discovery. */ - protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, - UnicastHostsProvider hostsProvider) { - return new UnicastZenPing(settings, threadPool, transportService, hostsProvider); - } - /** Constructs an internal node used as a client into a cluster fronted by this tribe node. 
*/ protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { return new Node(new Environment(settings), classpathPlugins); diff --git a/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index 3d769d27a87..2198297129e 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -49,7 +49,7 @@ public interface ActionPlugin { /** * Actions added by this plugin. */ - default List, ? extends ActionResponse>> getActions() { + default List> getActions() { return Collections.emptyList(); } /** @@ -72,7 +72,7 @@ public interface ActionPlugin { return Collections.emptyList(); } - final class ActionHandler, Response extends ActionResponse> { + final class ActionHandler { private final GenericAction action; private final Class> transportAction; private final Class[] supportTransportActions; diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index adb8bfcc388..37b97855084 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -57,10 +57,10 @@ public interface DiscoveryPlugin { * @param threadPool Use to schedule ping actions * @param transportService Use to communicate with other nodes * @param clusterService Use to find current nodes in the cluster - * @param zenPing Use to ping other nodes with zen unicast host list + * @param hostsProvider Use to find configured hosts which should be pinged for initial discovery */ default Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, ZenPing zenPing) { + ClusterService clusterService, UnicastHostsProvider hostsProvider) { return Collections.emptyMap(); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java index ccfbf3515fc..cb8310975fc 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java @@ -47,6 +47,11 @@ public final class ParentFieldSubFetchPhase implements FetchSubPhase { } String parentId = getParentId(parentFieldMapper, hitContext.reader(), hitContext.docId()); + if (parentId == null) { + // hit has no _parent field. Can happen for nested inner hits if parent hit is a p/c document. + return; + } + Map fields = hitContext.hit().fieldsOrNull(); if (fields == null) { fields = new HashMap<>(); @@ -59,8 +64,7 @@ public final class ParentFieldSubFetchPhase implements FetchSubPhase { try { SortedDocValues docValues = reader.getSortedDocValues(fieldMapper.name()); BytesRef parentId = docValues.get(docId); - assert parentId.length > 0; - return parentId.utf8ToString(); + return parentId.length > 0 ? 
parentId.utf8ToString() : null; } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 9e4e0262080..086c48c4114 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -1231,8 +1231,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } streamIn = compressor.streamInput(streamIn); } - if (version.onOrAfter(getCurrentVersion().minimumCompatibilityVersion()) == false - || version.major != getCurrentVersion().major) { + if (version.isCompatible(getCurrentVersion()) == false) { throw new IllegalStateException("Received message from unsupported version: [" + version + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]"); } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 842ff05409f..7b1c83d66aa 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -371,18 +371,13 @@ public class TransportService extends AbstractLifecycleComponent { if (checkClusterName && !Objects.equals(clusterName, response.clusterName)) { throw new IllegalStateException("handshake failed, mismatched cluster name [" + response.clusterName + "] - " + node); - } else if (!isVersionCompatible(response.version)) { + } else if (response.version.isCompatible((localNode != null ? localNode.getVersion() : Version.CURRENT)) == false) { throw new IllegalStateException("handshake failed, incompatible version [" + response.version + "] - " + node); } return response.discoveryNode; } - private boolean isVersionCompatible(Version version) { - return version.minimumCompatibilityVersion().equals( - localNode != null ? localNode.getVersion().minimumCompatibilityVersion() : Version.CURRENT.minimumCompatibilityVersion()); - } - static class HandshakeRequest extends TransportRequest { public static final HandshakeRequest INSTANCE = new HandshakeRequest(); diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 999f036d9f4..1fa2043d547 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
-grant codeBase "${codebase.lucene-core-6.3.0-snapshot-a66a445.jar}" { +grant codeBase "${codebase.lucene-core-6.3.0.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.3.0-snapshot-a66a445.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-6.3.0-snapshot-a66a445.jar}" { +grant codeBase "${codebase.lucene-misc-6.3.0.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 1c780f96933..9492b72d030 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.3.0-snapshot-a66a445.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.3.0.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 8a2e965a7b4..7c8b9c9f2a6 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -57,7 +57,6 @@ import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; -import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; @@ -336,16 +335,6 @@ public class ExceptionSerializationTests extends ESTestCase { assertTrue(ex.getCause() instanceof NullPointerException); } - public void testIndexTemplateAlreadyExistsException() throws IOException { - IndexTemplateAlreadyExistsException ex = serialize(new IndexTemplateAlreadyExistsException("the dude abides!")); - assertEquals("the dude abides!", ex.name()); - assertEquals("index_template [the dude abides!] 
already exists", ex.getMessage()); - - ex = serialize(new IndexTemplateAlreadyExistsException((String) null)); - assertNull(ex.name()); - assertEquals("index_template [null] already exists", ex.getMessage()); - } - public void testBatchOperationException() throws IOException { ShardId id = new ShardId("foo", "_na_", 1); TranslogRecoveryPerformer.BatchOperationException ex = serialize( @@ -683,11 +672,11 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(44, org.elasticsearch.indices.recovery.RecoveryFailedException.class); ids.put(45, org.elasticsearch.index.shard.IndexShardRelocatedException.class); ids.put(46, org.elasticsearch.transport.NodeShouldNotConnectException.class); - ids.put(47, org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class); + ids.put(47, null); ids.put(48, org.elasticsearch.index.translog.TranslogCorruptedException.class); ids.put(49, org.elasticsearch.cluster.block.ClusterBlockException.class); ids.put(50, org.elasticsearch.search.fetch.FetchPhaseExecutionException.class); - ids.put(51, org.elasticsearch.index.IndexShardAlreadyExistsException.class); + ids.put(51, null); ids.put(52, org.elasticsearch.index.engine.VersionConflictEngineException.class); ids.put(53, org.elasticsearch.index.engine.EngineException.class); ids.put(54, null); // was DocumentAlreadyExistsException, which is superseded with VersionConflictEngineException diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index b3d1571c4ec..f8c31ee3189 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -134,6 +134,10 @@ public class VersionTests extends ESTestCase { assertThat(Version.V_2_2_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0)); assertThat(Version.V_2_3_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0)); assertThat(Version.V_5_0_0_alpha1.minimumCompatibilityVersion(), equalTo(Version.V_5_0_0_alpha1)); + // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 
5.x is + released since we need to bump the supported minor in Version#minimumCompatibilityVersion() + assertThat("did you forget to bump the minor in Version#minimumCompatibilityVersion()", + Version.V_6_0_0_alpha1.minimumCompatibilityVersion(), equalTo(VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1))); } public void testToString() { @@ -222,7 +226,7 @@ public class VersionTests extends ESTestCase { assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers())); Version v = (Version) versionConstant.get(Version.class); - logger.info("Checking {}", v); + logger.debug("Checking {}", v); assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId)); assertEquals("Version " + constantName + " does not have correct id", versionId, v.id); if (v.major >= 2) { @@ -277,9 +281,7 @@ public class VersionTests extends ESTestCase { assertUnknownVersion(V_20_0_0_UNRELEASED); expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant - assertUnknownVersion(IndexTemplateMetaData.V_5_1_0); assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant - assertUnknownVersion(PutIndexTemplateRequest.V_5_1_0); assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED); assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED); // once we released 5.0.0 and it's added to Version.java we need to remove this constant @@ -291,4 +293,18 @@ assertFalse("Version " + version + " has been released, don't use a new instance of this version", VersionUtils.allVersions().contains(version)); } + + public void testIsCompatible() { + assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); + assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1)); + assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1)); + assertFalse(isCompatible(Version.V_2_0_0, Version.V_5_0_0)); + } + + public boolean isCompatible(Version left, Version right) { + boolean result = left.isCompatible(right); + assert result == right.isCompatible(left); + return result; + } + } diff --git a/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 2c17cea5ef5..a0437f05c4c 100644 --- a/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/core/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -52,7 +52,7 @@ public class ActionModuleTests extends ESTestCase { public void testPluginCantOverwriteBuiltinAction() { ActionPlugin dupsMainAction = new ActionPlugin() { @Override - public List, ? extends ActionResponse>> getActions() { + public List> getActions() { return singletonList(new ActionHandler<>(MainAction.INSTANCE, TransportMainAction.class)); } }; @@ -61,7 +61,7 @@ } public void testPluginCanRegisterAction() { - class FakeRequest extends ActionRequest { @Override public ActionRequestValidationException validate() { return null; @@ -90,7 +90,7 @@ FakeAction action = new FakeAction(); ActionPlugin registersFakeAction = new ActionPlugin() { @Override - public List, ? 
extends ActionResponse>> getActions() { + public List> getActions() { return singletonList(new ActionHandler<>(action, FakeTransportAction.class)); } }; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 340d7199234..28aff0d3bda 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -70,7 +70,7 @@ import static org.elasticsearch.test.ESTestCase.awaitBusy; public class TestTaskPlugin extends Plugin implements ActionPlugin { @Override - public List, ? extends ActionResponse>> getActions() { + public List> getActions() { return Arrays.asList(new ActionHandler<>(TestTaskAction.INSTANCE, TransportTestTaskAction.class), new ActionHandler<>(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class)); } diff --git a/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index bbf1d2f1942..228c68a1760 100644 --- a/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -222,7 +222,7 @@ public class TransportActionFilterChainTests extends ESTestCase { RequestTestFilter testFilter = new RequestTestFilter(randomInt(), new RequestCallback() { @Override - public , Response extends ActionResponse> void execute(Task task, String action, Request request, + public void execute(Task task, String action, Request request, ActionListener listener, ActionFilterChain actionFilterChain) { for (int i = 0; i <= additionalContinueCount; i++) { actionFilterChain.proceed(task, action, request, listener); @@ -349,7 +349,7 @@ public class TransportActionFilterChainTests extends ESTestCase { } @Override - public , Response extends ActionResponse> void apply(Task task, String action, Request request, + public void apply(Task task, String action, Request request, ActionListener listener, ActionFilterChain chain) { this.runs.incrementAndGet(); this.lastActionName = action; @@ -382,7 +382,7 @@ public class TransportActionFilterChainTests extends ESTestCase { } @Override - public , Response extends ActionResponse> void apply(Task task, String action, Request request, + public void apply(Task task, String action, Request request, ActionListener listener, ActionFilterChain chain) { chain.proceed(task, action, request, listener); } @@ -400,7 +400,7 @@ public class TransportActionFilterChainTests extends ESTestCase { private static enum RequestOperation implements RequestCallback { CONTINUE_PROCESSING { @Override - public , Response extends ActionResponse> void execute(Task task, String action, Request request, + public void execute(Task task, String action, Request request, ActionListener listener, ActionFilterChain actionFilterChain) { actionFilterChain.proceed(task, action, request, listener); } @@ -408,14 +408,14 @@ public class TransportActionFilterChainTests extends ESTestCase { LISTENER_RESPONSE { @Override @SuppressWarnings("unchecked") // Safe because its all we test with - public , Response extends ActionResponse> void execute(Task task, String action, Request request, + public void execute(Task task, String action, Request request, ActionListener listener, 
ActionFilterChain actionFilterChain) { ((ActionListener) listener).onResponse(new TestResponse()); } }, LISTENER_FAILURE { @Override - public , Response extends ActionResponse> void execute(Task task, String action, Request request, + public void execute(Task task, String action, Request request, ActionListener listener, ActionFilterChain actionFilterChain) { listener.onFailure(new ElasticsearchTimeoutException("")); } @@ -448,7 +448,7 @@ public class TransportActionFilterChainTests extends ESTestCase { } private interface RequestCallback { - , Response extends ActionResponse> void execute(Task task, String action, Request request, + void execute(Task task, String action, Request request, ActionListener listener, ActionFilterChain actionFilterChain); } @@ -457,7 +457,7 @@ public class TransportActionFilterChainTests extends ESTestCase { ActionFilterChain chain); } - public static class TestRequest extends ActionRequest { + public static class TestRequest extends ActionRequest { @Override public ActionRequestValidationException validate() { return null; diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 73085276628..96ba5729cb8 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -45,11 +46,6 @@ import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class IndexingMasterFailoverIT extends ESIntegTestCase { - @Override - protected boolean addMockZenPings() { - return false; - } - @Override protected Collection> nodePlugins() { final HashSet> classes = new HashSet<>(super.nodePlugins()); @@ -57,6 +53,12 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { return classes; } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); + } + /** * Indexing operations which entail mapping changes require a blocking request to the master node to update the mapping. * If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits. diff --git a/core/src/test/java/org/elasticsearch/client/ParentTaskAssigningClientTests.java b/core/src/test/java/org/elasticsearch/client/ParentTaskAssigningClientTests.java index 360137b8904..35406ef1153 100644 --- a/core/src/test/java/org/elasticsearch/client/ParentTaskAssigningClientTests.java +++ b/core/src/test/java/org/elasticsearch/client/ParentTaskAssigningClientTests.java @@ -38,7 +38,7 @@ public class ParentTaskAssigningClientTests extends ESTestCase { // This mock will do nothing but verify that parentTaskId is set on all requests sent to it. 
NoOpClient mock = new NoOpClient(getTestName()) { @Override - protected < Request extends ActionRequest, + protected < Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder > void doExecute( Action action, Request request, diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index dbb066dcb1b..0772e87d900 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -19,6 +19,9 @@ package org.elasticsearch.client.transport; +import java.io.IOException; +import java.util.Arrays; + import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -32,14 +35,11 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.discovery.MockZenPing; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.Arrays; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -66,7 +66,7 @@ public class TransportClientIT extends ESIntegTestCase { .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false) .put("cluster.name", "foobar") - .build(), Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) { + .build(), Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class)).start()) { TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); client.addTransportAddress(transportAddress); // since we force transport clients there has to be one node started that we connect to. 
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 3cafff08a07..111a3b1fe10 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -93,7 +93,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { } @Override - protected boolean apply(String action, ActionRequest request, ActionListener listener) { + protected boolean apply(String action, ActionRequest request, ActionListener listener) { if (blockedActions.contains(action)) { throw new ElasticsearchException("force exception on [" + action + "]"); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 81bc7db0b6b..9d44dbbca38 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -56,6 +57,7 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.Collections; import java.util.List; +import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -238,13 +240,19 @@ public class ClusterStateDiffIT extends ESIntegTestCase { for (int i = 0; i < shardCount; i++) { IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, "_na_", i)); int replicaCount = randomIntBetween(1, 10); + Set availableNodeIds = Sets.newHashSet(nodeIds); for (int j = 0; j < replicaCount; j++) { UnassignedInfo unassignedInfo = null; if (randomInt(5) == 1) { unassignedInfo = new UnassignedInfo(randomReason(), randomAsciiOfLength(10)); } + if (availableNodeIds.isEmpty()) { + break; + } + String nodeId = randomFrom(availableNodeIds); + availableNodeIds.remove(nodeId); indexShard.addShard( - TestShardRouting.newShardRouting(index, i, randomFrom(nodeIds), null, j == 0, + TestShardRouting.newShardRouting(index, i, nodeId, null, j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)), unassignedInfo)); } builder.addIndexShard(indexShard.build()); @@ -258,8 +266,20 @@ public class ClusterStateDiffIT extends ESIntegTestCase { private IndexRoutingTable randomChangeToIndexRoutingTable(IndexRoutingTable original, String[] nodes) { IndexRoutingTable.Builder builder = IndexRoutingTable.builder(original.getIndex()); for (ObjectCursor indexShardRoutingTable : original.shards().values()) { + Set availableNodes = Sets.newHashSet(nodes); for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) { - final ShardRouting updatedShardRouting = randomChange(shardRouting, nodes); + availableNodes.remove(shardRouting.currentNodeId()); + if (shardRouting.relocating()) { + availableNodes.remove(shardRouting.relocatingNodeId()); + } + } + + for (ShardRouting shardRouting : indexShardRoutingTable.value.shards()) { + final ShardRouting updatedShardRouting = randomChange(shardRouting, 
availableNodes); + availableNodes.remove(updatedShardRouting.currentNodeId()); + if (shardRouting.relocating()) { + availableNodes.remove(updatedShardRouting.relocatingNodeId()); + } builder.addShard(updatedShardRouting); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index fd68e484062..3e58291d4ad 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDelay; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -74,8 +75,9 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { } @Override - protected boolean addMockZenPings() { - return false; + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } public void testSimpleMinimumMasterNodes() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index eb5c88d7e83..feaeee703b6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -353,7 +353,7 @@ public class ClusterStateHealthTests extends ESTestCase { final int numberOfReplicas, final boolean withPrimaryAllocationFailures) { // generate random node ids - final List nodeIds = new ArrayList<>(); + final Set nodeIds = new HashSet<>(); final int numNodes = randomIntBetween(numberOfReplicas + 1, 10); for (int i = 0; i < numNodes; i++) { nodeIds.add(randomAsciiOfLength(8)); @@ -372,7 +372,7 @@ public class ClusterStateHealthTests extends ESTestCase { for (final ShardRouting shardRouting : shardRoutingTable.getShards()) { if (shardRouting.primary()) { newIndexRoutingTable.addShard( - shardRouting.initialize(nodeIds.get(randomIntBetween(0, numNodes - 1)), null, shardRouting.getExpectedShardSize()) + shardRouting.initialize(randomFrom(nodeIds), null, shardRouting.getExpectedShardSize()) ); } else { newIndexRoutingTable.addShard(shardRouting); @@ -460,17 +460,15 @@ public class ClusterStateHealthTests extends ESTestCase { newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex()); for (final ObjectCursor shardEntry : indexRoutingTable.getShards().values()) { final IndexShardRoutingTable shardRoutingTable = shardEntry.value; + final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); + Set allocatedNodes = new HashSet<>(); + allocatedNodes.add(primaryNodeId); for (final ShardRouting shardRouting : shardRoutingTable.getShards()) { if (shardRouting.primary() == false) { // give the replica a different node id than the primary - final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); - String replicaNodeId; - do { - replicaNodeId = nodeIds.get(randomIntBetween(0, numNodes - 1)); - } while 
(primaryNodeId.equals(replicaNodeId)); - newIndexRoutingTable.addShard( - shardRouting.initialize(replicaNodeId, null, shardRouting.getExpectedShardSize()) - ); + String replicaNodeId = randomFrom(Sets.difference(nodeIds, allocatedNodes)); + newIndexRoutingTable.addShard(shardRouting.initialize(replicaNodeId, null, shardRouting.getExpectedShardSize())); + allocatedNodes.add(replicaNodeId); } else { newIndexRoutingTable.addShard(shardRouting); } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index c6ce30e2a52..1dfd3fd33e6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -18,11 +18,17 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.util.Collections; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -63,4 +69,34 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { assertThat(documentMapper.parentFieldMapper().active(), is(true)); } + public void testMappingClusterStateUpdateDoesntChangeExistingIndices() throws Exception { + final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + final CompressedXContent currentMapping = indexService.mapperService().documentMapper("type").mappingSource(); + + final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + // TODO - it will be nice to get a random mapping generator + final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); + request.source("{ \"properties\" { \"field\": { \"type\": \"string\" }}}"); + mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)); + assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping)); + } + + public void testClusterStateIsNotChangedWithIdenticalMappings() throws Exception { + createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); + + final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type"); + request.source("{ \"properties\" { \"field\": { \"type\": \"string\" }}}"); + ClusterState result = mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request)) + .resultingState; + + assertFalse(result != clusterService.state()); + + ClusterState result2 = mappingService.putMappingExecutor.execute(result, Collections.singletonList(request)) + .resultingState; + + 
assertSame(result, result2); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 0d284a1e47e..93326e54db9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -62,8 +63,9 @@ public class PrimaryAllocationIT extends ESIntegTestCase { } @Override - protected boolean addMockZenPings() { - return false; + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } private void createStaleReplicaScenario() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java index c3064c7fa9d..69773e99921 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import java.util.Set; + import static org.elasticsearch.test.ESTestCase.randomAsciiOfLength; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomInt; @@ -31,7 +33,7 @@ public final class RandomShardRoutingMutator { } - public static ShardRouting randomChange(ShardRouting shardRouting, String[] nodes) { + public static ShardRouting randomChange(ShardRouting shardRouting, Set nodes) { switch (randomInt(2)) { case 0: if (shardRouting.unassigned() == false && shardRouting.primary() == false) { @@ -42,7 +44,7 @@ public final class RandomShardRoutingMutator { } break; case 1: - if (shardRouting.unassigned()) { + if (shardRouting.unassigned() && nodes.isEmpty() == false) { shardRouting = shardRouting.initialize(randomFrom(nodes), null, -1); } break; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 6ed42ee45aa..e26fece7c6d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -28,9 +28,12 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes.Builder; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; import org.junit.Before; +import java.util.Arrays; import java.util.Set; import java.util.stream.Collectors; @@ -328,6 +331,19 @@ public class RoutingTableTests extends ESAllocationTestCase { 
expectThrows(IllegalStateException.class, () -> indexRoutingTable.validate(metaData4)); } + public void testDistinctNodes() { + ShardId shardId = new ShardId(new Index("index", "uuid"), 0); + ShardRouting routing1 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); + ShardRouting routing2 = TestShardRouting.newShardRouting(shardId, "node2", randomBoolean(), ShardRoutingState.STARTED); + ShardRouting routing3 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); + ShardRouting routing4 = TestShardRouting.newShardRouting(shardId, "node3", "node2", randomBoolean(), ShardRoutingState.RELOCATING); + assertTrue(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2))); + assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing3))); + assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2, routing3))); + assertTrue(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing4))); + assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing2, routing4))); + } + /** reverse engineer the in sync aid based on the given indexRoutingTable **/ public static IndexMetaData updateActiveAllocations(IndexRoutingTable indexRoutingTable, IndexMetaData indexMetaData) { IndexMetaData.Builder imdBuilder = IndexMetaData.builder(indexMetaData); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index 1ea6853ee7c..9fd4fc18514 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.test.ESTestCase; @@ -68,8 +69,10 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -303,6 +306,61 @@ public class ClusterServiceTests extends ESTestCase { assertTrue(published.get()); } + public void testBlockingCallInClusterStateTaskListenerFails() throws InterruptedException { + assumeTrue("assertions must be enabled for this test to work", BaseFuture.class.desiredAssertionStatus()); + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference assertionRef = new AtomicReference<>(); + + clusterService.submitStateUpdateTask( + "testBlockingCallInClusterStateTaskListenerFails", + new Object(), + ClusterStateTaskConfig.build(Priority.NORMAL), + new ClusterStateTaskExecutor() { + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterState newClusterState = 
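The new testDistinctNodes test pins down a routing-table invariant: two copies of the same shard may never share a node, and the target node of a relocation counts as occupied. The production check is IndexShardRoutingTable.Builder.distinctNodes; the sketch below is a hypothetical mirror of it, inferred purely from the five assertions above:

[source,java]
----
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class ShardCopy {
    final String currentNodeId;
    final String relocatingNodeId; // null unless the copy is relocating

    ShardCopy(String currentNodeId, String relocatingNodeId) {
        this.currentNodeId = currentNodeId;
        this.relocatingNodeId = relocatingNodeId;
    }

    static boolean distinctNodes(List<ShardCopy> copies) {
        Set<String> seen = new HashSet<>();
        for (ShardCopy copy : copies) {
            if (seen.add(copy.currentNodeId) == false) {
                return false; // two copies on one node
            }
            if (copy.relocatingNodeId != null && seen.add(copy.relocatingNodeId) == false) {
                return false; // a copy is moving onto an occupied node
            }
        }
        return true;
    }
}
----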
ClusterState.builder(currentState).build(); + return BatchResult.builder().successes(tasks).build(newClusterState); + } + + @Override + public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + assertNotNull(assertionRef.get()); + } + }, + new ClusterStateTaskListener() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + BaseFuture future = new BaseFuture() {}; + try { + if (randomBoolean()) { + future.get(1L, TimeUnit.SECONDS); + } else { + future.get(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } catch (AssertionError e) { + assertionRef.set(e); + latch.countDown(); + } + } + + @Override + public void onFailure(String source, Exception e) { + } + } + ); + + latch.await(); + assertNotNull(assertionRef.get()); + assertThat(assertionRef.get().getMessage(), containsString("not be the cluster state update thread. Reason: [Blocking operation]")); + } + public void testOneExecutorDontStarveAnother() throws InterruptedException { final List executionOrder = Collections.synchronizedList(new ArrayList<>()); final Semaphore allowProcessing = new Semaphore(0); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index a5262922efe..a2001504f19 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -23,6 +23,8 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.Supplier; @@ -39,15 +41,22 @@ import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.NoopDiscovery; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; +import org.mockito.Mock; +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class DiscoveryModuleTests extends ESTestCase { private TransportService transportService; private ClusterService clusterService; + private ThreadPool threadPool; public interface DummyHostsProviderPlugin extends DiscoveryPlugin { Map> impl(); @@ -62,52 +71,51 @@ public class DiscoveryModuleTests extends ESTestCase { Map> impl(); @Override default Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, ZenPing zenPing) { + ClusterService clusterService, UnicastHostsProvider hostsProvider) { return impl(); } } @Before public void setupDummyServices() { - ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, null, null); - clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null); + clusterService = mock(ClusterService.class); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + threadPool = 
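testBlockingCallInClusterStateTaskListenerFails leans on an assertion inside BaseFuture that rejects blocking get() calls made from the cluster state update thread, which is why it first checks desiredAssertionStatus(): the guard only exists when the JVM runs with -ea. A toy, JDK-only version of the pattern follows; the thread-name convention is an assumption of this sketch, not the real check:

[source,java]
----
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class BlockingGuardDemo {
    static class GuardedFuture<T> extends CompletableFuture<T> {
        @Override
        public T get() throws InterruptedException, ExecutionException {
            // refuse to block a thread whose name marks it as the (toy) state updater
            assert Thread.currentThread().getName().startsWith("clusterStateUpdater") == false
                    : "Blocking operation (future.get()) on the cluster state update thread";
            return super.get();
        }
    }

    public static void main(String[] args) throws Exception {
        GuardedFuture<String> future = new GuardedFuture<>();
        future.complete("ok");
        Thread t = new Thread(() -> {
            try {
                future.get(); // trips the assertion when run with -ea
            } catch (AssertionError expected) {
                System.out.println("caught: " + expected.getMessage());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }, "clusterStateUpdater-0");
        t.start();
        t.join();
    }
}
----

The test above applies the same idea: it stashes the AssertionError in an AtomicReference, counts down a latch, and asserts on the captured error once the listener has run.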
mock(ThreadPool.class); } @After public void clearDummyServices() throws IOException { - IOUtils.close(transportService, clusterService); - transportService = null; - clusterService = null; + IOUtils.close(transportService); } - private DiscoveryModule newModule(Settings settings, Function createZenPing, - List plugins) { - return new DiscoveryModule(settings, null, transportService, null, clusterService, createZenPing, plugins); + private DiscoveryModule newModule(Settings settings, List plugins) { + return new DiscoveryModule(settings, threadPool, transportService, null, clusterService, plugins); } public void testDefaults() { - DiscoveryModule module = newModule(Settings.EMPTY, hostsProvider -> null, Collections.emptyList()); + DiscoveryModule module = newModule(Settings.EMPTY, Collections.emptyList()); assertTrue(module.getDiscovery() instanceof ZenDiscovery); } public void testLazyConstructionDiscovery() { DummyDiscoveryPlugin plugin = () -> Collections.singletonMap("custom", () -> { throw new AssertionError("created discovery type which was not selected"); }); - newModule(Settings.EMPTY, hostsProvider -> null, Collections.singletonList(plugin)); + newModule(Settings.EMPTY, Collections.singletonList(plugin)); } public void testRegisterDiscovery() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "custom").build(); DummyDiscoveryPlugin plugin = () -> Collections.singletonMap("custom", NoopDiscovery::new); - DiscoveryModule module = newModule(settings, hostsProvider -> null, Collections.singletonList(plugin)); + DiscoveryModule module = newModule(settings, Collections.singletonList(plugin)); assertTrue(module.getDiscovery() instanceof NoopDiscovery); } public void testUnknownDiscovery() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - newModule(settings, hostsProvider -> null, Collections.emptyList())); + newModule(settings, Collections.emptyList())); assertEquals("Unknown discovery type [dne]", e.getMessage()); } @@ -115,24 +123,26 @@ public class DiscoveryModuleTests extends ESTestCase { DummyDiscoveryPlugin plugin1 = () -> Collections.singletonMap("dup", () -> null); DummyDiscoveryPlugin plugin2 = () -> Collections.singletonMap("dup", () -> null); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - newModule(Settings.EMPTY, hostsProvider -> null, Arrays.asList(plugin1, plugin2))); + newModule(Settings.EMPTY, Arrays.asList(plugin1, plugin2))); assertEquals("Cannot register discovery type [dup] twice", e.getMessage()); } public void testHostsProvider() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "custom").build(); final UnicastHostsProvider provider = Collections::emptyList; - DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> provider); - newModule(settings, hostsProvider -> { - assertEquals(provider, hostsProvider); - return null; - }, Collections.singletonList(plugin)); + AtomicBoolean created = new AtomicBoolean(false); + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { + created.set(true); + return Collections::emptyList; + }); + newModule(settings, Collections.singletonList(plugin)); + assertTrue(created.get()); } public void testUnknownHostsProvider() { Settings settings = 
Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - newModule(settings, hostsProvider -> null, Collections.emptyList())); + newModule(settings, Collections.emptyList())); assertEquals("Unknown zen hosts provider [dne]", e.getMessage()); } @@ -140,13 +150,13 @@ public class DiscoveryModuleTests extends ESTestCase { DummyHostsProviderPlugin plugin1 = () -> Collections.singletonMap("dup", () -> null); DummyHostsProviderPlugin plugin2 = () -> Collections.singletonMap("dup", () -> null); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - newModule(Settings.EMPTY, hostsProvider -> null, Arrays.asList(plugin1, plugin2))); + newModule(Settings.EMPTY, Arrays.asList(plugin1, plugin2))); assertEquals("Cannot register zen hosts provider [dup] twice", e.getMessage()); } public void testLazyConstructionHostsProvider() { DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { throw new AssertionError("created hosts provider which was not selected"); }); - newModule(Settings.EMPTY, hostsProvider -> null, Collections.singletonList(plugin)); + newModule(Settings.EMPTY, Collections.singletonList(plugin)); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 65ead9c09f1..22844e05881 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -67,6 +67,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.LongGCDisruption; @@ -129,14 +130,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { private ClusterDiscoveryConfiguration discoveryConfig; - @Override - protected boolean addMockZenPings() { - return false; - } - @Override protected Settings nodeSettings(int nodeOrdinal) { - return discoveryConfig.nodeSettings(nodeOrdinal); + return Settings.builder().put(discoveryConfig.nodeSettings(nodeOrdinal)) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } @Before @@ -196,7 +193,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureStableCluster(numberOfNodes); // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results - ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing(); if (zenPing instanceof UnicastZenPing) { ((UnicastZenPing) zenPing).clearTemporalResponses(); } @@ -856,7 +853,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. 
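The rewritten DiscoveryModuleTests keep exercising one registration pattern: plugins contribute named Supplier instances, duplicate names fail fast, only the supplier for the selected name is ever invoked, and unselected suppliers never run (the "lazy construction" tests prove this by registering suppliers that throw). A generic, JDK-only sketch of that pattern; the class name is mine, not from the source:

[source,java]
----
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

final class NamedRegistry<T> {
    private final Map<String, Supplier<T>> suppliers = new HashMap<>();

    void register(String name, Supplier<T> supplier) {
        if (suppliers.putIfAbsent(name, supplier) != null) {
            throw new IllegalArgumentException("Cannot register [" + name + "] twice");
        }
    }

    T create(String selected) {
        Supplier<T> supplier = suppliers.get(selected);
        if (supplier == null) {
            throw new IllegalArgumentException("Unknown type [" + selected + "]");
        }
        return supplier.get(); // suppliers for unselected names never run
    }
}
----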
Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing(); if (zenPing instanceof UnicastZenPing) { ((UnicastZenPing) zenPing).clearTemporalResponses(); } @@ -893,7 +890,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + ZenPing zenPing = ((TestZenDiscovery)internalCluster().getInstance(Discovery.class)).getZenPing(); if (zenPing instanceof UnicastZenPing) { ((UnicastZenPing) zenPing).clearTemporalResponses(); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 88cf23fe938..acc5d4e8018 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -19,6 +19,16 @@ package org.elasticsearch.discovery.zen; +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -34,22 +44,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; @@ -269,8 +268,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { } private ZenDiscovery buildZenDiscovery(Settings settings, TransportService service, ClusterService clusterService, ThreadPool threadPool) { - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, clusterSettings, new MockZenPing(settings)); + ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, Collections::emptyList); zenDiscovery.start(); return zenDiscovery; } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java 
b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 2dd08feb6ca..9750cd35d01 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -35,16 +35,27 @@ import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.gateway.LocalAllocateDangledIndices; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.StringFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.indices.IndicesService.ShardDeletionCheckResult; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -53,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class IndicesServiceTests extends ESSingleNodeTestCase { @@ -65,6 +77,30 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { return getInstanceFromNode(NodeEnvironment.class); } + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.getPlugins()); + plugins.add(TestPlugin.class); + return plugins; + } + + public static class TestPlugin extends Plugin implements MapperPlugin { + + public TestPlugin() {} + + @Override + public Map<String, Mapper.TypeParser> getMappers() { + return Collections.singletonMap("fake-mapper", new StringFieldMapper.TypeParser()); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + super.onIndexModule(indexModule); + indexModule.addSimilarity("fake-similarity", BM25SimilarityProvider::new); + } + } + + @Override protected boolean resetNodeAfterTest() { return true; @@ -328,4 +364,26 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { } } + /** + * Tests that the {@link MapperService} created by {@link IndicesService#createIndexMapperService(IndexMetaData)} contains + * custom types and similarities registered by plugins + */ + public void testStandAloneMapperServiceWithPlugins() throws IOException { + final String indexName = "test"; + final Index index = new Index(indexName, UUIDs.randomBase64UUID()); + final IndicesService indicesService = getIndicesService(); + final Settings idxSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexModule.SIMILARITY_SETTINGS_PREFIX + ".test.type", "fake-similarity") + .build(); + final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName()) +
.settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); + assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); + assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test"), + instanceOf(BM25SimilarityProvider.class)); + } } diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 88ba954dd8e..0807ed4389c 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -451,7 +451,9 @@ public class UpdateSettingsIT extends ESIntegTestCase { } finally { Loggers.setLevel(rootLogger, savedLevel); Loggers.removeAppender(rootLogger, mockAppender); - mockAppender.stop(); + // don't call stop here; some node might still be using this reference at this point, causing tests to fail. + // this is only relevant in integration tests; unit tests can control what uses a logger and what doesn't + // mockAppender.stop(); } } @@ -551,7 +553,9 @@ public class UpdateSettingsIT extends ESIntegTestCase { } finally { Loggers.setLevel(rootLogger, savedLevel); Loggers.removeAppender(rootLogger, mockAppender); - mockAppender.stop(); + // don't call stop here; some node might still be using this reference at this point, causing tests to fail. + // this is only relevant in integration tests; unit tests can control what uses a logger and what doesn't + // mockAppender.stop(); } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 8582ca0e02f..c55fc514332 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -50,6 +51,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -76,8 +78,9 @@ import static org.hamcrest.Matchers.instanceOf; public class RareClusterStateIT extends ESIntegTestCase { @Override - protected boolean addMockZenPings() { - return false; + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build(); } @Override @@ -322,7 +325,12 @@ public class RareClusterStateIT extends ESIntegTestCase { // Here we want to test that everything goes well if the mappings that // are needed for a document are not available on the replica at the // time of indexing it - final List nodeNames = internalCluster().startNodesAsync(2).get();
+ final List nodeNames = internalCluster().startNodesAsync(2, + Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // explicitly set so it won't default to publish timeout + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait post commit as we are blocking things by design + .build()).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); final String master = internalCluster().getMasterName(); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index 1b2083fabec..7f0e9350488 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -71,13 +71,7 @@ public class IndicesStoreTests extends ESTestCase { } public void testShardCanBeDeletedNoShardRouting() throws Exception { - int numShards = randomIntBetween(1, 7); - int numReplicas = randomInt(2); - - ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); - clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); - assertFalse(IndicesStore.shardCanBeDeleted(localNode.getId(), routingTable.build())); } @@ -85,8 +79,6 @@ public class IndicesStoreTests extends ESTestCase { int numShards = randomIntBetween(1, 7); int numReplicas = randomInt(2); - ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); - clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { @@ -102,7 +94,8 @@ public class IndicesStoreTests extends ESTestCase { if (state == ShardRoutingState.UNASSIGNED) { unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); } - routingTable.addShard(TestShardRouting.newShardRouting("test", i, randomBoolean() ? localNode.getId() : randomAsciiOfLength(10), null, j == 0, state, unassignedInfo)); + String relocatingNodeId = state == ShardRoutingState.RELOCATING ? 
randomAsciiOfLength(10) : null; + routingTable.addShard(TestShardRouting.newShardRouting("test", i, randomAsciiOfLength(10), relocatingNodeId, j == 0, state, unassignedInfo)); } } @@ -113,69 +106,19 @@ public class IndicesStoreTests extends ESTestCase { int numShards = randomIntBetween(1, 7); int numReplicas = randomInt(2); - ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); - clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", - buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); int localShardId = randomInt(numShards - 1); for (int i = 0; i < numShards; i++) { - String nodeId = i == localShardId ? localNode.getId() : randomBoolean() ? "abc" : "xyz"; - String relocationNodeId = randomBoolean() ? null : randomBoolean() ? localNode.getId() : "xyz"; - routingTable.addShard(TestShardRouting.newShardRouting("test", i, nodeId, relocationNodeId, true, ShardRoutingState.STARTED)); + int localNodeIndex = randomInt(numReplicas); + boolean primaryOnLocalNode = i == localShardId && localNodeIndex == numReplicas; + routingTable.addShard(TestShardRouting.newShardRouting("test", i, primaryOnLocalNode ? localNode.getId() : randomAsciiOfLength(10), true, ShardRoutingState.STARTED)); for (int j = 0; j < numReplicas; j++) { - routingTable.addShard(TestShardRouting.newShardRouting("test", i, nodeId, relocationNodeId, false, ShardRoutingState.STARTED)); + boolean replicaOnLocalNode = i == localShardId && localNodeIndex == j; + routingTable.addShard(TestShardRouting.newShardRouting("test", i, replicaOnLocalNode ? localNode.getId() : randomAsciiOfLength(10), false, ShardRoutingState.STARTED)); } } // Shard exists locally, can't delete shard assertFalse(IndicesStore.shardCanBeDeleted(localNode.getId(), routingTable.build())); } - - public void testShardCanBeDeletedNodeVersion() throws Exception { - int numShards = randomIntBetween(1, 7); - int numReplicas = randomInt(2); - - // Most of the times don't test bwc and use current version - final Version nodeVersion = randomBoolean() ? 
CURRENT : randomVersion(random()); - ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); - clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", - buildNewFakeTransportAddress(), emptyMap(), emptySet(), nodeVersion))); - IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); - for (int i = 0; i < numShards; i++) { - routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, true, ShardRoutingState.STARTED)); - for (int j = 0; j < numReplicas; j++) { - routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", null, false, ShardRoutingState.STARTED)); - } - } - - // shard exist on other node (abc) - assertTrue(IndicesStore.shardCanBeDeleted(localNode.getId(), routingTable.build())); - } - - public void testShardCanBeDeletedRelocatingNode() throws Exception { - int numShards = randomIntBetween(1, 7); - int numReplicas = randomInt(2); - - ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); - clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random()); - - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()) - .add(localNode) - .add(new DiscoveryNode("xyz", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)) - .add(new DiscoveryNode("def", buildNewFakeTransportAddress(), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test - )); - IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); - for (int i = 0; i < numShards; i++) { - routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", "def", true, ShardRoutingState.STARTED)); - for (int j = 0; j < numReplicas; j++) { - routingTable.addShard(TestShardRouting.newShardRouting("test", i, "xyz", "def", false, ShardRoutingState.STARTED)); - } - } - - // shard exist on other node (abc and def) - assertTrue(IndicesStore.shardCanBeDeleted(localNode.getId(), routingTable.build())); - } } diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index d487c9bc197..612e1d1e16b 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; @@ -109,7 +108,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") 
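Taken together, the IndicesStoreTests hunks, including the two deleted tests whose assertions are still visible above, pin down when a node may delete a shard's on-disk data: never without routing information, and never while any copy sits on (or is relocating to) the local node. The state-randomizing test suggests the real check also requires every copy to be started; that last condition is an inference of this sketch, not something these hunks assert directly. A JDK-only rendering of the rule, with an illustrative Copy type:

[source,java]
----
import java.util.List;

final class ShardDeletionRule {
    static final class Copy {
        final String currentNodeId;
        final String relocatingNodeId; // null unless relocating
        final boolean started;

        Copy(String currentNodeId, String relocatingNodeId, boolean started) {
            this.currentNodeId = currentNodeId;
            this.relocatingNodeId = relocatingNodeId;
            this.started = started;
        }
    }

    static boolean shardCanBeDeleted(String localNodeId, List<Copy> copies) {
        if (copies.isEmpty()) {
            return false; // no routing info -> keep the data
        }
        for (Copy copy : copies) {
            if (copy.started == false) {
                return false; // an unassigned/initializing copy may still need it (assumed)
            }
            if (localNodeId.equals(copy.currentNodeId) || localNodeId.equals(copy.relocatingNodeId)) {
                return false; // a copy lives here, or is moving here
            }
        }
        return true;
    }
}
----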
.startObject("field2").field("type", "text").field("store", false).endObject() .endObject().endObject().endObject()) - , IndexTemplateAlreadyExistsException.class + , IllegalArgumentException.class ); response = client().admin().indices().prepareGetTemplates().get(); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 86ca66eb87c..809591cd688 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -995,4 +995,21 @@ public class InnerHitsIT extends ESIntegTestCase { equalTo("fox ate rabbit x y z")); } + public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception { + assertAcked(prepareCreate("test").addMapping("child_type", "_parent", "type=parent_type", "nested_type", "type=nested")); + client().prepareIndex("test", "parent_type", "1").setSource("key", "value").get(); + client().prepareIndex("test", "child_type", "2").setParent("1").setSource("nested_type", Collections.singletonMap("key", "value")) + .get(); + refresh(); + SearchResponse response = client().prepareSearch("test") + .setQuery(boolQuery().must(matchQuery("key", "value")) + .should(hasChildQuery("child_type", nestedQuery("nested_type", matchAllQuery(), ScoreMode.None) + .innerHit(new InnerHitBuilder()), ScoreMode.None).innerHit(new InnerHitBuilder()))) + .get(); + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getInnerHits().get("child_type").getAt(0).field("_parent").getValue(), equalTo("1")); + assertThat(hit.getInnerHits().get("child_type").getAt(0).getInnerHits().get("nested_type").getAt(0).field("_parent"), nullValue()); + } + } diff --git a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java index da1dcf43e5d..0525f4a32dc 100644 --- a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java @@ -19,8 +19,25 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.support.TransportStatus; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; /** Unit tests for TCPTransport */ public class TCPTransportTests extends ESTestCase { @@ -127,4 +144,103 @@ public class TCPTransportTests extends ESTestCase { assertEquals(101, addresses[1].getPort()); assertEquals(102, addresses[2].getPort()); } + + public void testCompressRequest() throws IOException { + final boolean compressed = randomBoolean(); + final AtomicBoolean called = new AtomicBoolean(false); + Req request = new 
Req(randomRealisticUnicodeOfLengthBetween(10, 100)); + ThreadPool threadPool = new TestThreadPool(TCPTransportTests.class.getName()); + try { + TcpTransport transport = new TcpTransport("test", Settings.builder().put("transport.tcp.compress", compressed).build(), + threadPool, new BigArrays(Settings.EMPTY, null), null, null, null) { + @Override + protected InetSocketAddress getLocalAddress(Object o) { + return null; + } + + @Override + protected Object bind(String name, InetSocketAddress address) throws IOException { + return null; + } + + @Override + protected void closeChannels(List channel) throws IOException { + + } + + @Override + protected NodeChannels connectToChannelsLight(DiscoveryNode node) throws IOException { + return new NodeChannels(new Object[0], new Object[0], new Object[0], new Object[0], new Object[0]); + } + + @Override + protected void sendMessage(Object o, BytesReference reference, Runnable sendListener) throws IOException { + StreamInput streamIn = reference.streamInput(); + streamIn.skip(TcpHeader.MARKER_BYTES_SIZE); + int len = streamIn.readInt(); + long requestId = streamIn.readLong(); + assertEquals(42, requestId); + byte status = streamIn.readByte(); + Version version = Version.fromId(streamIn.readInt()); + assertEquals(Version.CURRENT, version); + assertEquals(compressed, TransportStatus.isCompress(status)); + called.compareAndSet(false, true); + if (compressed) { + final int bytesConsumed = TcpHeader.HEADER_SIZE; + streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)) + .streamInput(streamIn); + } + threadPool.getThreadContext().readHeaders(streamIn); + assertEquals("foobar", streamIn.readString()); + Req readReq = new Req(""); + readReq.readFrom(streamIn); + assertEquals(request.value, readReq.value); + } + + @Override + protected NodeChannels connectToChannels(DiscoveryNode node) throws IOException { + return new NodeChannels(new Object[0], new Object[0], new Object[0], new Object[0], new Object[0]); + } + + @Override + protected boolean isOpen(Object o) { + return false; + } + + @Override + public long serverOpen() { + return 0; + } + + @Override + protected Object nodeChannel(DiscoveryNode node, TransportRequestOptions options) throws ConnectTransportException { + return new NodeChannels(new Object[0], new Object[0], new Object[0], new Object[0], new Object[0]); + } + }; + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + transport.sendRequest(node, 42, "foobar", request, TransportRequestOptions.EMPTY); + assertTrue(called.get()); + } finally { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + } + + private static final class Req extends TransportRequest { + public String value; + + private Req(String value) { + this.value = value; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } + } diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index 63245f172bf..37c4d5b3c93 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -59,6 +59,9 @@ # use our provided JNA always versus the system one -Djna.nosys=true +# use old-style file permissions on JDK9 +-Djdk.io.permissionsUseCanonicalPath=true + # flags to keep Netty from being unsafe 
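testCompressRequest works by subclassing TcpTransport with every network hook stubbed out and intercepting sendMessage, where it re-reads the serialized frame: marker bytes, a length, the request id, a status byte whose flags include "compressed", the protocol version, and then the (possibly compressed) payload. The toy below round-trips a made-up frame in the same decode-and-assert style; it is emphatically not the Elasticsearch wire format, and the field layout and flag bit are assumptions of this sketch:

[source,java]
----
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FrameRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeByte('E'); out.writeByte('S');   // marker bytes
        out.writeInt(13);                         // frame length (illustrative, not computed)
        out.writeLong(42L);                       // request id
        out.writeByte(0b0000_0100);               // status byte: pretend bit 2 means "compressed"
        out.writeUTF("foobar");                   // action name

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        in.skipBytes(2);                          // skip the marker
        int len = in.readInt();
        long requestId = in.readLong();
        boolean compressed = (in.readByte() & 0b100) != 0;
        System.out.println("len=" + len + " id=" + requestId
                + " compressed=" + compressed + " action=" + in.readUTF());
    }
}
----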
-Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc index 2b29f15fabd..1e48fbd431c 100644 --- a/docs/java-api/docs/index_.asciidoc +++ b/docs/java-api/docs/index_.asciidoc @@ -60,8 +60,9 @@ json.put("message","trying out Elasticsearch"); [[java-docs-index-generate-beans]] ===== Serialize your beans -Elasticsearch already uses http://wiki.fasterxml.com/JacksonHome[Jackson]. -So you can use it to serialize your beans to JSON: +You can use http://wiki.fasterxml.com/JacksonHome[Jackson] to serialize +your beans to JSON. Please add http://search.maven.org/#search%7Cga%7C1%7Cjackson-databind[Jackson Databind] + to your project. Then you can use `ObjectMapper` to serialize your beans: [source,java] -------------------------------------------------- diff --git a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc index 9963b48fb04..17ca509e3c5 100644 --- a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc @@ -76,7 +76,7 @@ When requesting detailed buckets (typically for displaying a "zoomed in" map) a "zoom1":{ "geohash_grid" : { "field":"location", - "precision":8, + "precision":8 } } } diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index 61deffeccd2..172a528fcb3 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -353,7 +353,10 @@ Customized scores can be implemented via a script: -------------------------------------------------- "script_heuristic": { - "script": "_subset_freq/(_superset_freq - _subset_freq + 1)" + "script": { + "lang": "painless", + "inline": "params._subset_freq/(params._superset_freq - params._subset_freq + 1)" + } } -------------------------------------------------- diff --git a/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 deleted file mode 100644 index aadc6a31524..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0bf61de45f8ea73a185d48572ea094f6b696a7a8 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.3.0.jar.sha1 new file mode 100644 index 00000000000..8fca696518d --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.3.0.jar.sha1 @@ -0,0 +1 @@ +f9847cdbdd355f9f96993c4c322d6b453f4e84a8 \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index b1db44defa0..93549d1d791 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -33,7 +33,7 @@ import java.util.List; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class MultiSearchTemplateRequest extends 
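The Java API docs hunk above now tells users to add their own Jackson Databind dependency rather than rely on the one Elasticsearch ships. The [source,java] listing that follows it is elided in this diff; it would plausibly show something like the snippet below, whose bean fields mirror the user/message example earlier on that page:

[source,java]
----
import com.fasterxml.jackson.databind.ObjectMapper;

public class BeanToJson {
    public static class Tweet {          // a plain bean; public fields keep the sketch short
        public String user = "kimchy";
        public String message = "trying out Elasticsearch";
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();  // reusable and thread-safe
        String json = mapper.writeValueAsString(new Tweet());
        System.out.println(json);  // {"user":"kimchy","message":"trying out Elasticsearch"}
    }
}
----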
ActionRequest<MultiSearchTemplateRequest> implements CompositeIndicesRequest { +public class MultiSearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest { private List<SearchTemplateRequest> requests = new ArrayList<>(); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java index 170070564f9..a5a0ded3bee 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java @@ -42,7 +42,7 @@ public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin } @Override - public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() { + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return Arrays.asList(new ActionHandler<>(SearchTemplateAction.INSTANCE, TransportSearchTemplateAction.class), new ActionHandler<>(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class)); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index d7ac37f8313..b405d0950e7 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -37,7 +37,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A request to execute a search based on a search template. */ -public class SearchTemplateRequest extends ActionRequest<SearchTemplateRequest> implements IndicesRequest { +public class SearchTemplateRequest extends ActionRequest implements IndicesRequest { private SearchRequest request; private boolean simulate = false; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java index d8bb91f2fb0..3ecf3c273ea 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateRequest.java @@ -48,7 +48,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeSt * @deprecated Instead use multi search API with {@link PercolateQueryBuilder} */ @Deprecated -public class MultiPercolateRequest extends ActionRequest<MultiPercolateRequest> implements CompositeIndicesRequest { +public class MultiPercolateRequest extends ActionRequest implements CompositeIndicesRequest { private String[] indices; private String documentType; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java index 98aaa891640..bc449ea932d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateRequest.java @@ -44,7 +44,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * @deprecated Instead use search API with {@link PercolateQueryBuilder} */ @Deprecated -public class PercolateRequest extends ActionRequest<PercolateRequest> implements IndicesRequest.Replaceable { +public class PercolateRequest extends ActionRequest implements IndicesRequest.Replaceable { protected String[] indices; private IndicesOptions
indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index 8b602e3c478..d314de3b05f 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -47,7 +47,7 @@ public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlug } @Override - public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() { + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return Arrays.asList(new ActionHandler<>(PercolateAction.INSTANCE, TransportPercolateAction.class), new ActionHandler<>(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class)); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index d3463cdb61f..bfdd95cddfe 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -39,7 +39,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScrollRequest<Self>> - extends ActionRequest<Self> { + extends ActionRequest { public static final int SIZE_ALL_MATCHES = -1; private static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); private static final int DEFAULT_SCROLL_SIZE = 1000; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index fe7bcb1f85b..707da4fe5da 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -37,7 +37,7 @@ public class ReindexPlugin extends Plugin implements ActionPlugin { public static final String NAME = "reindex"; @Override - public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() { + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class), new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class), new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class), diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java index f8351b262fc..36c392622bc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java @@ -55,7 +55,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< } @SuppressWarnings("unchecked") - protected <T extends ActionRequest<T>> T applyScript(Consumer<Map<String, Object>> scriptBody) { + protected <T extends ActionRequest> T applyScript(Consumer<Map<String, Object>> scriptBody) { IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar")); ScrollableHitSource.Hit doc = new ScrollableHitSource.BasicHit("test", "type", "id", 0); ExecutableScript executableScript = new SimpleExecutableScript(scriptBody); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 35c3f235cd8..b14b790340c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -728,7 +728,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { @Override @SuppressWarnings("unchecked") - protected <Request extends ActionRequest<Request>, Response extends ActionResponse, + protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute( Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { lastHeaders.set(threadPool.getThreadContext().getHeaders()); @@ -837,7 +837,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } } - private static class RequestAndListener<Request extends ActionRequest<Request>, Response> { + private static class RequestAndListener<Request extends ActionRequest, Response> { private final Request request; private final ActionListener<Response> listener; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index b4ac273b43b..27955f71f92 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -165,7 +165,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { } @Override - public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, + public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) { if (false == action.equals(SearchAction.NAME)) { chain.proceed(task, action, request, listener); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 deleted file mode 100644 index 29114cfcf70..00000000000 ---
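The string of request-class hunks above, together with the reindex hunks just before this point, is one mechanical refactoring: ActionRequest loses its self-referential type parameter, so every `extends ActionRequest<Self>` becomes a raw `extends ActionRequest` and bounded method signatures shed a level of generic nesting. A minimal before/after sketch of dropping such a "curiously recurring" parameter, with illustrative names that are not from the source:

[source,java]
----
// Before: the self-referential bound lets fluent setters return the concrete type.
abstract class Request<R extends Request<R>> {
    long timeoutMillis;

    @SuppressWarnings("unchecked")
    final R timeout(long millis) {
        this.timeoutMillis = millis;
        return (R) this; // safe only because R is always "the class itself"
    }
}

// After: no type parameter; callers that need the concrete type override or cast.
// Much less generics plumbing when the fluent return type is rarely used.
abstract class SimpleRequest {
    long timeoutMillis;

    SimpleRequest timeout(long millis) {
        this.timeoutMillis = millis;
        return this;
    }
}
----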
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 29114cfcf70..00000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-39e5761c8209a6e4e940a3aec4ba57a6b631ca00
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0.jar.sha1
new file mode 100644
index 00000000000..22ff33fee84
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0.jar.sha1
@@ -0,0 +1 @@
+467d808656db028faa3cbc86d386dbf6164a835c
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 2ec23fb8b2d..00000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c4230c40a10cbb4ad54bcbe9e4265ecb598a4c25
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0.jar.sha1
new file mode 100644
index 00000000000..13226a0d4be
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0.jar.sha1
@@ -0,0 +1 @@
+bea02277bff7fa0f4d93e6abca94eaf0eec9c84f
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 27a5a67a55a..00000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ccd0636f0df42146b5c77cac5ec57739c9ff2893
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0.jar.sha1
new file mode 100644
index 00000000000..5a57464512f
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0.jar.sha1
@@ -0,0 +1 @@
+657a1409f539b4a20b5487496a8e4471b33902fd
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index a70cf1ae74f..00000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-17b3d2f5ffd58756b6d5bdc651eb2ea461885d0a
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0.jar.sha1
new file mode 100644
index 00000000000..eab2257293c
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0.jar.sha1
@@ -0,0 +1 @@
+47792194b04e8cd61c3667da50a38adae257b19a
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 466578a5e24..00000000000
--- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d3d540a7225837e25cc0ed02aefb0c7763e0f832
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0.jar.sha1
new file mode 100644
index 00000000000..bba7a9bc273
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0.jar.sha1
@@ -0,0 +1 @@
+bcf535520b92821cf04486031214d35d7405571c
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1
deleted file mode 100644
index 5ad5644d679..00000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7e711a007cd1588f8118eb02803381d448ae087c
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0.jar.sha1
new file mode 100644
index 00000000000..e136d57854a
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0.jar.sha1
@@ -0,0 +1 @@
+82ed82174fae75f93741b8418046bc94e50434f8
\ No newline at end of file
diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle
index 88874968b21..c2d004bab4c 100644
--- a/plugins/discovery-azure-classic/build.gradle
+++ b/plugins/discovery-azure-classic/build.gradle
@@ -67,7 +67,7 @@ task createKey(type: LoggedExec) {
     project.delete(keystore.parentFile)
     keystore.parentFile.mkdirs()
   }
-  executable = 'keytool'
+  executable = new File(project.javaHome, 'bin/keytool')
  standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
   args '-genkey',
        '-alias', 'test-node',
diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
index 1c27a9da0af..9735b83d9e6 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java
@@ -74,10 +74,10 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
 
     @Override
     public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
-                                                              ClusterService clusterService, ZenPing zenPing) {
+                                                              ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider
         return Collections.singletonMap(AZURE, () ->
-            new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+            new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
     }
 
     @Override
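The azure hunk above (and the matching ec2/gce hunks below) change `getDiscoveryTypes` to receive the resolved `UnicastHostsProvider` instead of a `ZenPing`, and to hand it straight to `ZenDiscovery`. A minimal plugin-side sketch of the new shape, assuming only the 5.1-era types visible in these hunks (`MyDiscoveryPlugin` and the `my-disco` type name are illustrative):

-------------------------------------------------
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

// Sketch: the plugin no longer builds or receives a ZenPing; it just forwards
// the hosts provider that the node resolved for it.
public class MyDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
    private final Settings settings;

    public MyDiscoveryPlugin(Settings settings) {
        this.settings = settings;
    }

    @Override
    public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                              ClusterService clusterService, UnicastHostsProvider hostsProvider) {
        return Collections.singletonMap("my-disco",
            () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
    }
}
-------------------------------------------------

The same pattern reappears verbatim in the new TestZenDiscovery test plugin later in this patch.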
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
index 09ab7569f3d..6d367e21679 100644
--- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
+++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java
@@ -101,10 +101,10 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
 
     @Override
     public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
-                                                              ClusterService clusterService, ZenPing zenPing) {
+                                                              ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider
         return Collections.singletonMap(EC2, () ->
-            new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+            new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
     }
 
     @Override
diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle
index bbd2221d8e0..ede168e1f9d 100644
--- a/plugins/discovery-gce/build.gradle
+++ b/plugins/discovery-gce/build.gradle
@@ -35,7 +35,7 @@ task createKey(type: LoggedExec) {
     project.delete(keystore.parentFile)
     keystore.parentFile.mkdirs()
   }
-  executable = 'keytool'
+  executable = new File(project.javaHome, 'bin/keytool')
  standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
   args '-genkey',
       '-alias', 'test-node',
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
index 4d684a1b22c..f53abc4241c 100644
--- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
+++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java
@@ -99,10 +99,10 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
 
     @Override
     public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
-                                                              ClusterService clusterService, ZenPing zenPing) {
+                                                              ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider
         return Collections.singletonMap(GCE, () ->
-            new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+            new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
     }
 
     @Override
diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle
index 7cb7d7a1a2a..bb5d69d32bb 100644
--- a/qa/backwards-5.0/build.gradle
+++ b/qa/backwards-5.0/build.gradle
@@ -18,7 +18,13 @@ integTest {
   cluster {
     numNodes = 2
     numBwcNodes = 1
-    bwcVersion = "6.0.0-alpha1-SNAPSHOT" // this is the same as the current version until we released the first RC
+    bwcVersion = "5.1.0-SNAPSHOT"
     setting 'logger.org.elasticsearch', 'DEBUG'
   }
 }
+
+repositories {
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+}
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
index 34621802f55..ca2575901bc 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -19,6 +19,11 @@
 
 package org.elasticsearch.tribe;
 
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
@@ -34,16 +39,11 @@ import org.elasticsearch.node.NodeValidationException;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.discovery.MockZenPing;
+import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.transport.MockTcpTransportPlugin;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.List;
-
 import static org.hamcrest.CoreMatchers.either;
 import static org.hamcrest.CoreMatchers.equalTo;
 
@@ -68,7 +68,7 @@ public class TribeUnitTests extends ESTestCase {
             .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
             .build();
 
-        final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class);
+        final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class);
         tribe1 = new MockNode(
             Settings.builder()
                 .put(baseSettings)
@@ -110,7 +110,7 @@ public class TribeUnitTests extends ESTestCase {
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
             .put(extraSettings).build();
 
-        try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class)).start()) {
+        try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class)).start()) {
             try (Client client = node.client()) {
                 assertBusy(() -> {
                     ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState();
diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle
index f90763a12dd..d88ef4b74e4 100644
--- a/qa/rolling-upgrade/build.gradle
+++ b/qa/rolling-upgrade/build.gradle
@@ -25,12 +25,8 @@ task oldClusterTest(type: RestIntegTestTask) {
   mustRunAfter(precommit)
   cluster {
     distribution = 'zip'
-    // TODO: Right now, this just forms a cluster with the current version of ES,
-    // because we don't support clusters with nodes on different alpha/beta releases of ES.
-    // When the GA is released, we should change the bwcVersion to 5.0.0 and uncomment
-    // numBwcNodes = 2
-    //bwcVersion = '5.0.0-alpha5' // TODO: either randomize, or make this settable with sysprop
-    //numBwcNodes = 2
+    bwcVersion = '5.1.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop
+    numBwcNodes = 1
     numNodes = 2
     clusterName = 'rolling-upgrade'
   }
@@ -69,3 +65,9 @@ task integTest {
 
 test.enabled = false // no unit tests for rolling upgrades, only the rest integration test
 check.dependsOn(integTest)
+
+repositories {
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+}
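The TribeUnitTests hunk above swaps `MockZenPing.TestPlugin` for `TestZenDiscovery.TestPlugin` (the plugin itself is added later in this patch). A sketch of the resulting test-node setup, using only the types that hunk already uses:

-------------------------------------------------
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.discovery.TestZenDiscovery;
import org.elasticsearch.transport.MockTcpTransportPlugin;

// Sketch: starting a mock node with the new test discovery plugin. The
// TestPlugin's additionalSettings() switches discovery.type to "test-zen",
// and pings are served by the in-memory MockZenPing unless a test opts out.
class TestZenDiscoveryUsageSketch {
    Node startNode(Settings settings) throws Exception {
        List<Class<? extends Plugin>> mockPlugins =
                Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class);
        return new MockNode(settings, mockPlugins).start();
    }
}
-------------------------------------------------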
diff --git a/qa/smoke-test-tribe-node/build.gradle b/qa/smoke-test-tribe-node/build.gradle
new file mode 100644
index 00000000000..6e108e87043
--- /dev/null
+++ b/qa/smoke-test-tribe-node/build.gradle
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.test.ClusterConfiguration
+import org.elasticsearch.gradle.test.ClusterFormationTasks
+import org.elasticsearch.gradle.test.NodeInfo
+
+apply plugin: 'elasticsearch.rest-test'
+
+List<NodeInfo> oneNodes
+
+task setupClusterOne(type: DefaultTask) {
+  mustRunAfter(precommit)
+  ClusterConfiguration configOne = new ClusterConfiguration(project)
+  configOne.clusterName = 'one'
+  configOne.setting('node.name', 'one')
+  oneNodes = ClusterFormationTasks.setup(project, setupClusterOne, configOne)
+}
+
+List<NodeInfo> twoNodes
+
+task setupClusterTwo(type: DefaultTask) {
+  mustRunAfter(precommit)
+  ClusterConfiguration configTwo = new ClusterConfiguration(project)
+  configTwo.clusterName = 'two'
+  configTwo.setting('node.name', 'two')
+  twoNodes = ClusterFormationTasks.setup(project, setupClusterTwo, configTwo)
+}
+
+integTest {
+  dependsOn(setupClusterOne, setupClusterTwo)
+  cluster {
+    // tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied
+    // would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to
+    // ensure that the code that fixes this bug is exercised
+    setting 'http.port', '40200-40249'
+    setting 'transport.tcp.port', '40300-40349'
+    setting 'node.name', 'quest'
+    setting 'tribe.one.cluster.name', 'one'
+    setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'"
+    setting 'tribe.one.http.enabled', 'true'
+    setting 'tribe.one.http.port', '40250-40299'
+    setting 'tribe.one.transport.tcp.port', '40350-40399'
+    setting 'tribe.two.cluster.name', 'two'
+    setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'"
+    setting 'tribe.two.http.enabled', 'true'
+    setting 'tribe.two.http.port', '40250-40299'
+    setting 'tribe.two.transport.tcp.port', '40250-40399'
+  }
+  // need to kill the standalone nodes here
+  finalizedBy 'setupClusterOne#stop'
+  finalizedBy 'setupClusterTwo#stop'
+}
diff --git a/qa/smoke-test-tribe-node/src/test/java/org/elasticsearch/tribe/TribeClientYamlTestSuiteIT.java b/qa/smoke-test-tribe-node/src/test/java/org/elasticsearch/tribe/TribeClientYamlTestSuiteIT.java
new file mode 100644
index 00000000000..6013913bdc4
--- /dev/null
+++ b/qa/smoke-test-tribe-node/src/test/java/org/elasticsearch/tribe/TribeClientYamlTestSuiteIT.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
+
+import java.io.IOException;
+
+public class TribeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    // tribe nodes can not handle delete indices requests
+    @Override
+    protected boolean preserveIndicesUponCompletion() {
+        return true;
+    }
+
+    // tribe nodes can not handle delete template requests
+    @Override
+    protected boolean preserveTemplatesUponCompletion() {
+        return true;
+    }
+
+    public TribeClientYamlTestSuiteIT(@Name("yaml") final ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
+        return createParameters();
+    }
+
+}
diff --git a/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yaml b/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yaml
new file mode 100644
index 00000000000..d70a355ac62
--- /dev/null
+++ b/qa/smoke-test-tribe-node/src/test/resources/rest-api-spec/test/tribe/10_basic.yaml
@@ -0,0 +1,16 @@
+---
+"Tribe node test":
+  - do:
+      cat.nodes:
+        h: name
+        s: name
+        v: true
+
+  - match:
+      $body: |
+               /^ name\n
+                  one\n
+                  quest\n
+                  quest/one\n
+                  quest/two\n
+                  two\n $/
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 7bd9861a9d1..b30ea329f10 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -17,288 +17,13 @@
  * under the License.
  */
 
-import org.elasticsearch.gradle.FileContentsTask
-import org.elasticsearch.gradle.vagrant.BatsOverVagrantTask
-import org.elasticsearch.gradle.vagrant.VagrantCommandTask
-
-String testScripts = '*.bats'
-String testCommand = "cd \$TESTROOT && sudo bats --tap \$BATS/$testScripts"
-String smokeTestCommand = 'echo I work'
-
-// the images we allow testing with
-List<String> availableBoxes = [
-  'centos-6',
-  'centos-7',
-  'debian-8',
-  'fedora-24',
-  'oel-6',
-  'oel-7',
-  'opensuse-13',
-  'sles-12',
-  'ubuntu-1204',
-  'ubuntu-1404',
-  'ubuntu-1604'
-]
-
-String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample')
-List<String> boxes = []
-for (String box : vagrantBoxes.split(',')) {
-  if (box == 'sample') {
-    boxes.add('centos-7')
-    boxes.add('ubuntu-1404')
-  } else if (box == 'all') {
-    boxes = availableBoxes
-    break
-  } else {
-    if (availableBoxes.contains(box) == false) {
-      throw new IllegalArgumentException("Unknown vagrant box '${box}'")
-    }
-    boxes.add(box)
-  }
-}
-
-long seed
-String formattedSeed = null
-String[] upgradeFromVersions
-String upgradeFromVersion
-
-String maybeTestsSeed = System.getProperty("tests.seed", null);
-if (maybeTestsSeed != null) {
-  List<String> seeds = maybeTestsSeed.tokenize(':')
-  if (seeds.size() != 0) {
-    String masterSeed = seeds.get(0)
-    seed = new BigInteger(masterSeed, 16).longValue()
-    formattedSeed = maybeTestsSeed
-  }
-}
-if (formattedSeed == null) {
-  seed = new Random().nextLong()
-  formattedSeed = String.format("%016X", seed)
-}
-
-String maybeUpdradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
-if (maybeUpdradeFromVersions != null) {
-  upgradeFromVersions = maybeUpdradeFromVersions.split(",")
-} else {
-  upgradeFromVersions = new File(project.projectDir, 'versions')
-}
-
-upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)]
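This removed seed logic is what made packaging runs reproducible; the responsibility now sits behind the `elasticsearch.vagrant` plugin applied just below. For reference, a Java rendering of what it did (a sketch; the class and method names are illustrative):

-------------------------------------------------
import java.math.BigInteger;
import java.util.Random;

// Sketch: reuse the master seed from -Dtests.seed when present so a failed
// run can be replayed, otherwise generate a fresh seed and format it the
// same way; the same seed always selects the same "upgrade from" version.
public class UpgradeSeedSketch {
    public static String pickUpgradeFromVersion(String[] upgradeFromVersions) {
        long seed;
        String formattedSeed;
        String maybeTestsSeed = System.getProperty("tests.seed");
        if (maybeTestsSeed != null) {
            // the first ':'-separated component is the master seed, in hex
            String masterSeed = maybeTestsSeed.split(":")[0];
            seed = new BigInteger(masterSeed, 16).longValue();
            formattedSeed = maybeTestsSeed;
        } else {
            seed = new Random().nextLong();
            formattedSeed = String.format("%016X", seed);
        }
        System.out.println("tests.seed=" + formattedSeed);
        return upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)];
    }
}
-------------------------------------------------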
-
-configurations {
-  test
-}
-
-repositories {
-  mavenCentral() // Try maven central first, it'll have releases before 5.0.0
-  /* Setup a repository that tries to download from
-     https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]
-     which should work for 5.0.0+. This isn't a real ivy repository but gradle
-     is fine with that */
-  ivy {
-    artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]"
-  }
-}
+apply plugin: 'elasticsearch.vagrant'
 
 dependencies {
-  test project(path: ':distribution:tar', configuration: 'archives')
-  test project(path: ':distribution:rpm', configuration: 'archives')
-  test project(path: ':distribution:deb', configuration: 'archives')
-
   // Collect all the plugins
   for (Project subproj : project.rootProject.subprojects) {
     if (subproj.path.startsWith(':plugins:')) {
-      test project(path: "${subproj.path}", configuration: 'zip')
+      bats project(path: "${subproj.path}", configuration: 'zip')
     }
   }
-
-  // The version of elasticsearch that we upgrade *from*
-  test "org.elasticsearch.distribution.deb:elasticsearch:$upgradeFromVersion@deb"
-  test "org.elasticsearch.distribution.rpm:elasticsearch:$upgradeFromVersion@rpm"
 }
-
-task clean(type: Delete) {
-  group 'Build'
-  delete buildDir
-}
-
-task stop {
-  group 'Verification'
-  description 'Stop any tasks from tests that still may be running'
-}
-
-Set<String> getVersions() {
-  Node xml
-  new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
-    xml = new XmlParser().parse(s)
-  }
-  Set<String> versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /[5]\.\d\.\d/ })
-  if (versions.isEmpty() == false) {
-    return versions;
-  }
-
-  // If no version is found, we run the tests with the current version
-  return Collections.singleton(project.version);
-}
-
-task updatePackagingTestUpgradeFromVersions {
-  group 'Verification'
-  description 'Update file containing options for the\n    "starting" version in the "upgrade from" packaging tests.'
-  doLast {
-    Set<String> versions = getVersions()
-    new File(project.projectDir, 'versions').text = versions.join('\n') + '\n'
-  }
-}
-
-task verifyPackagingTestUpgradeFromVersions {
-  doLast {
-    String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
-    if (maybeUpdateFromVersions == null) {
-      Set<String> versions = getVersions()
-      Set<String> actualVersions = new TreeSet<>(Arrays.asList(upgradeFromVersions))
-      if (!versions.equals(actualVersions)) {
-        throw new GradleException("out-of-date versions " + actualVersions +
-          ", expected " + versions + "; run gradle updatePackagingTestUpgradeFromVersions")
-      }
-    }
-  }
-}
-
-File testRoot = new File("$buildDir/testroot")
-task createTestRoot {
-  dependsOn verifyPackagingTestUpgradeFromVersions
-  outputs.dir testRoot
-  doLast {
-    testRoot.mkdirs()
-  }
-}
-
-task createVersionFile(type: FileContentsTask) {
-  dependsOn createTestRoot
-  file "${testRoot}/version"
-  contents = version
-}
-
-task createUpgradeFromFile(type: FileContentsTask) {
-  dependsOn createTestRoot
-  file "${testRoot}/upgrade_from_version"
-  contents = upgradeFromVersion
-}
-
-task prepareTestRoot(type: Copy) {
-  description 'Dump bats test dependencies into the $TESTROOT'
-  into testRoot
-  from configurations.test
-
-  dependsOn createVersionFile, createUpgradeFromFile
-  doFirst {
-    gradle.addBuildListener new BuildAdapter() {
-      @Override
-      void buildFinished(BuildResult result) {
-        if (result.failure) {
-          println "Reproduce with: gradle packagingTest -Pvagrant.boxes=${vagrantBoxes} -Dtests.seed=${formattedSeed} -Dtests.packaging.upgrade.from.versions=${upgradeFromVersions.join(",")}"
-        }
-      }
-    }
-  }
-}
-
-task checkVagrantVersion(type: Exec) {
-  commandLine 'vagrant', '--version'
-  standardOutput = new ByteArrayOutputStream()
-  doLast {
-    String version = standardOutput.toString().trim()
-    if ((version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) == false) {
-      throw new InvalidUserDataException("Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]")
-    }
-  }
-}
-
-task checkVirtualBoxVersion(type: Exec) {
-  commandLine 'vboxmanage', '--version'
-  standardOutput = new ByteArrayOutputStream()
-  doLast {
-    String version = standardOutput.toString().trim()
-    try {
-      String[] versions = version.split('\\.')
-      int major = Integer.parseInt(versions[0])
-      int minor = Integer.parseInt(versions[1])
-      if ((major < 5) || (major == 5 && minor < 1)) {
-        throw new InvalidUserDataException("Illegal version of virtualbox [${version}]. Need [5.1+]")
-      }
-    } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
-      throw new InvalidUserDataException("Unable to parse version of virtualbox [${version}]. Required [5.1+]", e)
-    }
-  }
-}
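Both tool-version checks above are likewise delegated to the `elasticsearch.vagrant` plugin. As a reference for the constraint they enforce, here is a rough Java rendering of the VirtualBox check (a sketch; class and method names are illustrative):

-------------------------------------------------
// Sketch: parse "major.minor" from `vboxmanage --version` output and
// require VirtualBox 5.1 or newer, mirroring the removed Gradle task.
public class VirtualBoxVersionCheck {
    static void requireAtLeast51(String version) {
        try {
            String[] parts = version.trim().split("\\.");
            int major = Integer.parseInt(parts[0]);
            int minor = Integer.parseInt(parts[1]);
            if (major < 5 || (major == 5 && minor < 1)) {
                throw new IllegalArgumentException("Illegal version of virtualbox [" + version + "]. Need [5.1+]");
            }
        } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
            throw new IllegalArgumentException("Unable to parse version of virtualbox [" + version + "]. Required [5.1+]", e);
        }
    }

    public static void main(String[] args) {
        requireAtLeast51("5.1.8r111374"); // passes: 5.1 >= 5.1
    }
}
-------------------------------------------------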
-
-task vagrantSmokeTest {
-  group 'Verification'
-  description 'Smoke test the specified vagrant boxes'
-}
-
-task packagingTest {
-  group 'Verification'
-  description "Tests yum/apt packages using vagrant and bats.\n" +
-    "    Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" +
-    "    'sample' can be used to test a single yum and apt box. 'all' can be used to\n" +
-    "    test all available boxes. The available boxes are: \n" +
-    "    ${availableBoxes}"
-}
-
-// Each box gets it own set of tasks
-for (String box : availableBoxes) {
-  String boxTask = box.capitalize().replace('-', '')
-
-  // always add a halt task for all boxes, so clean makes sure they are all shutdown
-  Task halt = tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) {
-    boxName box
-    args 'halt', box
-  }
-  stop.dependsOn(halt)
-  if (boxes.contains(box) == false) {
-    // we only need a halt task if this box was not specified
-    continue;
-  }
-
-  Task update = tasks.create("vagrant${boxTask}#update", VagrantCommandTask) {
-    boxName box
-    args 'box', 'update', box
-    dependsOn checkVagrantVersion, checkVirtualBoxVersion
-  }
-
-  Task up = tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
-    boxName box
-    /* It's important that we try to reprovision the box even if it already
-      exists. That way updates to the vagrant configuration take automatically.
-      That isn't to say that the updates will always be compatible. Its ok to
-      just destroy the boxes if they get busted but that is a manual step
-      because its slow-ish. */
-    /* We lock the provider to virtualbox because the Vagrantfile specifies
-      lots of boxes that only work properly in virtualbox. Virtualbox is
-      vagrant's default but its possible to change that default and folks do.
-      But the boxes that we use are unlikely to work properly with other
-      virtualization providers. Thus the lock. */
-    args 'up', box, '--provision', '--provider', 'virtualbox'
-    /* It'd be possible to check if the box is already up here and output
-      SKIPPED but that would require running vagrant status which is slow! */
-    dependsOn update
-  }
-
-  Task smoke = tasks.create("vagrant${boxTask}#smoketest", Exec) {
-    dependsOn up
-    finalizedBy halt
-    commandLine 'vagrant', 'ssh', box, '--command',
-      "set -o pipefail && ${smokeTestCommand} | sed -ue 's/^/ ${box}: /'"
-  }
-  vagrantSmokeTest.dependsOn(smoke)
-
-  Task packaging = tasks.create("packagingTest${boxTask}", BatsOverVagrantTask) {
-    dependsOn up
-    finalizedBy halt
-    boxName box
-    command testCommand
-    dependsOn prepareTestRoot
-  }
-  packagingTest.dependsOn(packaging)
-}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats
rename to qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats
index 9712febc760..726cd5468ac 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats
@@ -29,9 +29,9 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load tar
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/tar.bash
+load $BATS_UTILS/plugins.bash
 
 setup() {
     skip_not_tar_gz
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/tests/25_tar_plugins.bats
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats
rename to qa/vagrant/src/test/resources/packaging/tests/25_tar_plugins.bats
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats
rename to qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats
index d435a76b9c7..b7e925f2899 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats
@@ -30,9 +30,9 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
+load $BATS_UTILS/plugins.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats
rename to qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats
index b6ec78509d1..9a85afc9a63 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats
@@ -29,9 +29,9 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
+load $BATS_UTILS/plugins.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_modules_and_plugins.bats b/qa/vagrant/src/test/resources/packaging/tests/50_modules_and_plugins.bats
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/50_modules_and_plugins.bats
rename to qa/vagrant/src/test/resources/packaging/tests/50_modules_and_plugins.bats
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats
rename to qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
index de1416059dd..7eaa0843f9f 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
@@ -29,9 +29,9 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
+load $BATS_UTILS/plugins.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats
rename to qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats
index 237c8956c40..26c8c8082d1 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats
@@ -29,9 +29,9 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
+load $BATS_UTILS/plugins.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats
similarity index 98%
rename from qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
rename to qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats
index feca52c7bbc..a14823a9cc4 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats
@@ -31,8 +31,8 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats b/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats
similarity index 97%
rename from qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats
rename to qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats
index 3c2f7be7330..4dd682efbdd 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats
@@ -31,8 +31,8 @@
 # under the License.
 
 # Load test utilities
-load packaging_test_utils
-load os_package
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/packages.bash
 
 # Cleans everything for the 1st execution
 setup() {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.groovy b/qa/vagrant/src/test/resources/packaging/tests/example/scripts/is_guide.groovy
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.groovy
rename to qa/vagrant/src/test/resources/packaging/tests/example/scripts/is_guide.groovy
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache b/qa/vagrant/src/test/resources/packaging/tests/example/scripts/is_guide.mustache
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/example/scripts/is_guide.mustache
rename to qa/vagrant/src/test/resources/packaging/tests/example/scripts/is_guide.mustache
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
similarity index 99%
rename from qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
rename to qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
index b979f40e309..2ff853bc70b 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
+++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
@@ -39,9 +39,9 @@
 # system uses.
 
 # Load test utilities
-load packaging_test_utils
-load modules
-load plugins
+load $BATS_UTILS/utils.bash
+load $BATS_UTILS/modules.bash
+load $BATS_UTILS/plugins.bash
 
 setup() {
     # The rules on when we should clean an reinstall are complex - all the
@@ -60,7 +60,7 @@ setup() {
 }
 
 if [[ "$BATS_TEST_FILENAME" =~ 25_tar_plugins.bats$ ]]; then
-    load tar
+    load $BATS_UTILS/tar.bash
     GROUP='TAR PLUGINS'
     install() {
         install_archive
@@ -70,7 +70,7 @@ if [[ "$BATS_TEST_FILENAME" =~ 25_tar_plugins.bats$ ]]; then
     export_elasticsearch_paths
     export ESPLUGIN_COMMAND_USER=elasticsearch
 else
-    load os_package
+    load $BATS_UTILS/packages.bash
     if is_rpm; then
         GROUP='RPM PLUGINS'
     elif is_dpkg; then
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/modules.bash b/qa/vagrant/src/test/resources/packaging/utils/modules.bash
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/modules.bash
rename to qa/vagrant/src/test/resources/packaging/utils/modules.bash
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
rename to qa/vagrant/src/test/resources/packaging/utils/packages.bash
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/plugins.bash
rename to qa/vagrant/src/test/resources/packaging/utils/plugins.bash
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/tar.bash
rename to qa/vagrant/src/test/resources/packaging/utils/tar.bash
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
similarity index 100%
rename from qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
rename to qa/vagrant/src/test/resources/packaging/utils/utils.bash
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml
index 0c732ea3c11..7c7445fc67d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml
@@ -1,8 +1,8 @@
 ---
 "Help":
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
   - do:
       cat.templates:
         help: true
@@ -30,9 +30,11 @@
 
 ---
 "Normal templates":
+
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -78,9 +80,11 @@
 
 ---
 "Filtered templates":
+
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -120,8 +124,8 @@
 ---
 "Column headers":
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test
@@ -156,8 +160,8 @@
 ---
 "Select columns":
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test
@@ -189,8 +193,8 @@
 ---
 "Sort templates":
   - skip:
-      version: " - 5.0.99"
-      reason:  templates were introduced in 5.1.0
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test
@@ -239,6 +243,9 @@
 
 ---
 "Multiple template":
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
   - do:
       indices.put_template:
         name: test_1
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yaml
index b6feac83c37..0dd1a452548 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yaml
@@ -6,6 +6,11 @@ setup:
         ignore: [404]
 
 ---
 "Test indices.exists_template":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.exists_template:
         name: test
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yaml
index a03a10c1a5a..4e21b818d62 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yaml
@@ -11,6 +11,10 @@ setup:
 
 ---
 "Get template":
 
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.get_template:
         name: test
@@ -21,6 +25,10 @@ setup:
 
 ---
 "Get all templates":
 
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test2
@@ -38,6 +46,10 @@ setup:
 
 ---
 "Get template with local flag":
 
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.get_template:
         name: test
@@ -48,6 +60,10 @@ setup:
 
 ---
 "Get template with flat settings and master timeout":
 
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.get_template:
         name: test
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
index 1bc660452d8..8c5ba1c8d5e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yaml
@@ -1,5 +1,10 @@
 ---
 "Put template":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -19,6 +24,11 @@
 
 ---
 "Put multiple template":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -38,6 +48,11 @@
 
 ---
 "Put template with aliases":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -61,6 +76,11 @@
 
 ---
 "Put template create":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: test
@@ -92,6 +112,11 @@
 
 ---
 "Test Put Versioned Template":
+
+  - skip:
+      version: " - 5.99.99"
+      reason:  this uses a new API that has been added in 6.0
+
   - do:
       indices.put_template:
         name: "my_template"
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
index 62a75b0ff04..b391032bee3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml
@@ -58,6 +58,7 @@
         index: "source"
         target: "target"
         wait_for_active_shards: 1
+        master_timeout: 10s
         body:
           settings:
             index.number_of_replicas: 0
diff --git a/settings.gradle b/settings.gradle
index 4c662ac448f..ad3d1d32211 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -58,12 +58,13 @@ List projects = [
   'qa:evil-tests',
   'qa:rolling-upgrade',
   'qa:smoke-test-client',
+  'qa:smoke-test-http',
   'qa:smoke-test-ingest-with-all-dependencies',
   'qa:smoke-test-ingest-disabled',
   'qa:smoke-test-multinode',
   'qa:smoke-test-plugins',
   'qa:smoke-test-reindex-with-painless',
-  'qa:smoke-test-http',
+  'qa:smoke-test-tribe-node',
   'qa:vagrant',
 ]
diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
index 5e7902f9769..c75d9bbcb6d 100644
--- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
@@ -102,16 +102,6 @@ public class MockNode extends Node {
         }
     }
 
-    @Override
-    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
-                                 UnicastHostsProvider hostsProvider) {
-        if (getPluginsService().filterPlugins(MockZenPing.TestPlugin.class).isEmpty()) {
-            return super.newZenPing(settings, threadPool, transportService, hostsProvider);
-        } else {
-            return new MockZenPing(settings);
-        }
-    }
-
     @Override
     protected Node newTribeClientNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins) {
         return new MockNode(settings, classpathPlugins);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
index 0ece6fad393..1a7aac925f3 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
@@ -30,6 +30,7 @@ import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.junit.listeners.LoggingListener;
 
@@ -204,11 +205,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
         return finalSettings.build();
     }
 
-    @Override
-    protected boolean addMockZenPings() {
-        return false;
-    }
-
     protected int minExternalNodes() { return 1; }
 
     protected int maxExternalNodes() {
@@ -246,6 +242,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
     protected Settings commonNodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder().put(requiredSettings());
         builder.put(NetworkModule.TRANSPORT_TYPE_KEY, randomBoolean() ? "netty3" : "netty4"); // run same transport / disco as external
+        builder.put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false);
         return builder.build();
     }
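As the ESBackcompatTestCase hunk above shows, a test that must ping real (external) nodes no longer removes the mock plugin; it keeps TestZenDiscovery and disables only the in-memory pings. The same opt-out works from any test's node settings (a short sketch; the helper name is illustrative):

-------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.discovery.TestZenDiscovery;

// Sketch: keep the test discovery type but fall back to the production
// ping implementation by flipping discovery.zen.use_mock_pings off.
class RealPingsSketch {
    static Settings realPings(Settings base) {
        return Settings.builder()
                .put(base)
                .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false)
                .build();
    }
}
-------------------------------------------------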
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 6df9179147e..806c336ac74 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -120,7 +120,7 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.MockSearchService;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.test.client.RandomizingClient;
-import org.elasticsearch.test.discovery.MockZenPing;
+import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
 import org.elasticsearch.test.store.MockFSIndexStore;
 import org.elasticsearch.test.transport.MockTransportService;
@@ -1812,10 +1812,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return true;
     }
 
-    protected boolean addMockZenPings() {
-        return true;
-    }
-
     /**
      * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful
      * for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test
@@ -1853,9 +1849,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
             mocks.add(MockTcpTransportPlugin.class);
         }
 
-        if (addMockZenPings()) {
-            mocks.add(MockZenPing.TestPlugin.class);
-        }
+        mocks.add(TestZenDiscovery.TestPlugin.class);
         mocks.add(TestSeedPlugin.class);
         return Collections.unmodifiableList(mocks);
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index 795d1dd1038..0b2adfa52e1 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -18,6 +18,13 @@
  */
 package org.elasticsearch.test;
 
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
@@ -45,7 +52,7 @@ import org.elasticsearch.node.NodeValidationException;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.internal.SearchContext;
-import org.elasticsearch.test.discovery.MockZenPing;
+import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.MockTcpTransportPlugin;
 import org.junit.After;
@@ -53,13 +60,6 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -191,9 +191,9 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
             plugins = new ArrayList<>(plugins);
             plugins.add(MockTcpTransportPlugin.class);
         }
-        if (plugins.contains(MockZenPing.TestPlugin.class) == false) {
+        if (plugins.contains(TestZenDiscovery.TestPlugin.class) == false) {
             plugins = new ArrayList<>(plugins);
-            plugins.add(MockZenPing.TestPlugin.class);
+            plugins.add(TestZenDiscovery.TestPlugin.class);
         }
         Node build = new MockNode(settings, plugins);
         try {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java
index 1d91b0980e4..5e1e1acd9ab 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java
@@ -39,7 +39,7 @@ public class NoOpClient extends AbstractClient {
     }
 
     @Override
-    protected <Request extends ActionRequest<Request>,
+    protected <Request extends ActionRequest,
               Response extends ActionResponse,
               RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
             void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
index d5e7de1d9bf..c544b2bad88 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
@@ -18,39 +18,30 @@
  */
 package org.elasticsearch.test.discovery;
 
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.discovery.zen.PingContextProvider;
-import org.elasticsearch.discovery.zen.ZenPing;
-import org.elasticsearch.plugins.DiscoveryPlugin;
-import org.elasticsearch.plugins.Plugin;
-
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.discovery.zen.PingContextProvider;
+import org.elasticsearch.discovery.zen.ZenPing;
+
 /**
  * A {@link ZenPing} implementation which returns results based on an static in-memory map. This allows pinging
  * to be immediate and can be used to speed up tests.
  */
 public final class MockZenPing extends AbstractComponent implements ZenPing {
 
-    /** A marker plugin used by {@link org.elasticsearch.node.MockNode} to indicate this mock zen ping should be used. */
-    public static class TestPlugin extends Plugin {}
-
     static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap();
 
     private volatile PingContextProvider contextProvider;
 
-    @Inject
     public MockZenPing(Settings settings) {
         super(settings);
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java
new file mode 100644
index 00000000000..3ca66c11bf7
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.discovery;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.zen.UnicastHostsProvider;
+import org.elasticsearch.discovery.zen.ZenDiscovery;
+import org.elasticsearch.discovery.zen.ZenPing;
+import org.elasticsearch.plugins.DiscoveryPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * A alternative zen discovery which allows using mocks for things like pings, as well as
+ * giving access to internals.
+ */
+public class TestZenDiscovery extends ZenDiscovery {
+
+    public static final Setting<Boolean> USE_MOCK_PINGS =
+        Setting.boolSetting("discovery.zen.use_mock_pings", true, Setting.Property.NodeScope);
+
+    /** A plugin which installs mock discovery and configures it to be used. */
+    public static class TestPlugin extends Plugin implements DiscoveryPlugin {
+        private Settings settings;
+        public TestPlugin(Settings settings) {
+            this.settings = settings;
+        }
+        @Override
+        public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
+                                                                  ClusterService clusterService, UnicastHostsProvider hostsProvider) {
+            return Collections.singletonMap("test-zen",
+                () -> new TestZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
+        }
+
+        @Override
+        public List<Setting<?>> getSettings() {
+            return Collections.singletonList(USE_MOCK_PINGS);
+        }
+
+        @Override
+        public Settings additionalSettings() {
+            return Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "test-zen").build();
+        }
+    }
+
+    private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
+                             ClusterService clusterService, UnicastHostsProvider hostsProvider) {
+        super(settings, threadPool, transportService, clusterService, hostsProvider);
+    }
+
+    @Override
+    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                 UnicastHostsProvider hostsProvider) {
+        if (USE_MOCK_PINGS.get(settings)) {
+            return new MockZenPing(settings);
+        } else {
+            return super.newZenPing(settings, threadPool, transportService, hostsProvider);
+        }
+    }
+
+    public ZenPing getZenPing() {
+        return zenPing;
+    }
+}
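TestZenDiscovery also exposes its internals through `getZenPing()`. A test that needs to assert on pinging could reach it roughly like this (sketch only; how the `Discovery` instance is looked up is test-specific and assumed here):

-------------------------------------------------
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ZenPing;
import org.elasticsearch.test.discovery.TestZenDiscovery;

// Sketch: with TestZenDiscovery installed, the node's Discovery can be cast
// down to reach the ZenPing the node is actually using.
class ZenPingAccessSketch {
    static ZenPing zenPingOf(Discovery discovery) {
        return ((TestZenDiscovery) discovery).getZenPing();
    }
}
-------------------------------------------------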
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 0a426d85265..8cff517316b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -653,7 +653,7 @@ public class ElasticsearchAssertions {
             // streamable that comes in.
         }
         if (streamable instanceof ActionRequest) {
-            ((ActionRequest<?>) streamable).validate();
+            ((ActionRequest) streamable).validate();
         }
         BytesReference orig;
         try {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 1e419faf06b..e05057648cc 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -150,6 +150,16 @@ public class ESRestTestCase extends ESTestCase {
         return false;
     }
 
+    /**
+     * Controls whether or not to preserve templates upon completion of this test. The default implementation is to delete not preserve
+     * templates.
+     *
+     * @return whether or not to preserve templates
+     */
+    protected boolean preserveTemplatesUponCompletion() {
+        return false;
+    }
+
     private void wipeCluster() throws IOException {
         if (preserveIndicesUponCompletion() == false) {
             // wipe indices
@@ -164,7 +174,9 @@ public class ESRestTestCase extends ESTestCase {
         }
 
         // wipe index templates
-        adminClient().performRequest("DELETE", "_template/*");
+        if (preserveTemplatesUponCompletion() == false) {
+            adminClient().performRequest("DELETE", "_template/*");
+        }
 
         wipeSnapshots();
     }
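The new `preserveTemplatesUponCompletion()` hook mirrors the existing indices hook; a suite whose cluster cannot service `DELETE _template/*` (the tribe suite earlier in this patch is the concrete case) opts in like this (sketch; the class name is illustrative):

-------------------------------------------------
import org.elasticsearch.test.rest.ESRestTestCase;

// Sketch: overriding the hook makes wipeCluster() skip template deletion
// when the test completes.
public class KeepTemplatesIT extends ESRestTestCase {
    @Override
    protected boolean preserveTemplatesUponCompletion() {
        return true;
    }
}
-------------------------------------------------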
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
index da8c54396df..14affcaf3eb 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java
@@ -100,7 +100,7 @@ public class ClientYamlTestClient {
         Version version = null;
         Version masterVersion = null;
         for (String perNode : split) {
-            final String[] versionAndMaster = perNode.split(" ");
+            final String[] versionAndMaster = perNode.split("\\s+");
             assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length;
             final Version currentVersion = Version.fromString(versionAndMaster[0]);
             final boolean master = versionAndMaster[1].trim().equals("*");
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 7c001f910d7..327a49d3678 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -19,24 +19,6 @@
  */
 package org.elasticsearch.test.test;
 
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.InternalTestCluster;
-import org.elasticsearch.test.NodeConfigurationSource;
-import org.elasticsearch.test.discovery.MockZenPing;
-import org.elasticsearch.transport.MockTcpTransportPlugin;
-import org.elasticsearch.transport.TransportSettings;
-
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -52,6 +34,24 @@ import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.NodeConfigurationSource;
+import org.elasticsearch.test.discovery.TestZenDiscovery;
+import org.elasticsearch.transport.MockTcpTransportPlugin;
+import org.elasticsearch.transport.TransportSettings;
+
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.DATA;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.INGEST;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.MASTER;
@@ -155,7 +155,7 @@ public class InternalTestClusterTests extends ESTestCase {
         String nodePrefix = "foobar";
 
         Path baseDir = createTempDir();
-        final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class);
+        final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class);
         InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
             enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
@@ -218,7 +218,7 @@ public class InternalTestClusterTests extends ESTestCase {
         Path baseDir = createTempDir();
         InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
-            enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class),
+            enableHttpPipelining, nodePrefix, Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class),
             Function.identity());
         try {
             cluster.beforeTest(random(), 0.0);
@@ -296,7 +296,7 @@ public class InternalTestClusterTests extends ESTestCase {
                 return Settings.builder()
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build();
             }
-        }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class), Function.identity());
+        }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class), Function.identity());
         cluster.beforeTest(random(), 0.0);
         try {
             Map<DiscoveryNode.Role, Set<String>> pathsPerRole = new HashMap<>();