Merge remote-tracking branch 'upstream/master' into feature/seq_no

Boaz Leskes 2016-11-15 10:14:47 +00:00
commit c9f49039d3
210 changed files with 1880 additions and 1100 deletions


@@ -325,7 +325,7 @@ vagrant plugin install vagrant-cachier
 . Validate your installed dependencies:
 -------------------------------------
-gradle :qa:vagrant:checkVagrantVersion
+gradle :qa:vagrant:vagrantCheckVersion
 -------------------------------------
 . Download and smoke test the VMs with `gradle vagrantSmokeTest` or
@@ -417,17 +417,26 @@ and in another window:
 ----------------------------------------------------
 vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
-cd $TESTROOT
-sudo bats $BATS/*rpm*.bats
+cd $BATS_ARCHIVES
+sudo -E bats $BATS_TESTS/*rpm*.bats
 ----------------------------------------------------
 If you wanted to retest all the release artifacts on a single VM you could:
 -------------------------------------------------
-gradle prepareTestRoot
+gradle vagrantSetUp
 vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
-cd $TESTROOT
-sudo bats $BATS/*.bats
+cd $BATS_ARCHIVES
+sudo -E bats $BATS_TESTS/*.bats
 -------------------------------------------------
+Note: Starting a vagrant VM outside of the elasticsearch folder requires you to
+indicate the folder that contains the Vagrantfile using the VAGRANT_CWD
+environment variable:
+-------------------------------------------------
+gradle vagrantSetUp
+VAGRANT_CWD=/path/to/elasticsearch vagrant up centos-7 --provider virtualbox
+-------------------------------------------------
 == Coverage analysis

Vagrantfile

@@ -77,6 +77,9 @@ Vagrant.configure(2) do |config|
   # the elasticsearch project called vagrant....
   config.vm.synced_folder ".", "/vagrant", disabled: true
   config.vm.synced_folder ".", "/elasticsearch"
+  # Expose project directory
+  PROJECT_DIR = ENV['VAGRANT_PROJECT_DIR'] || Dir.pwd
+  config.vm.synced_folder PROJECT_DIR, "/project"
   config.vm.provider "virtualbox" do |v|
     # Give the boxes 3GB because Elasticsearch defaults to using 2GB
     v.memory = 3072
@@ -272,8 +275,10 @@ export ZIP=/elasticsearch/distribution/zip/build/distributions
 export TAR=/elasticsearch/distribution/tar/build/distributions
 export RPM=/elasticsearch/distribution/rpm/build/distributions
 export DEB=/elasticsearch/distribution/deb/build/distributions
-export TESTROOT=/elasticsearch/qa/vagrant/build/testroot
-export BATS=/elasticsearch/qa/vagrant/src/test/resources/packaging/scripts
+export BATS=/project/build/bats
+export BATS_UTILS=/project/build/bats/utils
+export BATS_TESTS=/project/build/bats/tests
+export BATS_ARCHIVES=/project/build/bats/archives
 VARS
 SHELL
 end
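
(These exports are what the bats examples in TESTING.asciidoc above rely on once inside a VM; `sudo -E` is needed so the exported variables survive sudo.)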


@@ -495,6 +495,8 @@ class BuildPlugin implements Plugin<Project> {
   systemProperty 'tests.artifact', project.name
   systemProperty 'tests.task', path
   systemProperty 'tests.security.manager', 'true'
+  // Breaking change in JDK-9, revert to JDK-8 behavior for now, see https://github.com/elastic/elasticsearch/issues/21534
+  systemProperty 'jdk.io.permissionsUseCanonicalPath', 'true'
   systemProperty 'jna.nosys', 'true'
   // default test sysprop values
   systemProperty 'tests.ifNoTests', 'fail'


@@ -123,7 +123,7 @@ class ClusterConfiguration {
   Map<String, String> systemProperties = new HashMap<>()
-  Map<String, String> settings = new HashMap<>()
+  Map<String, Object> settings = new HashMap<>()
   // map from destination path, to source file
   Map<String, Object> extraConfigFiles = new HashMap<>()
@@ -140,7 +140,7 @@ class ClusterConfiguration {
   }
   @Input
-  void setting(String name, String value) {
+  void setting(String name, Object value) {
     settings.put(name, value)
   }
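
Widening the settings value type from String to Object lets build scripts pass non-String values through the cluster DSL. A minimal sketch of a consuming build.gradle (the setting names here are illustrative, not taken from this commit):
-------------------------------------------------
integTest {
    cluster {
        // plain strings keep working exactly as before
        setting 'cluster.name', 'my-test-cluster'
        // with Object values, booleans and numbers no longer need quoting
        setting 'node.portsfile', true
    }
}
-------------------------------------------------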


@@ -18,14 +18,7 @@
  */
 package org.elasticsearch.gradle.vagrant
-import org.gradle.api.DefaultTask
 import org.gradle.api.tasks.Input
-import org.gradle.api.tasks.TaskAction
-import org.gradle.logging.ProgressLoggerFactory
-import org.gradle.process.internal.ExecAction
-import org.gradle.process.internal.ExecActionFactory
-import javax.inject.Inject
 /**
  * Runs bats over vagrant. Pretty much like running it using Exec but with a


@@ -34,11 +34,18 @@ public class VagrantCommandTask extends LoggedExec {
   @Input
   String boxName
+  @Input
+  Map<String, String> environmentVars
   public VagrantCommandTask() {
     executable = 'vagrant'
     project.afterEvaluate {
       // It'd be nice if --machine-readable were, well, nice
       standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
+      if (environmentVars != null) {
+        environment environmentVars
+      }
     }
   }
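
The new environmentVars input lets each task hand environment variables to the vagrant process it spawns. A sketch of a task registered this way (mirroring how the plugin below wires its halt tasks; the task name is illustrative):
-------------------------------------------------
project.tasks.create('vagrantCentos7#halt', VagrantCommandTask) {
    boxName 'centos-7'
    environmentVars(['VAGRANT_CWD': project.rootDir.absolutePath])
    args 'halt', 'centos-7'
}
-------------------------------------------------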


@@ -0,0 +1,76 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.vagrant
import org.gradle.api.tasks.Input
class VagrantPropertiesExtension {

    @Input
    List<String> boxes

    @Input
    Long testSeed

    @Input
    String formattedTestSeed

    @Input
    String upgradeFromVersion

    @Input
    List<String> upgradeFromVersions

    @Input
    String batsDir

    @Input
    Boolean inheritTests

    @Input
    Boolean inheritTestArchives

    @Input
    Boolean inheritTestUtils

    VagrantPropertiesExtension(List<String> availableBoxes) {
        this.boxes = availableBoxes
        this.batsDir = 'src/test/resources/packaging'
    }

    void boxes(String... boxes) {
        this.boxes = Arrays.asList(boxes)
    }

    void setBatsDir(String batsDir) {
        this.batsDir = batsDir
    }

    void setInheritTests(Boolean inheritTests) {
        this.inheritTests = inheritTests
    }

    void setInheritTestArchives(Boolean inheritTestArchives) {
        this.inheritTestArchives = inheritTestArchives
    }

    void setInheritTestUtils(Boolean inheritTestUtils) {
        this.inheritTestUtils = inheritTestUtils
    }
}
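
Consuming projects configure these inputs through the esvagrant block once the plugin below is applied. A minimal sketch with illustrative values:
-------------------------------------------------
esvagrant {
    boxes 'centos-7', 'ubuntu-1404'
    batsDir = 'src/test/resources/packaging'
    inheritTests = true
    inheritTestUtils = true
}
-------------------------------------------------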


@@ -0,0 +1,457 @@
package org.elasticsearch.gradle.vagrant
import org.elasticsearch.gradle.FileContentsTask
import org.gradle.BuildAdapter
import org.gradle.BuildResult
import org.gradle.api.*
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
class VagrantTestPlugin implements Plugin<Project> {
/** All available boxes **/
static List<String> BOXES = [
'centos-6',
'centos-7',
'debian-8',
'fedora-24',
'oel-6',
'oel-7',
'opensuse-13',
'sles-12',
'ubuntu-1204',
'ubuntu-1404',
'ubuntu-1604'
]
/** Boxes used when sampling the tests **/
static List<String> SAMPLE = [
'centos-7',
'ubuntu-1404',
]
/** All onboarded archives by default, available for Bats tests even if not used **/
static List<String> DISTRIBUTION_ARCHIVES = ['tar', 'rpm', 'deb']
/** Packages onboarded for upgrade tests **/
static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
private static final BATS = 'bats'
private static final String BATS_TEST_COMMAND = "cd \$BATS_ARCHIVES && sudo -E bats --tap \$BATS_TESTS/*.$BATS"
@Override
void apply(Project project) {
// Creates the Vagrant extension for the project
project.extensions.create('esvagrant', VagrantPropertiesExtension, listVagrantBoxes(project))
// Add required repositories for Bats tests
configureBatsRepositories(project)
// Creates custom configurations for Bats testing files (and associated scripts and archives)
createBatsConfiguration(project)
// Creates all the main Vagrant tasks
createVagrantTasks(project)
if (project.extensions.esvagrant.boxes == null || project.extensions.esvagrant.boxes.size() == 0) {
throw new InvalidUserDataException('Vagrant boxes cannot be null or empty for esvagrant')
}
for (String box : project.extensions.esvagrant.boxes) {
if (BOXES.contains(box) == false) {
throw new InvalidUserDataException("Vagrant box [${box}] not found, available virtual machines are ${BOXES}")
}
}
// Creates all tasks related to the Vagrant boxes
createVagrantBoxesTasks(project)
}
private List<String> listVagrantBoxes(Project project) {
String vagrantBoxes = project.getProperties().get('vagrant.boxes', 'sample')
if (vagrantBoxes == 'sample') {
return SAMPLE
} else if (vagrantBoxes == 'all') {
return BOXES
} else {
return vagrantBoxes.split(',')
}
}
private static Set<String> listVersions(Project project) {
Node xml
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
xml = new XmlParser().parse(s)
}
Set<String> versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /[5]\.\d\.\d/ })
if (versions.isEmpty() == false) {
return versions;
}
// If no version is found, we run the tests with the current version
return Collections.singleton(project.version);
}
private static File getVersionsFile(Project project) {
File versions = new File(project.projectDir, 'versions');
if (versions.exists() == false) {
// Use the elasticsearch's versions file from project :qa:vagrant
versions = project.project(":qa:vagrant").file('versions')
}
return versions
}
private static void configureBatsRepositories(Project project) {
RepositoryHandler repos = project.repositories
// Try maven central first, it'll have releases before 5.0.0
repos.mavenCentral()
/* Setup a repository that tries to download from
https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]
which should work for 5.0.0+. This isn't a real ivy repository but gradle
is fine with that */
repos.ivy {
artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]"
}
}
private static void createBatsConfiguration(Project project) {
project.configurations.create(BATS)
Long seed
String formattedSeed = null
String[] upgradeFromVersions
String maybeTestsSeed = System.getProperty("tests.seed", null);
if (maybeTestsSeed != null) {
List<String> seeds = maybeTestsSeed.tokenize(':')
if (seeds.size() != 0) {
String masterSeed = seeds.get(0)
seed = new BigInteger(masterSeed, 16).longValue()
formattedSeed = maybeTestsSeed
}
}
if (formattedSeed == null) {
seed = new Random().nextLong()
formattedSeed = String.format("%016X", seed)
}
String maybeUpgradeFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpgradeFromVersions != null) {
upgradeFromVersions = maybeUpgradeFromVersions.split(",")
} else {
// the versions file holds one version per line (see vagrantUpdateVersions below)
upgradeFromVersions = getVersionsFile(project).readLines('UTF-8') as String[]
}
String upgradeFromVersion = upgradeFromVersions[new Random(seed).nextInt(upgradeFromVersions.length)]
DISTRIBUTION_ARCHIVES.each {
// Adds a dependency for the current version
project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'archives'))
}
UPGRADE_FROM_ARCHIVES.each {
// The version of elasticsearch that we upgrade *from*
project.dependencies.add(BATS, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}")
}
project.extensions.esvagrant.testSeed = seed
project.extensions.esvagrant.formattedTestSeed = formattedSeed
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
project.extensions.esvagrant.upgradeFromVersions = upgradeFromVersions
}
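// Both system properties read above make a failed run reproducible when pinned
// on the command line, e.g. (illustrative values):
//   gradle packagingTest -Dtests.seed=D9EB1B4B8F7C7E12 \
//       -Dtests.packaging.upgrade.from.versions=5.0.0,5.0.1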
private static void createCleanTask(Project project) {
project.tasks.create('clean', Delete.class) {
description 'Clean the project build directory'
group 'Build'
delete project.buildDir
}
}
private static void createStopTask(Project project) {
project.tasks.create('stop') {
description 'Stop any tasks from tests that still may be running'
group 'Verification'
}
}
private static void createSmokeTestTask(Project project) {
project.tasks.create('vagrantSmokeTest') {
description 'Smoke test the specified vagrant boxes'
group 'Verification'
}
}
private static void createPrepareVagrantTestEnvTask(Project project) {
File batsDir = new File("${project.buildDir}/${BATS}")
Task createBatsDirsTask = project.tasks.create('createBatsDirs')
createBatsDirsTask.outputs.dir batsDir
createBatsDirsTask.dependsOn project.tasks.vagrantVerifyVersions
createBatsDirsTask.doLast {
batsDir.mkdirs()
}
Copy copyBatsArchives = project.tasks.create('copyBatsArchives', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/archives"
from project.configurations[BATS]
}
Copy copyBatsTests = project.tasks.create('copyBatsTests', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/tests"
from {
"${project.extensions.esvagrant.batsDir}/tests"
}
}
Copy copyBatsUtils = project.tasks.create('copyBatsUtils', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/utils"
from {
"${project.extensions.esvagrant.batsDir}/utils"
}
}
// Now we iterate over dependencies of the bats configuration. When a project dependency is found,
// we bring back its own archives, test files or test utils.
project.afterEvaluate {
project.configurations.bats.dependencies.findAll {it.configuration == BATS }.each { d ->
if (d instanceof DefaultProjectDependency) {
DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
Project externalBatsProject = externalBatsDependency.dependencyProject
String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir
if (project.extensions.esvagrant.inheritTests) {
copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests"))
}
if (project.extensions.esvagrant.inheritTestArchives) {
copyBatsArchives.from(externalBatsDependency.projectConfiguration.files)
}
if (project.extensions.esvagrant.inheritTestUtils) {
copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils"))
}
}
}
}
Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
dependsOn createBatsDirsTask
file "${batsDir}/archives/version"
contents project.version
}
Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) {
dependsOn createBatsDirsTask
file "${batsDir}/archives/upgrade_from_version"
contents project.extensions.esvagrant.upgradeFromVersion
}
Task vagrantSetUpTask = project.tasks.create('vagrantSetUp')
vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
vagrantSetUpTask.doFirst {
project.gradle.addBuildListener new BuildAdapter() {
@Override
void buildFinished(BuildResult result) {
if (result.failure) {
println "Reproduce with: gradle packagingTest "
+"-Pvagrant.boxes=${project.extensions.esvagrant.boxes} "
+ "-Dtests.seed=${project.extensions.esvagrant.formattedSeed} "
+ "-Dtests.packaging.upgrade.from.versions=${project.extensions.esvagrant.upgradeFromVersions.join(",")}"
}
}
}
}
}
private static void createUpdateVersionsTask(Project project) {
project.tasks.create('vagrantUpdateVersions') {
description 'Update file containing options for the\n "starting" version in the "upgrade from" packaging tests.'
group 'Verification'
doLast {
File versions = getVersionsFile(project)
versions.text = listVersions(project).join('\n') + '\n'
}
}
}
private static void createVerifyVersionsTask(Project project) {
project.tasks.create('vagrantVerifyVersions') {
description 'Verify that the file containing options for the\n "starting" version in the "upgrade from" packaging tests is up to date.'
group 'Verification'
doLast {
String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null)
if (maybeUpdateFromVersions == null) {
Set<String> versions = listVersions(project)
Set<String> actualVersions = new TreeSet<>(project.extensions.esvagrant.upgradeFromVersions)
if (!versions.equals(actualVersions)) {
throw new GradleException("out-of-date versions " + actualVersions +
", expected " + versions + "; run gradle vagrantUpdateVersions")
}
}
}
}
}
private static void createCheckVagrantVersionTask(Project project) {
project.tasks.create('vagrantCheckVersion', Exec) {
description 'Check the Vagrant version'
group 'Verification'
commandLine 'vagrant', '--version'
standardOutput = new ByteArrayOutputStream()
doLast {
String version = standardOutput.toString().trim()
if ((version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) == false) {
throw new InvalidUserDataException("Illegal version of vagrant [${version}]. Need [Vagrant 1.8.6+]")
}
}
}
}
private static void createCheckVirtualBoxVersionTask(Project project) {
project.tasks.create('virtualboxCheckVersion', Exec) {
description 'Check the Virtualbox version'
group 'Verification'
commandLine 'vboxmanage', '--version'
standardOutput = new ByteArrayOutputStream()
doLast {
String version = standardOutput.toString().trim()
try {
String[] versions = version.split('\\.')
int major = Integer.parseInt(versions[0])
int minor = Integer.parseInt(versions[1])
if ((major < 5) || (major == 5 && minor < 1)) {
throw new InvalidUserDataException("Illegal version of virtualbox [${version}]. Need [5.1+]")
}
} catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
throw new InvalidUserDataException("Unable to parse version of virtualbox [${version}]. Required [5.1+]", e)
}
}
}
}
private static void createPackagingTestTask(Project project) {
project.tasks.create('packagingTest') {
group 'Verification'
description "Tests yum/apt packages using vagrant and bats.\n" +
" Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" +
" 'sample' can be used to test a single yum and apt box. 'all' can be used to\n" +
" test all available boxes. The available boxes are: \n" +
" ${BOXES}"
dependsOn 'vagrantCheckVersion'
}
}
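// The 'vagrant.boxes' property from the description above is passed with -P, e.g.:
//   gradle packagingTest                                  (the 'sample' boxes)
//   gradle packagingTest -Pvagrant.boxes=all              (every box in BOXES)
//   gradle packagingTest -Pvagrant.boxes=centos-7,oel-7   (an explicit subset)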
private static void createVagrantTasks(Project project) {
createCleanTask(project)
createStopTask(project)
createSmokeTestTask(project)
createUpdateVersionsTask(project)
createVerifyVersionsTask(project)
createCheckVagrantVersionTask(project)
createCheckVirtualBoxVersionTask(project)
createPrepareVagrantTestEnvTask(project)
createPackagingTestTask(project)
}
private static void createVagrantBoxesTasks(Project project) {
assert project.extensions.esvagrant.boxes != null
assert project.tasks.stop != null
Task stop = project.tasks.stop
assert project.tasks.vagrantSmokeTest != null
Task vagrantSmokeTest = project.tasks.vagrantSmokeTest
assert project.tasks.vagrantCheckVersion != null
Task vagrantCheckVersion = project.tasks.vagrantCheckVersion
assert project.tasks.virtualboxCheckVersion != null
Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
assert project.tasks.vagrantSetUp != null
Task vagrantSetUp = project.tasks.vagrantSetUp
assert project.tasks.packagingTest != null
Task packagingTest = project.tasks.packagingTest
/*
* We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD)
* so that boxes are not duplicated for every Gradle project that use this VagrantTestPlugin.
*/
def vagrantEnvVars = [
'VAGRANT_CWD' : "${project.rootDir.absolutePath}",
'VAGRANT_VAGRANTFILE' : 'Vagrantfile',
'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}"
]
// Each box gets it own set of tasks
for (String box : BOXES) {
String boxTask = box.capitalize().replace('-', '')
// always add a halt task for all boxes, so clean makes sure they are all shutdown
Task halt = project.tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) {
boxName box
environmentVars vagrantEnvVars
args 'halt', box
}
stop.dependsOn(halt)
if (project.extensions.esvagrant.boxes.contains(box) == false) {
// we only need a halt task if this box was not specified
continue;
}
Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) {
boxName box
environmentVars vagrantEnvVars
args 'box', 'update', box
dependsOn vagrantCheckVersion, virtualboxCheckVersion, vagrantSetUp
}
Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
boxName box
environmentVars vagrantEnvVars
/* Its important that we try to reprovision the box even if it already
exists. That way updates to the vagrant configuration take automatically.
That isn't to say that the updates will always be compatible. Its ok to
just destroy the boxes if they get busted but that is a manual step
because its slow-ish. */
/* We lock the provider to virtualbox because the Vagrantfile specifies
lots of boxes that only work properly in virtualbox. Virtualbox is
vagrant's default but its possible to change that default and folks do.
But the boxes that we use are unlikely to work properly with other
virtualization providers. Thus the lock. */
args 'up', box, '--provision', '--provider', 'virtualbox'
/* It'd be possible to check if the box is already up here and output
SKIPPED but that would require running vagrant status which is slow! */
dependsOn update
}
Task smoke = project.tasks.create("vagrant${boxTask}#smoketest", Exec) {
environment vagrantEnvVars
dependsOn up
finalizedBy halt
commandLine 'vagrant', 'ssh', box, '--command',
"set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'"
}
vagrantSmokeTest.dependsOn(smoke)
Task packaging = project.tasks.create("vagrant${boxTask}#packagingtest", BatsOverVagrantTask) {
boxName box
environmentVars vagrantEnvVars
dependsOn up
finalizedBy halt
command BATS_TEST_COMMAND
}
packagingTest.dependsOn(packaging)
}
}
}


@@ -0,0 +1 @@
implementation-class=org.elasticsearch.gradle.vagrant.VagrantTestPlugin
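
Gradle derives the plugin id from this descriptor's file name under META-INF/gradle-plugins, which is not visible in this view. Assuming the file is named elasticsearch.vagrant.properties, a build script would opt in with:
-------------------------------------------------
apply plugin: 'elasticsearch.vagrant'
-------------------------------------------------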


@@ -1,5 +1,5 @@
 elasticsearch = 6.0.0-alpha1
-lucene = 6.3.0-snapshot-a66a445
+lucene = 6.3.0
 # optional dependencies
 spatial4j = 0.6


@@ -35,7 +35,7 @@ import java.util.List;
 public class NoopPlugin extends Plugin implements ActionPlugin {
     @Override
-    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
         return Arrays.asList(
             new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
             new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)

@@ -1 +0,0 @@
-61aacb657e44a9beabf95834e106bbb96373a703

@@ -0,0 +1 @@
+494aed699af238c3872a6b65e17939e9cb7ddbe0

@@ -1 +0,0 @@
-600de75a81e259cab0384e546d9a1d527ddba6d6

@@ -0,0 +1 @@
+77dede7dff1b833ca2e92d8ab137edb209354d9b

@@ -1 +0,0 @@
-188774468a56a8731ca639527d721060d26ffebd

@@ -0,0 +1 @@
+d3c87ea89e2f83e401f9cc7f14e4c43945f7f1e1

@@ -1 +0,0 @@
-5afd9271e3d8f645440f48ff2487545ae5573e7e

@@ -0,0 +1 @@
+2c96d59e318ea66838aeb9c5cfb8b4d27b40953c

@@ -1 +0,0 @@
-0f575175e26d4d3b1095f6300cbefbbb3ee994cd

@@ -0,0 +1 @@
+4f154d8badfe47fe45503c18fb30f2177f758794

@@ -1 +0,0 @@
-ee898c3d318681c9f29c56e6d9b52876be96d814

@@ -0,0 +1 @@
+79b898117dcfde2981ec6806e420ff218842eca8

@@ -1 +0,0 @@
-ea6defd322456711394b4dabcda70a217e3caacd

@@ -0,0 +1 @@
+89edeb404e507d640cb13903acff6953199704a2

@@ -1 +0,0 @@
-ea2de7f9753a8e19a1ec9f25a3ea65d7ce909a0e

@@ -0,0 +1 @@
+02d0e1f5a9df15ac911ad495bad5ea253ab50a9f

@@ -1 +0,0 @@
-0b15c6f29bfb9ec14a4615013a94bfa43a63793d

@@ -0,0 +1 @@
+eb7938233c8103223069c7b5b5f785b4d20ddafa

@@ -1 +0,0 @@
-d89d9fa1036c38144e0b8db079ae959353847c86

@@ -0,0 +1 @@
+e979fb02155cbe81a8d335d6dc41d2ef06be68b6

@@ -1 +0,0 @@
-c003c1ab0a19a02b30156ce13372cff1001d6a7d

@@ -0,0 +1 @@
+257387c45c6fa2b77fd6931751f93fdcd798ced4

@@ -1 +0,0 @@
-a3c570bf588d7c9ca43d074db9ce9c9b8408b930

@@ -0,0 +1 @@
+3cf5fe5402b5e34b240b73501c9e97a82428259e

@@ -1 +0,0 @@
-de54ca61f5892cf2c88ac083b3332a827beca7ff

@@ -0,0 +1 @@
+1b77ef3740dc885c62d5966fbe9aea1199d344fb

@@ -1 +0,0 @@
-cacdf81b324acd335be63798d5a3dd16e7dff9a3

@@ -0,0 +1 @@
+aa94b4a8636b3633008640cc5155ad354aebcea5

@@ -1 +0,0 @@
-a5cb3723bc8e0db185fc43e57b648145de27fde8

@@ -0,0 +1 @@
+ed5d8ee5cd7edcad5d4ffca2b4540ccc844e9bb0


@@ -1,130 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.index;
import org.apache.lucene.util.StringHelper;
import java.io.IOException;
/**
* Forked utility methods from Lucene's PointValues until LUCENE-7257 is released.
*/
public class XPointValues {
/** Return the cumulated number of points across all leaves of the given
* {@link IndexReader}. Leaves that do not have points for the given field
* are ignored.
* @see PointValues#size(String) */
public static long size(IndexReader reader, String field) throws IOException {
long size = 0;
for (LeafReaderContext ctx : reader.leaves()) {
FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
if (info == null || info.getPointDimensionCount() == 0) {
continue;
}
PointValues values = ctx.reader().getPointValues();
size += values.size(field);
}
return size;
}
/** Return the cumulated number of docs that have points across all leaves
* of the given {@link IndexReader}. Leaves that do not have points for the
* given field are ignored.
* @see PointValues#getDocCount(String) */
public static int getDocCount(IndexReader reader, String field) throws IOException {
int count = 0;
for (LeafReaderContext ctx : reader.leaves()) {
FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
if (info == null || info.getPointDimensionCount() == 0) {
continue;
}
PointValues values = ctx.reader().getPointValues();
count += values.getDocCount(field);
}
return count;
}
/** Return the minimum packed values across all leaves of the given
* {@link IndexReader}. Leaves that do not have points for the given field
* are ignored.
* @see PointValues#getMinPackedValue(String) */
public static byte[] getMinPackedValue(IndexReader reader, String field) throws IOException {
byte[] minValue = null;
for (LeafReaderContext ctx : reader.leaves()) {
FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
if (info == null || info.getPointDimensionCount() == 0) {
continue;
}
PointValues values = ctx.reader().getPointValues();
byte[] leafMinValue = values.getMinPackedValue(field);
if (leafMinValue == null) {
continue;
}
if (minValue == null) {
minValue = leafMinValue.clone();
} else {
final int numDimensions = values.getNumDimensions(field);
final int numBytesPerDimension = values.getBytesPerDimension(field);
for (int i = 0; i < numDimensions; ++i) {
int offset = i * numBytesPerDimension;
if (StringHelper.compare(numBytesPerDimension, leafMinValue, offset, minValue, offset) < 0) {
System.arraycopy(leafMinValue, offset, minValue, offset, numBytesPerDimension);
}
}
}
}
return minValue;
}
/** Return the maximum packed values across all leaves of the given
* {@link IndexReader}. Leaves that do not have points for the given field
* are ignored.
* @see PointValues#getMaxPackedValue(String) */
public static byte[] getMaxPackedValue(IndexReader reader, String field) throws IOException {
byte[] maxValue = null;
for (LeafReaderContext ctx : reader.leaves()) {
FieldInfo info = ctx.reader().getFieldInfos().fieldInfo(field);
if (info == null || info.getPointDimensionCount() == 0) {
continue;
}
PointValues values = ctx.reader().getPointValues();
byte[] leafMaxValue = values.getMaxPackedValue(field);
if (leafMaxValue == null) {
continue;
}
if (maxValue == null) {
maxValue = leafMaxValue.clone();
} else {
final int numDimensions = values.getNumDimensions(field);
final int numBytesPerDimension = values.getBytesPerDimension(field);
for (int i = 0; i < numDimensions; ++i) {
int offset = i * numBytesPerDimension;
if (StringHelper.compare(numBytesPerDimension, leafMaxValue, offset, maxValue, offset) > 0) {
System.arraycopy(leafMaxValue, offset, maxValue, offset, numBytesPerDimension);
}
}
}
}
return maxValue;
}
/** Default constructor */
private XPointValues() {
}
}


@@ -523,16 +523,14 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     org.elasticsearch.index.shard.IndexShardRelocatedException::new, 45),
 NODE_SHOULD_NOT_CONNECT_EXCEPTION(org.elasticsearch.transport.NodeShouldNotConnectException.class,
     org.elasticsearch.transport.NodeShouldNotConnectException::new, 46),
-INDEX_TEMPLATE_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexTemplateAlreadyExistsException.class,
-    org.elasticsearch.indices.IndexTemplateAlreadyExistsException::new, 47),
+// 47 used to be for IndexTemplateAlreadyExistsException, which was deprecated in 5.1 and removed in 6.0
 TRANSLOG_CORRUPTED_EXCEPTION(org.elasticsearch.index.translog.TranslogCorruptedException.class,
     org.elasticsearch.index.translog.TranslogCorruptedException::new, 48),
 CLUSTER_BLOCK_EXCEPTION(org.elasticsearch.cluster.block.ClusterBlockException.class,
     org.elasticsearch.cluster.block.ClusterBlockException::new, 49),
 FETCH_PHASE_EXECUTION_EXCEPTION(org.elasticsearch.search.fetch.FetchPhaseExecutionException.class,
     org.elasticsearch.search.fetch.FetchPhaseExecutionException::new, 50),
-INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class,
-    org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
+// 51 used to be for IndexShardAlreadyExistsException, which was deprecated in 5.1 and removed in 6.0
 VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class,
     org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
 ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
@@ -553,7 +551,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper::new, 62),
 ALIAS_FILTER_PARSING_EXCEPTION(org.elasticsearch.indices.AliasFilterParsingException.class,
     org.elasticsearch.indices.AliasFilterParsingException::new, 63),
-// 64 was DeleteByQueryFailedEngineException, which was removed in 3.0
+// 64 was DeleteByQueryFailedEngineException, which was removed in 5.0
 GATEWAY_EXCEPTION(org.elasticsearch.gateway.GatewayException.class, org.elasticsearch.gateway.GatewayException::new, 65),
 INDEX_SHARD_NOT_RECOVERING_EXCEPTION(org.elasticsearch.index.shard.IndexShardNotRecoveringException.class,
     org.elasticsearch.index.shard.IndexShardNotRecoveringException::new, 66),


@@ -19,6 +19,7 @@
 package org.elasticsearch;
+import org.apache.lucene.util.MathUtil;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
@@ -298,7 +299,27 @@ public class Version {
      * is a beta or RC release then the version itself is returned.
      */
     public Version minimumCompatibilityVersion() {
-        return Version.smallest(this, fromId(major * 1000000 + 99));
+        final int bwcMajor;
+        final int bwcMinor;
+        if (this.onOrAfter(Version.V_6_0_0_alpha1)) {
+            bwcMajor = major - 1;
+            bwcMinor = 0; // TODO we have to move this to the latest released minor of the last major but for now we just keep
+        } else {
+            bwcMajor = major;
+            bwcMinor = 0;
+        }
+        return Version.smallest(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
+    }
+
+    /**
+     * Returns <code>true</code> iff both versions are compatible. Otherwise <code>false</code>.
+     */
+    public boolean isCompatible(Version version) {
+        boolean compatible = onOrAfter(version.minimumCompatibilityVersion())
+            && version.onOrAfter(minimumCompatibilityVersion());
+        assert compatible == false || Math.max(major, version.major) - Math.min(major, version.major) <= 1;
+        return compatible;
     }
@SuppressForbidden(reason = "System.out.*") @SuppressForbidden(reason = "System.out.*")
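
For reference, fromId packs a version into one integer as major * 1000000 + minor * 10000 + revision * 100 + build, where build 99 marks a release (elsewhere in this commit, Version.fromId(5010099) stands for 5.1.0). A worked check of the new floor, as a standalone Groovy sketch:
-------------------------------------------------
// For any 6.x version (on or after 6.0.0_alpha1): bwcMajor = major - 1 = 5 and
// bwcMinor = 0, so the compatibility floor is fromId(5000099), i.e. 5.0.0.
int bwcMajor = 6 - 1
int bwcMinor = 0
assert bwcMajor * 1000000 + bwcMinor * 10000 + 99 == 5000099
-------------------------------------------------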


@@ -356,7 +356,7 @@ public class ActionModule extends AbstractModule {
         register(handler.getAction().name(), handler);
     }
-    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void register(
+    public <Request extends ActionRequest, Response extends ActionResponse> void register(
             GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction,
             Class<?>... supportTransportActions) {
         register(new ActionHandler<>(action, transportAction, supportTransportActions));


@@ -25,7 +25,7 @@ import org.elasticsearch.transport.TransportRequest;
 import java.io.IOException;
-public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {
+public abstract class ActionRequest extends TransportRequest {
     public ActionRequest() {
         super();


@@ -25,7 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
 * Transport level private response for the transport handler registered under
 * {@value org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction#NAME}
 */
-public final class LivenessRequest extends ActionRequest<LivenessRequest> {
+public final class LivenessRequest extends ActionRequest {
     @Override
     public ActionRequestValidationException validate() {
         return null;


@@ -33,7 +33,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  * A request to get node tasks
  */
-public class GetTaskRequest extends ActionRequest<GetTaskRequest> {
+public class GetTaskRequest extends ActionRequest {
     private TaskId taskId = TaskId.EMPTY_TASK_ID;
     private boolean waitForCompletion = false;
     private TimeValue timeout = null;


@@ -30,7 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
 /** Request the mappings of specific fields */
-public class GetFieldMappingsRequest extends ActionRequest<GetFieldMappingsRequest> implements IndicesRequest.Replaceable {
+public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable {
     protected boolean local = false;


@@ -32,7 +32,7 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
     private boolean updateAllTypes = false;
-    PutMappingClusterStateUpdateRequest() {
+    public PutMappingClusterStateUpdateRequest() {
     }


@@ -64,8 +64,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(PutIndexTemplateRequest.class));
-    public static final Version V_5_1_0 = Version.fromId(5010099);
     private String name;
     private String cause = "";
@@ -152,7 +150,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
     /**
      * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
-     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+     * exists, it will fail with an {@link IllegalArgumentException}.
      */
     public PutIndexTemplateRequest create(boolean create) {
         this.create = create;
@@ -473,7 +471,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         cause = in.readString();
         name = in.readString();
-        if (in.getVersion().onOrAfter(V_5_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             indexPatterns = in.readList(StreamInput::readString);
         } else {
             indexPatterns = Collections.singletonList(in.readString());
@@ -503,7 +501,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         super.writeTo(out);
         out.writeString(cause);
         out.writeString(name);
-        if (out.getVersion().onOrAfter(V_5_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             out.writeStringList(indexPatterns);
         } else {
             out.writeString(indexPatterns.size() > 0 ? indexPatterns.get(0) : "");


@@ -76,7 +76,7 @@ public class PutIndexTemplateRequestBuilder
     /**
      * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
-     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
+     * exists, it will fail with an {@link IllegalArgumentException}.
      */
     public PutIndexTemplateRequestBuilder setCreate(boolean create) {
         request.create(create);


@@ -61,7 +61,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * Note that we only support refresh on the bulk request not per item.
 * @see org.elasticsearch.client.Client#bulk(BulkRequest)
 */
-public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
+public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
     private static final DeprecationLogger DEPRECATION_LOGGER =
         new DeprecationLogger(Loggers.getLogger(BulkRequest.class));


@@ -48,7 +48,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
+public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
     /**
      * A single get item.


@@ -59,7 +59,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
     }
     @Override
-    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
+    public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
         switch (action) {
             case IndexAction.NAME:
                 IndexRequest indexRequest = (IndexRequest) request;


@@ -54,7 +54,7 @@ public final class IngestProxyActionFilter implements ActionFilter {
     }
     @Override
-    public <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
+    public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
         Action ingestAction;
         switch (action) {
             case IndexAction.NAME:


@@ -37,7 +37,7 @@ import java.util.Map;
 import static org.elasticsearch.ingest.IngestDocument.MetaData;
-public class SimulatePipelineRequest extends ActionRequest<SimulatePipelineRequest> {
+public class SimulatePipelineRequest extends ActionRequest {
     private String id;
     private boolean verbose;


@@ -22,7 +22,7 @@ package org.elasticsearch.action.main;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
-public class MainRequest extends ActionRequest<MainRequest> {
+public class MainRequest extends ActionRequest {
     @Override
     public ActionRequestValidationException validate() {


@@ -31,7 +31,7 @@ import java.util.List;
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {
+public class ClearScrollRequest extends ActionRequest {
     private List<String> scrollIds;


@@ -36,7 +36,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  * A multi search API request.
  */
-public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implements CompositeIndicesRequest {
+public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest {
     private int maxConcurrentSearchRequests = 0;
     private List<SearchRequest> requests = new ArrayList<>();


@@ -49,7 +49,7 @@ import java.util.Objects;
 * @see org.elasticsearch.client.Client#search(SearchRequest)
 * @see SearchResponse
 */
-public final class SearchRequest extends ActionRequest<SearchRequest> implements IndicesRequest.Replaceable {
+public final class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable {
     private SearchType searchType = SearchType.DEFAULT;


@@ -33,7 +33,7 @@ import java.util.Objects;
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-public class SearchScrollRequest extends ActionRequest<SearchScrollRequest> {
+public class SearchScrollRequest extends ActionRequest {
     private String scrollId;
     private Scroll scroll;


@@ -40,7 +40,7 @@ public interface ActionFilter {
      * Enables filtering the execution of an action on the request side, either by sending a response through the
      * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
      */
-    <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request,
+    <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request,
             ActionListener<Response> listener, ActionFilterChain<Request, Response> chain);
     /**
@@ -62,7 +62,7 @@ public interface ActionFilter {
         }
         @Override
-        public final <Request extends ActionRequest<Request>, Response extends ActionResponse> void apply(Task task, String action, Request request,
+        public final <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request,
                 ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
             if (apply(action, request, listener)) {
                 chain.proceed(task, action, request, listener);
@@ -73,7 +73,7 @@ public interface ActionFilter {
         * Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
         * if it should be aborted since the filter already handled the request and called the given listener.
         */
-        protected abstract boolean apply(String action, ActionRequest<?> request, ActionListener<?> listener);
+        protected abstract boolean apply(String action, ActionRequest request, ActionListener<?> listener);
         @Override
         public final <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener,
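
With the non-recursive bound, a concrete filter can be written directly against these signatures. A minimal pass-through sketch in Groovy (hypothetical class; order() belongs to ActionFilter but is outside this hunk, and the response-side proceed signature is assumed symmetrical to the request side):
-------------------------------------------------
class PassThroughFilter implements ActionFilter {
    @Override
    int order() {
        return 0 // position of this filter in the chain
    }

    @Override
    public <Request extends ActionRequest, Response extends ActionResponse> void apply(
            Task task, String action, Request request,
            ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
        chain.proceed(task, action, request, listener) // continue unchanged
    }

    @Override
    public <Response extends ActionResponse> void apply(
            String action, Response response,
            ActionListener<Response> listener, ActionFilterChain<?, Response> chain) {
        chain.proceed(action, response, listener) // continue unchanged
    }
}
-------------------------------------------------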


@@ -27,7 +27,7 @@ import org.elasticsearch.tasks.Task;
 /**
  * A filter chain allowing to continue and process the transport action request
  */
-public interface ActionFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse> {
+public interface ActionFilterChain<Request extends ActionRequest, Response extends ActionResponse> {
     /**
      * Continue processing the request. Should only be called if a response has not been sent through


@@ -35,7 +35,7 @@ import java.util.function.Supplier;
 /**
  * A TransportAction that self registers a handler into the transport service
  */
-public abstract class HandledTransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
+public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse>
     extends TransportAction<Request, Response> {
     protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,


@@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
-public abstract class TransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse> extends AbstractComponent {
+public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
     protected final ThreadPool threadPool;
     protected final String actionName;
@@ -148,7 +148,7 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
     protected abstract void doExecute(Request request, ActionListener<Response> listener);
-    private static class RequestFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse>
+    private static class RequestFilterChain<Request extends ActionRequest, Response extends ActionResponse>
         implements ActionFilterChain<Request, Response> {
         private final TransportAction<Request, Response> action;
@@ -184,7 +184,7 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
         }
     }
-    private static class ResponseFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse>
+    private static class ResponseFilterChain<Request extends ActionRequest, Response extends ActionResponse>
         implements ActionFilterChain<Request, Response> {
         private final ActionFilter[] filters;


@@ -28,7 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import java.io.IOException;
-public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends ActionRequest<Request> implements IndicesRequest.Replaceable {
+public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends ActionRequest implements IndicesRequest.Replaceable {
     protected String[] indices;
     private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();


@@ -29,7 +29,7 @@ import java.io.IOException;
 /**
  * A base request for master based operations.
  */
-public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ActionRequest<Request> {
+public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ActionRequest {
     public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);


@@ -29,7 +29,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import java.io.IOException;
-public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>> extends ActionRequest<Request> {
+public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>> extends ActionRequest {
     /**
      * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes}


@@ -43,7 +43,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * Requests that are run on a particular replica, first on the primary and then on the replicas like {@link IndexRequest} or
 * {@link TransportShardRefreshAction}.
 */
-public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request>
+public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest
     implements IndicesRequest {
     public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);


@@ -32,7 +32,7 @@ import org.elasticsearch.index.shard.ShardId;
import java.io.IOException; import java.io.IOException;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
public abstract class InstanceShardOperationRequest<Request extends InstanceShardOperationRequest<Request>> extends ActionRequest<Request> public abstract class InstanceShardOperationRequest<Request extends InstanceShardOperationRequest<Request>> extends ActionRequest
implements IndicesRequest { implements IndicesRequest {
public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);


@@ -31,7 +31,7 @@ import org.elasticsearch.index.shard.ShardId;
import java.io.IOException; import java.io.IOException;
public abstract class SingleShardRequest<Request extends SingleShardRequest<Request>> extends ActionRequest<Request> implements IndicesRequest { public abstract class SingleShardRequest<Request extends SingleShardRequest<Request>> extends ActionRequest implements IndicesRequest {
public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed();


@@ -36,7 +36,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/** /**
* A base class for task requests * A base class for task requests
*/ */
public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> { public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest {
public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY; public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY;


@@ -41,7 +41,7 @@ import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
public class MultiTermVectorsRequest extends ActionRequest<MultiTermVectorsRequest> implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest { public class MultiTermVectorsRequest extends ActionRequest implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest {
String preference; String preference;
List<TermVectorsRequest> requests = new ArrayList<>(); List<TermVectorsRequest> requests = new ArrayList<>();


@@ -20,10 +20,10 @@
package org.elasticsearch.bootstrap; package org.elasticsearch.bootstrap;
import org.elasticsearch.SecureSM; import org.elasticsearch.SecureSM;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.HttpTransportSettings;
@@ -266,12 +266,14 @@ final class Security {
} }
} }
static void addBindPermissions(Permissions policy, Settings settings) throws IOException { /**
// http is simple * Add dynamic {@link SocketPermission}s based on HTTP and transport settings.
String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString(); *
// listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted. * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
// see SocketPermission implies() code * @param settings the {@link Settings} instance to read the HTTP and transport settings from
policy.add(new SocketPermission("*:" + httpRange, "listen,resolve")); */
static void addBindPermissions(Permissions policy, Settings settings) {
addSocketPermissionForHttp(policy, settings);
// transport is waaaay overengineered // transport is waaaay overengineered
Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups(); Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) { if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
@@ -284,16 +286,76 @@ final class Security {
for (Map.Entry<String, Settings> entry : profiles.entrySet()) { for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
Settings profileSettings = entry.getValue(); Settings profileSettings = entry.getValue();
String name = entry.getKey(); String name = entry.getKey();
String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings));
// a profile is only valid if its the default profile, or if it has an actual name and specifies a port // a profile is only valid if its the default profile, or if it has an actual name and specifies a port
boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null); boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
if (valid) { if (valid) {
addSocketPermissionForTransportProfile(policy, profileSettings, settings);
}
}
for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) {
// tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting
if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {
addSocketPermissionForHttp(policy, tribeNodeSettings);
}
addSocketPermissionForTransport(policy, tribeNodeSettings);
}
}
/**
* Add dynamic {@link SocketPermission} based on HTTP settings.
*
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
* @param settings the {@link Settings} instance to read the HTTP settings from
*/
private static void addSocketPermissionForHttp(final Permissions policy, final Settings settings) {
// http is simple
final String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
addSocketPermissionForPortRange(policy, httpRange);
}
/**
* Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in
* the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.
*
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
* @param profileSettings the {@link Settings} to read the transport profile from
* @param settings the {@link Settings} instance to read the transport settings from
*/
private static void addSocketPermissionForTransportProfile(
final Permissions policy,
final Settings profileSettings,
final Settings settings) {
final String transportRange = profileSettings.get("port");
if (transportRange != null) {
addSocketPermissionForPortRange(policy, transportRange);
} else {
addSocketPermissionForTransport(policy, settings);
}
}
/**
* Add dynamic {@link SocketPermission} based on transport settings.
*
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
* @param settings the {@link Settings} instance to read the transport settings from
*/
private static void addSocketPermissionForTransport(final Permissions policy, final Settings settings) {
final String transportRange = TransportSettings.PORT.get(settings);
addSocketPermissionForPortRange(policy, transportRange);
}
/**
* Add dynamic {@link SocketPermission} for the specified port range.
*
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission} to.
* @param portRange the port range
*/
private static void addSocketPermissionForPortRange(final Permissions policy, final String portRange) {
// listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted. // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
// see SocketPermission implies() code // see SocketPermission implies() code
policy.add(new SocketPermission("*:" + transportRange, "listen,resolve")); policy.add(new SocketPermission("*:" + portRange, "listen,resolve"));
}
}
} }
/** /**
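All of the helpers above funnel into a single SocketPermission grant per port range. A standalone sketch of that last step using only JDK classes (the 9200-9300 range is illustrative):

-------------------------------------
import java.net.SocketPermission;
import java.security.Permissions;

public class SocketPermissionSketch {
    public static void main(String[] args) {
        Permissions policy = new Permissions();
        // what addSocketPermissionForPortRange boils down to
        policy.add(new SocketPermission("*:9200-9300", "listen,resolve"));
        // implies() is what the security manager consults at bind time
        System.out.println(policy.implies(new SocketPermission("localhost:9201", "listen"))); // true
        System.out.println(policy.implies(new SocketPermission("localhost:9500", "listen"))); // false
    }
}
-------------------------------------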


@@ -40,7 +40,7 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder type. * @param <RequestBuilder> The request builder type.
* @return A future allowing to get back the response. * @return A future allowing to get back the response.
*/ */
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute( <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
final Action<Request, Response, RequestBuilder> action, final Request request); final Action<Request, Response, RequestBuilder> action, final Request request);
/** /**
@@ -53,7 +53,7 @@ public interface ElasticsearchClient {
* @param <Response> The response type. * @param <Response> The response type.
* @param <RequestBuilder> The request builder type. * @param <RequestBuilder> The request builder type.
*/ */
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute( <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener); final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
/** /**
@@ -65,7 +65,7 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder. * @param <RequestBuilder> The request builder.
* @return The request builder, that can, at a later stage, execute the request. * @return The request builder, that can, at a later stage, execute the request.
*/ */
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute( <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action); final Action<Request, Response, RequestBuilder> action);
/** /**
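The three signatures above expose the same execution in future-returning, listener-accepting, and builder styles. A toy model of the first two, with plain JDK types standing in for ActionFuture and ActionListener:

-------------------------------------
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

class ClientSketch {
    // future-returning style: the caller may block on the result
    CompletableFuture<String> execute(String request) {
        return CompletableFuture.completedFuture("response to " + request);
    }

    // listener-accepting style: the caller reacts asynchronously
    void execute(String request, Consumer<String> listener) {
        execute(request).thenAccept(listener);
    }

    public static void main(String[] args) throws Exception {
        ClientSketch client = new ClientSketch();
        System.out.println(client.execute("ping").get());
        client.execute("ping", System.out::println);
    }
}
-------------------------------------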


@@ -62,7 +62,7 @@ public abstract class FilterClient extends AbstractClient {
} }
@Override @Override
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute( protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
in().execute(action, request, listener); in().execute(action, request, listener);
} }


@@ -58,7 +58,7 @@ public class ParentTaskAssigningClient extends FilterClient {
} }
@Override @Override
protected < Request extends ActionRequest<Request>, protected < Request extends ActionRequest,
Response extends ActionResponse, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {


@@ -56,7 +56,7 @@ public class NodeClient extends AbstractClient {
} }
@Override @Override
public < Request extends ActionRequest<Request>, public < Request extends ActionRequest,
Response extends ActionResponse, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
@@ -69,7 +69,7 @@ public class NodeClient extends AbstractClient {
* method if you don't need access to the task when listening for the response. This is the method used to implement the {@link Client} * method if you don't need access to the task when listening for the response. This is the method used to implement the {@link Client}
* interface. * interface.
*/ */
public < Request extends ActionRequest<Request>, public < Request extends ActionRequest,
Response extends ActionResponse Response extends ActionResponse
> Task executeLocally(GenericAction<Request, Response> action, Request request, ActionListener<Response> listener) { > Task executeLocally(GenericAction<Request, Response> action, Request request, ActionListener<Response> listener) {
return transportAction(action).execute(request, listener); return transportAction(action).execute(request, listener);
@@ -79,7 +79,7 @@ public class NodeClient extends AbstractClient {
* Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking a {@link TaskListener}. Prefer this * Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking a {@link TaskListener}. Prefer this
* method if you need access to the task when listening for the response. * method if you need access to the task when listening for the response.
*/ */
public < Request extends ActionRequest<Request>, public < Request extends ActionRequest,
Response extends ActionResponse Response extends ActionResponse
> Task executeLocally(GenericAction<Request, Response> action, Request request, TaskListener<Response> listener) { > Task executeLocally(GenericAction<Request, Response> action, Request request, TaskListener<Response> listener) {
return transportAction(action).execute(request, listener); return transportAction(action).execute(request, listener);
@@ -89,7 +89,7 @@ public class NodeClient extends AbstractClient {
* Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available. * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available.
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
private < Request extends ActionRequest<Request>, private < Request extends ActionRequest,
Response extends ActionResponse Response extends ActionResponse
> TransportAction<Request, Response> transportAction(GenericAction<Request, Response> action) { > TransportAction<Request, Response> transportAction(GenericAction<Request, Response> action) {
if (actions == null) { if (actions == null) {


@@ -377,13 +377,13 @@ public abstract class AbstractClient extends AbstractComponent implements Client
} }
@Override @Override
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute( public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action) { final Action<Request, Response, RequestBuilder> action) {
return action.newRequestBuilder(this); return action.newRequestBuilder(this);
} }
@Override @Override
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute( public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) { Action<Request, Response, RequestBuilder> action, Request request) {
PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture(); PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture();
execute(action, request, actionFuture); execute(action, request, actionFuture);
@@ -394,13 +394,13 @@ public abstract class AbstractClient extends AbstractComponent implements Client
* This is the single execution point of *all* clients. * This is the single execution point of *all* clients.
*/ */
@Override @Override
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute( public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
listener = threadedWrapper.wrap(listener); listener = threadedWrapper.wrap(listener);
doExecute(action, request, listener); doExecute(action, request, listener);
} }
protected abstract <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener); protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
@Override @Override
public ActionFuture<IndexResponse> index(final IndexRequest request) { public ActionFuture<IndexResponse> index(final IndexRequest request) {
@@ -696,19 +696,19 @@ public abstract class AbstractClient extends AbstractComponent implements Client
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) { Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request); return client.execute(action, request);
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener); client.execute(action, request, listener);
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) { Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action); return client.prepareExecute(action);
} }
@@ -1212,19 +1212,19 @@ public abstract class AbstractClient extends AbstractComponent implements Client
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) { Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request); return client.execute(action, request);
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener); client.execute(action, request, listener);
} }
@Override @Override
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute( public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) { Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action); return client.prepareExecute(action);
} }
@@ -1745,7 +1745,7 @@ public abstract class AbstractClient extends AbstractComponent implements Client
public Client filterWithHeader(Map<String, String> headers) { public Client filterWithHeader(Map<String, String> headers) {
return new FilterClient(this) { return new FilterClient(this) {
@Override @Override
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
ThreadContext threadContext = threadPool().getThreadContext(); ThreadContext threadContext = threadPool().getThreadContext();
try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) {
super.doExecute(action, request, listener); super.doExecute(action, request, listener);
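The header filtering above relies on stashing the current thread context, merging in the extra headers, and restoring the old context when the try block exits. The same pattern with a plain ThreadLocal (ThreadContext itself carries more than a header map; this is a reduced sketch):

-------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class ContextSketch {
    private static final ThreadLocal<Map<String, String>> CTX =
            ThreadLocal.withInitial(HashMap::new);

    // stash the current headers, merge in the new ones, restore on close
    static AutoCloseable stashAndMergeHeaders(Map<String, String> headers) {
        Map<String, String> stashed = CTX.get();
        Map<String, String> merged = new HashMap<>(stashed);
        merged.putAll(headers);
        CTX.set(merged);
        return () -> CTX.set(stashed);
    }

    public static void main(String[] args) throws Exception {
        try (AutoCloseable ctx = stashAndMergeHeaders(Collections.singletonMap("es-user", "alice"))) {
            System.out.println(CTX.get()); // merged headers visible here
        }
        System.out.println(CTX.get());     // previous (empty) context restored
    }
}
-------------------------------------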


@@ -329,7 +329,7 @@ public abstract class TransportClient extends AbstractClient {
} }
@Override @Override
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
proxy.execute(action, request, listener); proxy.execute(action, request, listener);
} }
} }


@@ -49,8 +49,6 @@ import java.util.Set;
public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaData> { public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaData> {
public static final Version V_5_1_0 = Version.fromId(5010099);
public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build();
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class)); private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(IndexTemplateMetaData.class));
@@ -210,7 +208,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
public IndexTemplateMetaData readFrom(StreamInput in) throws IOException { public IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString()); Builder builder = new Builder(in.readString());
builder.order(in.readInt()); builder.order(in.readInt());
if (in.getVersion().onOrAfter(V_5_1_0)) { if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
builder.patterns(in.readList(StreamInput::readString)); builder.patterns(in.readList(StreamInput::readString));
} else { } else {
builder.patterns(Collections.singletonList(in.readString())); builder.patterns(Collections.singletonList(in.readString()));
@@ -241,7 +239,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
out.writeString(name); out.writeString(name);
out.writeInt(order); out.writeInt(order);
if (out.getVersion().onOrAfter(V_5_1_0)) { if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
out.writeStringList(patterns); out.writeStringList(patterns);
} else { } else {
out.writeString(patterns.size() > 0 ? patterns.get(0) : ""); out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
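The template's patterns field is now version-gated on the wire: the whole list for 6.0+ streams, a single pattern string for older ones. A self-contained sketch of the write side (the version id and stream type are simplified stand-ins for Version and StreamOutput):

-------------------------------------
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

class PatternsWriteSketch {
    static final int V_6_0_0_ALPHA1 = 6000001; // illustrative id, not the real constant

    // mirrors writeTo above: whole list on new streams, first-or-empty on old ones
    static void writePatterns(DataOutputStream out, int streamVersion, List<String> patterns)
            throws IOException {
        if (streamVersion >= V_6_0_0_ALPHA1) {
            out.writeInt(patterns.size());
            for (String pattern : patterns) {
                out.writeUTF(pattern);
            }
        } else {
            out.writeUTF(patterns.isEmpty() ? "" : patterns.get(0));
        }
    }
}
-------------------------------------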


@@ -40,7 +40,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -161,7 +160,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
@Override @Override
public ClusterState execute(ClusterState currentState) throws Exception { public ClusterState execute(ClusterState currentState) throws Exception {
if (request.create && currentState.metaData().templates().containsKey(request.name)) { if (request.create && currentState.metaData().templates().containsKey(request.name)) {
throw new IndexTemplateAlreadyExistsException(request.name); throw new IllegalArgumentException("index_template [" + request.name + "] already exists");
} }
validateAndAddTemplate(request, templateBuilder, indicesService); validateAndAddTemplate(request, templateBuilder, indicesService);


@@ -20,9 +20,9 @@
package org.elasticsearch.cluster.metadata; package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.AckedClusterStateTaskListener;
@@ -34,7 +34,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
@@ -51,10 +50,8 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
/** /**
* Service responsible for submitting mapping changes * Service responsible for submitting mapping changes
*/ */
@@ -215,28 +212,24 @@ public class MetaDataMappingService extends AbstractComponent {
@Override @Override
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
List<PutMappingClusterStateUpdateRequest> tasks) throws Exception { List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
Set<Index> indicesToClose = new HashSet<>(); Map<Index, MapperService> indexMapperServices = new HashMap<>();
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder(); BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
try { try {
// precreate incoming indices;
for (PutMappingClusterStateUpdateRequest request : tasks) { for (PutMappingClusterStateUpdateRequest request : tasks) {
try { try {
for (Index index : request.indices()) { for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) { if (indexMapperServices.containsKey(indexMetaData.getIndex()) == false) {
// if the index does not exist we create it once, add all types to the mapper service and MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
// close it later once we are done with mapping update indexMapperServices.put(index, mapperService);
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService =
indicesService.createIndex(indexMetaData, Collections.emptyList(), shardId -> {});
// add mappings for all types, we need them for cross-type validation // add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) { for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), mapperService.merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
} }
} }
} }
currentState = applyRequest(currentState, request); currentState = applyRequest(currentState, request, indexMapperServices);
builder.success(request); builder.success(request);
} catch (Exception e) { } catch (Exception e) {
builder.failure(request, e); builder.failure(request, e);
@@ -244,34 +237,33 @@ public class MetaDataMappingService extends AbstractComponent {
} }
return builder.build(currentState); return builder.build(currentState);
} finally { } finally {
for (Index index : indicesToClose) { IOUtils.close(indexMapperServices.values());
indicesService.removeIndex(index, "created for mapping processing");
}
} }
} }
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
Map<Index, MapperService> indexMapperServices) throws IOException {
String mappingType = request.type(); String mappingType = request.type();
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
final MetaData metaData = currentState.metaData(); final MetaData metaData = currentState.metaData();
final List<Tuple<IndexService, IndexMetaData>> updateList = new ArrayList<>(); final List<IndexMetaData> updateList = new ArrayList<>();
for (Index index : request.indices()) { for (Index index : request.indices()) {
IndexService indexService = indicesService.indexServiceSafe(index); MapperService mapperService = indexMapperServices.get(index);
// IMPORTANT: always get the metadata from the state since it gets batched // IMPORTANT: always get the metadata from the state since it gets batched
// and if we pull it from the indexService we might miss an update etc. // and if we pull it from the indexService we might miss an update etc.
final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index); final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index);
// this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that // this is paranoia... just to be sure we use the exact same metadata on the update that
// we used for the validation, it makes this mechanism a little less scary // we used for the validation, it makes this mechanism a little less scary
updateList.add(new Tuple<>(indexService, indexMetaData)); updateList.add(indexMetaData);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception // try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper; DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); DocumentMapper existingMapper = mapperService.documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) { if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false); newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);
} else { } else {
newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null); newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null);
if (existingMapper != null) { if (existingMapper != null) {
// first, simulate: just call merge and ignore the result // first, simulate: just call merge and ignore the result
existingMapper.merge(newMapper.mapping(), request.updateAllTypes()); existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
@@ -287,7 +279,7 @@ public class MetaDataMappingService extends AbstractComponent {
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) { for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
String parentType = newMapper.parentFieldMapper().type(); String parentType = newMapper.parentFieldMapper().type();
if (parentType.equals(mapping.value.type()) && if (parentType.equals(mapping.value.type()) &&
indexService.mapperService().getParentTypes().contains(parentType) == false) { mapperService.getParentTypes().contains(parentType) == false) {
throw new IllegalArgumentException("can't add a _parent field that points to an " + throw new IllegalArgumentException("can't add a _parent field that points to an " +
"already existing type, that isn't already a parent"); "already existing type, that isn't already a parent");
} }
@@ -307,24 +299,25 @@ public class MetaDataMappingService extends AbstractComponent {
throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]"); throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]");
} }
MetaData.Builder builder = MetaData.builder(metaData); MetaData.Builder builder = MetaData.builder(metaData);
for (Tuple<IndexService, IndexMetaData> toUpdate : updateList) { boolean updated = false;
for (IndexMetaData indexMetaData : updateList) {
// do the actual merge here on the master, and update the mapping source // do the actual merge here on the master, and update the mapping source
// we use the exact same indexService and metadata we used to validate above here to actually apply the update // we use the exact same mapper service and metadata we used to validate above here to actually apply the update
final IndexService indexService = toUpdate.v1();
final IndexMetaData indexMetaData = toUpdate.v2();
final Index index = indexMetaData.getIndex(); final Index index = indexMetaData.getIndex();
final MapperService mapperService = indexMapperServices.get(index);
CompressedXContent existingSource = null; CompressedXContent existingSource = null;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
if (existingMapper != null) { if (existingMapper != null) {
existingSource = existingMapper.mappingSource(); existingSource = existingMapper.mappingSource();
} }
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes()); DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource(); CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) { if (existingSource != null) {
if (existingSource.equals(updatedSource)) { if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it // same source, no changes, ignore it
} else { } else {
updated = true;
// use the merged mapping source // use the merged mapping source
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
@@ -334,6 +327,7 @@
} }
} else { } else {
updated = true;
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource); logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) { } else if (logger.isInfoEnabled()) {
@@ -344,13 +338,16 @@
IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
// Mapping updates on a single type may have side-effects on other types so we need to // Mapping updates on a single type may have side-effects on other types so we need to
// update mapping metadata on all types // update mapping metadata on all types
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) { for (DocumentMapper mapper : mapperService.docMappers(true)) {
indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
} }
builder.put(indexMetaDataBuilder); builder.put(indexMetaDataBuilder);
} }
if (updated) {
return ClusterState.builder(currentState).metaData(builder).build(); return ClusterState.builder(currentState).metaData(builder).build();
} else {
return currentState;
}
} }
@Override @Override
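The batch executor now creates one short-lived MapperService per index, reuses it for both validation and the apply step, and closes every instance in the finally block. The lifecycle, reduced to JDK types (names here are illustrative; IOUtils.close additionally chains secondary failures as suppressed exceptions):

-------------------------------------
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

class PerIndexResourceSketch {
    static void executeBatch(Iterable<String> indices) throws IOException {
        Map<String, Closeable> services = new HashMap<>();
        try {
            for (String index : indices) {
                // create each service at most once per batch, like indexMapperServices
                services.computeIfAbsent(index, i -> () -> System.out.println("closed " + i));
                // ... validate and apply the mapping update against the service ...
            }
        } finally {
            // simplified equivalent of IOUtils.close(indexMapperServices.values())
            for (Closeable service : services.values()) {
                service.close();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        executeBatch(Arrays.asList("index-a", "index-b", "index-a")); // closes two services
    }
}
-------------------------------------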


@@ -33,6 +33,7 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
@@ -581,14 +582,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
} }
public Builder addShard(ShardRouting shardEntry) { public Builder addShard(ShardRouting shardEntry) {
for (ShardRouting shard : shards) {
// don't add two that map to the same node id
// we rely on the fact that a node does not have primary and backup of the same shard
if (shard.assignedToNode() && shardEntry.assignedToNode()
&& shard.currentNodeId().equals(shardEntry.currentNodeId())) {
return this;
}
}
shards.add(shardEntry); shards.add(shardEntry);
return this; return this;
} }
@@ -599,9 +592,28 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
} }
public IndexShardRoutingTable build() { public IndexShardRoutingTable build() {
// don't allow more than one shard copy with same id to be allocated to same node
assert distinctNodes(shards) : "more than one shard with same id assigned to same node (shards: " + shards + ")";
return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards))); return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards)));
} }
static boolean distinctNodes(List<ShardRouting> shards) {
Set<String> nodes = new HashSet<>();
for (ShardRouting shard : shards) {
if (shard.assignedToNode()) {
if (nodes.add(shard.currentNodeId()) == false) {
return false;
}
if (shard.relocating()) {
if (nodes.add(shard.relocatingNodeId()) == false) {
return false;
}
}
}
}
return true;
}
public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
Index index = new Index(in); Index index = new Index(in);
return readFromThin(in, index); return readFromThin(in, index);
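Note that distinctNodes counts a relocating copy against its target node as well, so a shard may not relocate onto a node that already holds another copy of the same shard. A tiny standalone check of the same rule (the Shard class is a stripped-down stand-in for ShardRouting):

-------------------------------------
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class DistinctNodesSketch {
    static class Shard {
        final String currentNode;     // null when unassigned
        final String relocatingNode;  // null when not relocating
        Shard(String currentNode, String relocatingNode) {
            this.currentNode = currentNode;
            this.relocatingNode = relocatingNode;
        }
    }

    // same rule as IndexShardRoutingTable.Builder#distinctNodes above
    static boolean distinctNodes(List<Shard> shards) {
        Set<String> nodes = new HashSet<>();
        for (Shard shard : shards) {
            if (shard.currentNode != null && nodes.add(shard.currentNode) == false) {
                return false;
            }
            if (shard.relocatingNode != null && nodes.add(shard.relocatingNode) == false) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // a copy relocating onto node2 collides with the copy already there
        System.out.println(distinctNodes(Arrays.asList(
                new Shard("node1", "node2"), new Shard("node2", null)))); // false
    }
}
-------------------------------------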


@@ -83,6 +83,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer; import java.util.function.BiConsumer;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@@ -123,7 +124,7 @@ public class ClusterService extends AbstractLifecycleComponent {
private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue(); private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
private final AtomicReference<ClusterServiceState> state = new AtomicReference<>(); private final AtomicReference<ClusterServiceState> state;
private final ClusterBlocks.Builder initialBlocks; private final ClusterBlocks.Builder initialBlocks;
@@ -137,7 +138,7 @@ public class ClusterService extends AbstractLifecycleComponent {
this.clusterSettings = clusterSettings; this.clusterSettings = clusterSettings;
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
// will be replaced on doStart. // will be replaced on doStart.
this.state.set(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN)); this.state = new AtomicReference<>(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN));
this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
this::setSlowTaskLoggingThreshold); this::setSlowTaskLoggingThreshold);
@@ -158,14 +159,45 @@ } }
} }
public synchronized void setLocalNode(DiscoveryNode localNode) { public synchronized void setLocalNode(DiscoveryNode localNode) {
assert state.get().getClusterState().nodes().getLocalNodeId() == null : "local node is already set"; assert clusterServiceState().getClusterState().nodes().getLocalNodeId() == null : "local node is already set";
this.state.getAndUpdate(css -> { updateState(css -> {
ClusterState clusterState = css.getClusterState(); ClusterState clusterState = css.getClusterState();
DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build(); DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build();
return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus()); return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus());
}); });
} }
private void updateState(UnaryOperator<ClusterServiceState> updateFunction) {
this.state.getAndUpdate(oldClusterServiceState -> {
ClusterServiceState newClusterServiceState = updateFunction.apply(oldClusterServiceState);
assert validStateTransition(oldClusterServiceState, newClusterServiceState) :
"Invalid cluster service state transition from " + oldClusterServiceState + " to " + newClusterServiceState;
return newClusterServiceState;
});
}
private static boolean validStateTransition(ClusterServiceState oldClusterServiceState, ClusterServiceState newClusterServiceState) {
if (oldClusterServiceState == null || newClusterServiceState == null) {
return false;
}
ClusterStateStatus oldStatus = oldClusterServiceState.getClusterStateStatus();
ClusterStateStatus newStatus = newClusterServiceState.getClusterStateStatus();
// only go from UNKNOWN to UNKNOWN or BEING_APPLIED
if (oldStatus == ClusterStateStatus.UNKNOWN && newStatus == ClusterStateStatus.APPLIED) {
return false;
}
// only go from BEING_APPLIED to APPLIED
if (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus != ClusterStateStatus.APPLIED) {
return false;
}
// only go from APPLIED to BEING_APPLIED
if (oldStatus == ClusterStateStatus.APPLIED && newStatus != ClusterStateStatus.BEING_APPLIED) {
return false;
}
boolean identicalClusterState = oldClusterServiceState.getClusterState() == newClusterServiceState.getClusterState();
return identicalClusterState == (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus == ClusterStateStatus.APPLIED);
}
public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
this.nodeConnectionsService = nodeConnectionsService; this.nodeConnectionsService = nodeConnectionsService;
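Because updateState funnels every writer through AtomicReference#getAndUpdate, the transition check runs atomically with the update it guards. The same guard pattern on a toy integer state machine (the monotonicity rule here is illustrative, not the cluster-state one):

-------------------------------------
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

class StateGuardSketch {
    private final AtomicReference<Integer> state = new AtomicReference<>(0);

    private void updateState(UnaryOperator<Integer> updateFunction) {
        state.getAndUpdate(oldState -> {
            Integer newState = updateFunction.apply(oldState);
            // the guard runs inside getAndUpdate, so no writer can bypass it
            assert newState >= oldState : "invalid transition " + oldState + " -> " + newState;
            return newState;
        });
    }

    public static void main(String[] args) {
        StateGuardSketch sketch = new StateGuardSketch();
        sketch.updateState(s -> s + 1); // fine
        sketch.updateState(s -> s - 1); // trips the assertion under -ea
    }
}
-------------------------------------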
@@ -201,10 +233,10 @@ public class ClusterService extends AbstractLifecycleComponent {
@Override @Override
protected synchronized void doStart() { protected synchronized void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(state.get().getClusterState().nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(clusterServiceState().getClusterState().nodes().getLocalNode(), "please set the local node before starting");
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
add(localNodeMasterListeners); add(localNodeMasterListeners);
this.state.getAndUpdate(css -> new ClusterServiceState( updateState(css -> new ClusterServiceState(
ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(), ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(),
css.getClusterStateStatus())); css.getClusterStateStatus()));
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME),
@@ -240,7 +272,7 @@ public class ClusterService extends AbstractLifecycleComponent {
* The local node. * The local node.
*/ */
public DiscoveryNode localNode() { public DiscoveryNode localNode() {
DiscoveryNode localNode = state.get().getClusterState().getNodes().getLocalNode(); DiscoveryNode localNode = state().getNodes().getLocalNode();
if (localNode == null) { if (localNode == null) {
throw new IllegalStateException("No local node found. Is the node started?"); throw new IllegalStateException("No local node found. Is the node started?");
} }
@@ -255,7 +287,7 @@ public class ClusterService extends AbstractLifecycleComponent {
* The current cluster state. * The current cluster state.
*/ */
public ClusterState state() { public ClusterState state() {
return this.state.get().getClusterState(); return clusterServiceState().getClusterState();
} }
/** /**
@@ -507,6 +539,13 @@
return true; return true;
} }
/** asserts that the current thread is <b>NOT</b> the cluster state update thread */
public static boolean assertNotClusterStateUpdateThread(String reason) {
assert Thread.currentThread().getName().contains(UPDATE_THREAD_NAME) == false :
"Expected current thread [" + Thread.currentThread() + "] to not be the cluster state update thread. Reason: [" + reason + "]";
return true;
}
public ClusterName getClusterName() { public ClusterName getClusterName() {
return clusterName; return clusterName;
} }
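Returning boolean from the helper is what lets callers write assert ClusterService.assertNotClusterStateUpdateThread(reason): the whole check, message building included, compiles away when assertions are disabled. A sketch of the idiom (the names are illustrative):

-------------------------------------
class AssertHelperSketch {
    static final String UPDATE_THREAD_NAME = "sketchUpdateTask"; // illustrative

    /** asserts that the current thread is NOT the update thread */
    static boolean assertNotUpdateThread(String reason) {
        assert Thread.currentThread().getName().contains(UPDATE_THREAD_NAME) == false :
            "thread [" + Thread.currentThread() + "] must not block here; reason: [" + reason + "]";
        return true; // always true, so the caller's own assert never fails because of us
    }

    void blockingGet() {
        // evaluated under -ea, removed entirely under -da
        assert assertNotUpdateThread("blocking wait on a future");
        // ... actual blocking work ...
    }
}
-------------------------------------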
@@ -554,7 +593,7 @@
return; return;
} }
logger.debug("processing [{}]: execute", tasksSummary); logger.debug("processing [{}]: execute", tasksSummary);
ClusterState previousClusterState = state.get().getClusterState(); ClusterState previousClusterState = clusterServiceState().getClusterState();
if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) { if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) {
logger.debug("failing [{}]: local node is no longer master", tasksSummary); logger.debug("failing [{}]: local node is no longer master", tasksSummary);
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source)); toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
@@ -704,7 +743,7 @@ } }
} }
// update the current cluster state // update the current cluster state
state.set(new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED)); ClusterState finalNewClusterState = newClusterState;
updateState(css -> new ClusterServiceState(finalNewClusterState, ClusterStateStatus.BEING_APPLIED));
logger.debug("set local cluster state to version {}", newClusterState.version()); logger.debug("set local cluster state to version {}", newClusterState.version());
try { try {
// nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistence // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistence
@@ -726,7 +766,7 @@
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());
state.getAndUpdate(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED)); updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
for (ClusterStateListener listener : postAppliedListeners) { for (ClusterStateListener listener : postAppliedListeners) {
try { try {


@@ -19,6 +19,7 @@
package org.elasticsearch.common.transport; package org.elasticsearch.common.transport;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
@@ -68,6 +69,12 @@ public final class TransportAddress implements Writeable {
* Read from a stream. * Read from a stream.
*/ */
public TransportAddress(StreamInput in) throws IOException { public TransportAddress(StreamInput in) throws IOException {
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
final short i = in.readShort();
if (i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
}
}
final int len = in.readByte(); final int len = in.readByte();
final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6) final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a); in.readFully(a);
@@ -78,6 +85,9 @@
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
}
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6) byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length); out.write(bytes, 0, bytes.length);
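On the wire, streams to and from pre-6.0 nodes carry a one-short discriminator (always 1, the id of the old InetSocketTransportAddress) ahead of the raw address bytes. A self-contained sketch of the read side's fail-hard check (the stream and version plumbing are simplified to JDK types):

-------------------------------------
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

class BwcAddressReadSketch {
    // mirrors the constructor above: consume and verify the legacy discriminator
    // before the address bytes when talking to an older node
    static byte[] readAddress(DataInputStream in, boolean fromPre6Node) throws IOException {
        if (fromPre6Node) {
            short id = in.readShort();
            if (id != 1) { // fail hard: only the single legacy address type is supported
                throw new AssertionError("illegal transport ID, got: " + id + " expected: 1");
            }
        }
        byte[] address = new byte[in.readByte()]; // 4 bytes (IPv4) or 16 bytes (IPv6)
        in.readFully(address);
        return address;
    }

    public static void main(String[] args) throws IOException {
        byte[] wire = {0, 1, 4, 127, 0, 0, 1}; // short 1, then length 4, then 127.0.0.1
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(wire));
        System.out.println(readAddress(in, true).length); // 4
    }
}
-------------------------------------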


@@ -19,6 +19,7 @@
package org.elasticsearch.common.util.concurrent; package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transports; import org.elasticsearch.transport.Transports;
@ -60,7 +61,9 @@ public abstract class BaseFuture<V> implements Future<V> {
public V get(long timeout, TimeUnit unit) throws InterruptedException, public V get(long timeout, TimeUnit unit) throws InterruptedException,
TimeoutException, ExecutionException { TimeoutException, ExecutionException {
assert timeout <= 0 || assert timeout <= 0 ||
(Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON)); (Transports.assertNotTransportThread(BLOCKING_OP_REASON) &&
ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) &&
ClusterService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON));
return sync.get(unit.toNanos(timeout)); return sync.get(unit.toNanos(timeout));
} }
@ -82,7 +85,9 @@ public abstract class BaseFuture<V> implements Future<V> {
*/ */
@Override @Override
public V get() throws InterruptedException, ExecutionException { public V get() throws InterruptedException, ExecutionException {
assert Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON); assert Transports.assertNotTransportThread(BLOCKING_OP_REASON) &&
ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) &&
ClusterService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON);
return sync.get(); return sync.get();
} }
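`ClusterService.assertNotClusterStateUpdateThread` joins the existing guards so that a blocking `get()` trips an assertion when called from the cluster state update thread, where it could stall cluster state application. A sketch of what such a guard typically looks like, assuming the update thread is identified by a name marker such as a `CLUSTER_UPDATE_THREAD_NAME` constant (the committed helper may differ in detail):

-------------------------------------
// Sketch only: the guard returns true so it can sit inside an `assert`,
// and itself asserts the calling thread is not the forbidden one.
public static boolean assertNotClusterStateUpdateThread(String reason) {
    assert Thread.currentThread().getName().contains(CLUSTER_UPDATE_THREAD_NAME) == false :
        "Expected current thread [" + Thread.currentThread() + "] to not be the cluster state update thread. Reason: [" + reason + "]";
    return true;
}
-------------------------------------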
org/elasticsearch/discovery/DiscoveryModule.java
@@ -53,11 +53,9 @@ public class DiscoveryModule {
         new Setting<>("discovery.zen.hosts_provider", (String)null, Optional::ofNullable, Property.NodeScope);
 
     private final Discovery discovery;
-    private final ZenPing zenPing;
 
     public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NetworkService networkService,
-                           ClusterService clusterService, Function<UnicastHostsProvider, ZenPing> createZenPing,
-                           List<DiscoveryPlugin> plugins) {
+                           ClusterService clusterService, List<DiscoveryPlugin> plugins) {
         final UnicastHostsProvider hostsProvider;
 
         Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
@@ -79,14 +77,11 @@ public class DiscoveryModule {
             hostsProvider = Collections::emptyList;
         }
 
-        zenPing = createZenPing.apply(hostsProvider);
-
         Map<String, Supplier<Discovery>> discoveryTypes = new HashMap<>();
-        discoveryTypes.put("zen",
-            () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, clusterService.getClusterSettings(), zenPing));
+        discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, clusterService, hostsProvider));
         discoveryTypes.put("none", () -> new NoneDiscovery(settings, clusterService, clusterService.getClusterSettings()));
         for (DiscoveryPlugin plugin : plugins) {
-            plugin.getDiscoveryTypes(threadPool, transportService, clusterService, zenPing).entrySet().forEach(entry -> {
+            plugin.getDiscoveryTypes(threadPool, transportService, clusterService, hostsProvider).entrySet().forEach(entry -> {
                 if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) {
                     throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice");
                 }
@@ -103,9 +98,4 @@ public class DiscoveryModule {
     public Discovery getDiscovery() {
         return discovery;
     }
-
-    // TODO: remove this, it should be completely local to discovery, but service disruption tests want to mess with it
-    public ZenPing getZenPing() {
-        return zenPing;
-    }
 }
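With `ZenPing` gone from the module, a plugin that contributes a discovery type now receives the resolved `UnicastHostsProvider` rather than a shared ping instance. A minimal sketch of a plugin using the new hook, assuming `DiscoveryPlugin#getDiscoveryTypes` matches the call site above; the plugin name, type key, and import package locations are assumptions for this era, not part of the commit:

-------------------------------------
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

// Package locations assumed for this 5.x/6.0 era.
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

// Illustrative plugin; MyDiscoveryPlugin and "my-zen" are invented names.
public class MyDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
    @Override
    public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                              ClusterService clusterService, UnicastHostsProvider hostsProvider) {
        // ZenDiscovery builds its own ZenPing from the provider; no ping is shared.
        return Collections.singletonMap("my-zen",
            () -> new ZenDiscovery(Settings.EMPTY, threadPool, transportService, clusterService, hostsProvider));
    }
}
-------------------------------------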
org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -107,7 +107,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery {
     private AllocationService allocationService;
     private final ClusterName clusterName;
     private final DiscoverySettings discoverySettings;
-    private final ZenPing zenPing;
+    protected final ZenPing zenPing; // protected to allow tests access
     private final MasterFaultDetection masterFD;
     private final NodesFaultDetection nodesFD;
     private final PublishClusterStateAction publishClusterState;
@@ -139,13 +139,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery {
     private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
 
     public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
-                        ClusterService clusterService, ClusterSettings clusterSettings, ZenPing zenPing) {
+                        ClusterService clusterService, UnicastHostsProvider hostsProvider) {
         super(settings);
         this.clusterService = clusterService;
         this.clusterName = clusterService.getClusterName();
         this.transportService = transportService;
-        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
-        this.zenPing = zenPing;
+        this.discoverySettings = new DiscoverySettings(settings, clusterService.getClusterSettings());
+        this.zenPing = newZenPing(settings, threadPool, transportService, hostsProvider);
         this.electMaster = new ElectMasterService(settings);
         this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);
         this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
@@ -160,11 +160,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery {
         logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.ignore_non_master [{}]",
                 this.pingTimeout, joinTimeout, masterElectionIgnoreNonMasters);
 
-        clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
+        clusterService.getClusterSettings().addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
+            this::handleMinimumMasterNodesChanged, (value) -> {
                 final ClusterState clusterState = clusterService.state();
                 int masterNodes = clusterState.nodes().getMasterNodes().size();
                 if (value > masterNodes) {
-                    throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
+                    throw new IllegalArgumentException("cannot set "
+                        + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current" +
+                        " master nodes count [" + masterNodes + "]");
                 }
             });
@@ -188,6 +191,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery {
             DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
     }
 
+    // protected to allow overriding in tests
+    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                 UnicastHostsProvider hostsProvider) {
+        return new UnicastZenPing(settings, threadPool, transportService, hostsProvider);
+    }
+
     @Override
     public void setAllocationService(AllocationService allocationService) {
         this.allocationService = allocationService;
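Because the ping is now created inside the constructor rather than injected, tests that previously passed a fake `ZenPing` override the new `newZenPing` hook instead. A minimal sketch, with `MockZenPing` standing in for whatever test double a suite uses (an assumed name, not part of this diff):

-------------------------------------
// Sketch: a test subclass swaps the ping implementation via the new hook.
class TestZenDiscovery extends ZenDiscovery {
    TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
                     ClusterService clusterService, UnicastHostsProvider hostsProvider) {
        super(settings, threadPool, transportService, clusterService, hostsProvider);
    }

    @Override
    protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
                                 UnicastHostsProvider hostsProvider) {
        return new MockZenPing(settings); // deterministic ping for disruption tests
    }
}
-------------------------------------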
org/elasticsearch/index/IndexModule.java
@@ -33,6 +33,7 @@ import org.elasticsearch.index.cache.query.DisabledQueryCache;
 import org.elasticsearch.index.cache.query.IndexQueryCache;
 import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexSearcherWrapper;
 import org.elasticsearch.index.shard.IndexingOperationListener;
@@ -370,6 +371,16 @@ public final class IndexModule {
             globalCheckpointSyncer, searchOperationListeners, indexOperationListeners);
     }
 
+    /**
+     * Creates a new mapper service to do administrative work like mapping updates.
+     * This *should not* be used for document parsing; doing so will result in an exception.
+     */
+    public MapperService newIndexMapperService(MapperRegistry mapperRegistry) throws IOException {
+        return new MapperService(indexSettings, analysisRegistry.build(indexSettings),
+            new SimilarityService(indexSettings, similarities), mapperRegistry,
+            () -> { throw new UnsupportedOperationException("no index query shard context available"); });
+    }
+
     /**
      * Forces a certain query cache to use instead of the default one. If this is set
      * and query caching is not disabled with {@code index.queries.cache.enabled}, then
org/elasticsearch/index/IndexService.java
@@ -94,7 +94,6 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
 public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
 
     private final IndexEventListener eventListener;
-    private final IndexAnalyzers indexAnalyzers;
     private final IndexFieldDataService indexFieldData;
     private final BitsetFilterCache bitsetFilterCache;
     private final NodeEnvironment nodeEnv;
@@ -147,12 +146,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
         super(indexSettings);
         this.indexSettings = indexSettings;
         this.globalCheckpointSyncer = globalCheckpointSyncer;
-        this.indexAnalyzers = registry.build(indexSettings);
         this.similarityService = similarityService;
-        this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry,
+        this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), similarityService, mapperRegistry,
             // we parse all percolator queries as they would be parsed on shard 0
             () -> newQueryShardContext(0, null, () -> {
-                throw new IllegalArgumentException("Percolator queries are not allowed to use the curent timestamp");
+                throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp");
             }));
         this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService);
         this.shardStoreDeleter = shardStoreDeleter;
@@ -231,7 +229,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
     }
 
     public IndexAnalyzers getIndexAnalyzers() {
-        return this.indexAnalyzers;
+        return this.mapperService.getIndexAnalyzers();
     }
 
     public MapperService mapperService() {
@@ -255,7 +253,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
                 }
             }
         } finally {
-            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, indexAnalyzers, refreshTask, fsyncTask, globalCheckpointTask);
+            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, mapperService, refreshTask, fsyncTask, globalCheckpointTask);
         }
     }
 
@@ -336,7 +334,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
         }
 
         if (shards.containsKey(shardId.id())) {
-            throw new IndexShardAlreadyExistsException(shardId + " already exists");
+            throw new IllegalStateException(shardId + " already exists");
         }
 
         logger.debug("creating shard_id {}", shardId);
org/elasticsearch/index/IndexShardAlreadyExistsException.java (deleted)
@@ -1,36 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-
-import java.io.IOException;
-
-public class IndexShardAlreadyExistsException extends ElasticsearchException {
-
-    public IndexShardAlreadyExistsException(String message) {
-        super(message);
-    }
-
-    public IndexShardAlreadyExistsException(StreamInput in) throws IOException {
-        super(in);
-    }
-}
org/elasticsearch/index/mapper/DateFieldMapper.java
@@ -23,7 +23,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.index.XPointValues;
+import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.BoostQuery;
@@ -300,13 +300,13 @@ public class DateFieldMapper extends FieldMapper {
         @Override
         public FieldStats.Date stats(IndexReader reader) throws IOException {
             String field = name();
-            long size = XPointValues.size(reader, field);
+            long size = PointValues.size(reader, field);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, field);
-            byte[] min = XPointValues.getMinPackedValue(reader, field);
-            byte[] max = XPointValues.getMaxPackedValue(reader, field);
+            int docCount = PointValues.getDocCount(reader, field);
+            byte[] min = PointValues.getMinPackedValue(reader, field);
+            byte[] max = PointValues.getMaxPackedValue(reader, field);
             return new FieldStats.Date(reader.maxDoc(), docCount, -1L, size,
                 isSearchable(), isAggregatable(),
                 dateTimeFormatter(), LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
@@ -321,13 +321,13 @@ public class DateFieldMapper extends FieldMapper {
                 dateParser = this.dateMathParser;
             }
 
-            if (XPointValues.size(reader, name()) == 0) {
+            if (PointValues.size(reader, name()) == 0) {
                 // no points, so nothing matches
                 return Relation.DISJOINT;
             }
 
-            long minValue = LongPoint.decodeDimension(XPointValues.getMinPackedValue(reader, name()), 0);
-            long maxValue = LongPoint.decodeDimension(XPointValues.getMaxPackedValue(reader, name()), 0);
+            long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
+            long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
 
             long fromInclusive = Long.MIN_VALUE;
             if (from != null) {
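`XPointValues` was a temporary fork of Lucene's point-statistics helpers; once Lucene exposed the same static methods on `PointValues`, the fork could be dropped, which is all this mechanical rename does here and in the mappers below. A minimal usage sketch of the shared pattern (the field name and class wrapper are illustrative):

-------------------------------------
import java.io.IOException;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PointValues;

public class PointStatsExample {
    // Sketch: summarize an indexed long-point field the way the mappers' stats() methods do.
    static void summarize(IndexReader reader, String field) throws IOException {
        long size = PointValues.size(reader, field); // number of indexed point values
        if (size == 0) {
            return; // no points for this field: stats() returns null, ranges are DISJOINT
        }
        int docCount = PointValues.getDocCount(reader, field);
        long min = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, field), 0);
        long max = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, field), 0);
        System.out.printf("%d values in %d docs, range [%d..%d]%n", size, docCount, min, max);
    }
}
-------------------------------------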
org/elasticsearch/index/mapper/IpFieldMapper.java
@@ -25,7 +25,7 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.XPointValues;
+import org.apache.lucene.index.PointValues;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
@@ -212,13 +212,13 @@ public class IpFieldMapper extends FieldMapper {
         @Override
         public FieldStats.Ip stats(IndexReader reader) throws IOException {
             String field = name();
-            long size = XPointValues.size(reader, field);
+            long size = PointValues.size(reader, field);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, field);
-            byte[] min = XPointValues.getMinPackedValue(reader, field);
-            byte[] max = XPointValues.getMaxPackedValue(reader, field);
+            int docCount = PointValues.getDocCount(reader, field);
+            byte[] min = PointValues.getMinPackedValue(reader, field);
+            byte[] max = PointValues.getMaxPackedValue(reader, field);
             return new FieldStats.Ip(reader.maxDoc(), docCount, -1L, size,
                 isSearchable(), isAggregatable(),
                 InetAddressPoint.decode(min), InetAddressPoint.decode(max));
org/elasticsearch/index/mapper/MapperService.java
@@ -44,6 +44,7 @@ import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.indices.TypeMissingException;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -62,7 +63,7 @@ import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
 
-public class MapperService extends AbstractIndexComponent {
+public class MapperService extends AbstractIndexComponent implements Closeable {
 
     /**
      * The reason why a mapping is being merged.
@@ -624,6 +625,11 @@
         return parentTypes;
     }
 
+    @Override
+    public void close() throws IOException {
+        indexAnalyzers.close();
+    }
+
     /**
      * @return Whether a field is a metadata field.
     */
org/elasticsearch/index/mapper/NumberFieldMapper.java
@@ -29,7 +29,7 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.XPointValues;
+import org.apache.lucene.index.PointValues;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
@@ -241,13 +241,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
-            long size = XPointValues.size(reader, fieldName);
+            long size = PointValues.size(reader, fieldName);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, fieldName);
-            byte[] min = XPointValues.getMinPackedValue(reader, fieldName);
-            byte[] max = XPointValues.getMaxPackedValue(reader, fieldName);
+            int docCount = PointValues.getDocCount(reader, fieldName);
+            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
+            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
             return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 HalfFloatPoint.decodeDimension(min, 0), HalfFloatPoint.decodeDimension(max, 0));
@@ -325,13 +325,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
-            long size = XPointValues.size(reader, fieldName);
+            long size = PointValues.size(reader, fieldName);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, fieldName);
-            byte[] min = XPointValues.getMinPackedValue(reader, fieldName);
-            byte[] max = XPointValues.getMaxPackedValue(reader, fieldName);
+            int docCount = PointValues.getDocCount(reader, fieldName);
+            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
+            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
             return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 FloatPoint.decodeDimension(min, 0), FloatPoint.decodeDimension(max, 0));
@@ -409,13 +409,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Double stats(IndexReader reader, String fieldName,
                                 boolean isSearchable, boolean isAggregatable) throws IOException {
-            long size = XPointValues.size(reader, fieldName);
+            long size = PointValues.size(reader, fieldName);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, fieldName);
-            byte[] min = XPointValues.getMinPackedValue(reader, fieldName);
-            byte[] max = XPointValues.getMaxPackedValue(reader, fieldName);
+            int docCount = PointValues.getDocCount(reader, fieldName);
+            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
+            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
             return new FieldStats.Double(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 DoublePoint.decodeDimension(min, 0), DoublePoint.decodeDimension(max, 0));
@@ -627,13 +627,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
-            long size = XPointValues.size(reader, fieldName);
+            long size = PointValues.size(reader, fieldName);
             if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, fieldName);
-            byte[] min = XPointValues.getMinPackedValue(reader, fieldName);
-            byte[] max = XPointValues.getMaxPackedValue(reader, fieldName);
+            int docCount = PointValues.getDocCount(reader, fieldName);
+            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
+            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
             return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 IntPoint.decodeDimension(min, 0), IntPoint.decodeDimension(max, 0));
@@ -723,13 +723,13 @@ public class NumberFieldMapper extends FieldMapper {
         @Override
         FieldStats.Long stats(IndexReader reader, String fieldName,
                               boolean isSearchable, boolean isAggregatable) throws IOException {
-            long size = XPointValues.size(reader, fieldName);
+            long size = PointValues.size(reader, fieldName);
            if (size == 0) {
                 return null;
             }
-            int docCount = XPointValues.getDocCount(reader, fieldName);
-            byte[] min = XPointValues.getMinPackedValue(reader, fieldName);
-            byte[] max = XPointValues.getMaxPackedValue(reader, fieldName);
+            int docCount = PointValues.getDocCount(reader, fieldName);
+            byte[] min = PointValues.getMinPackedValue(reader, fieldName);
+            byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
             return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size,
                 isSearchable, isAggregatable,
                 LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
org/elasticsearch/indices/IndexTemplateAlreadyExistsException.java (deleted)
@@ -1,56 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.indices;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.rest.RestStatus;
-
-import java.io.IOException;
-
-public class IndexTemplateAlreadyExistsException extends ElasticsearchException {
-
-    private final String name;
-
-    public IndexTemplateAlreadyExistsException(String name) {
-        super("index_template [" + name + "] already exists");
-        this.name = name;
-    }
-
-    public IndexTemplateAlreadyExistsException(StreamInput in) throws IOException {
-        super(in);
-        name = in.readOptionalString();
-    }
-
-    public String name() {
-        return this.name;
-    }
-
-    @Override
-    public RestStatus status() {
-        return RestStatus.BAD_REQUEST;
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        out.writeOptionalString(name);
-    }
-}
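The dedicated exception is removed outright; call sites presumably switch to a plain JDK exception with the same message, mirroring the `IllegalStateException` swap shown for shards above. A hedged sketch of what a replacement call site can look like (the `request` and `templateExists` names are invented for illustration):

-------------------------------------
// Sketch: hypothetical template-creation check after the dedicated exception is removed.
if (request.create && templateExists(request.name)) {
    throw new IllegalArgumentException("index_template [" + request.name + "] already exists");
}
-------------------------------------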
org/elasticsearch/indices/IndicesService.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.indices;
 
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.index.DirectoryReader;
@@ -464,6 +463,21 @@ public class IndicesService extends AbstractLifecycleComponent
             indicesFieldDataCache);
     }
 
+    /**
+     * Creates a new mapper service for the given index, in order to do administrative work like mapping updates.
+     * This *should not* be used for document parsing; doing so will result in an exception.
+     *
+     * Note: the returned {@link MapperService} should be closed when unneeded.
+     */
+    public synchronized MapperService createIndexMapperService(IndexMetaData indexMetaData) throws IOException {
+        final Index index = indexMetaData.getIndex();
+        final Predicate<String> indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state());
+        final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting);
+        final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry);
+        pluginsService.onIndexModule(indexModule);
+        return indexModule.newIndexMapperService(mapperRegistry);
+    }
+
     /**
      * This method verifies that the given {@code metaData} holds sane values to create an {@link IndexService}.
     * This method tries to update the meta data of the created {@link IndexService} if the given {@code metaDataUpdate} is different from the given {@code metaData}.
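Combined with `MapperService` now implementing `Closeable`, the intended usage is short-lived: build the administrative mapper service, do the mapping work, and release the `IndexAnalyzers` it built. A sketch under those assumptions; `indicesService` and `indexMetaData` are closed-over variables, and the `merge` call stands in for whatever administrative operation the caller needs:

-------------------------------------
// Sketch: administrative mapping work without a running index.
// try-with-resources closes the MapperService, releasing its analyzers.
try (MapperService mapperService = indicesService.createIndexMapperService(indexMetaData)) {
    mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
    // ... inspect or update mappings; parsing documents here would throw, by design
}
-------------------------------------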
org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -51,7 +51,6 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexComponent;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.IndexShardAlreadyExistsException;
 import org.elasticsearch.index.seqno.GlobalCheckpointService;
 import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
 import org.elasticsearch.index.shard.IndexEventListener;
@@ -539,10 +538,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
             RecoveryState recoveryState = new RecoveryState(shardRouting, nodes.getLocalNode(), sourceNode);
             indicesService.createShard(shardRouting, recoveryState, recoveryTargetService, new RecoveryListener(shardRouting),
                 repositoriesService, failedShardHandler);
-        } catch (IndexShardAlreadyExistsException e) {
-            // ignore this, the method call can happen several times
-            logger.debug("Trying to create shard that already exists", e);
-            assert false;
         } catch (Exception e) {
             failAndRemoveShard(shardRouting, true, "failed to create shard", e);
         }
Some files were not shown because too many files have changed in this diff.