merge from master

Boaz Leskes 2016-03-19 13:52:45 +01:00
commit 858610d0d1
912 changed files with 28916 additions and 19787 deletions

View File

@ -116,6 +116,7 @@ subprojects {
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->

View File

@ -307,6 +307,12 @@ class BuildPlugin implements Plugin<Project> {
/** Adds repositories used by ES dependencies */
static void configureRepositories(Project project) {
RepositoryHandler repos = project.repositories
if (System.getProperty("repos.mavenlocal") != null) {
// with -Drepos.mavenlocal=true we can force checking the local .m2 repo, which is
// useful for development, i.e. bwc tests where we install artifacts in the local repository
// so that we don't have to pass hardcoded files to gradle
repos.mavenLocal()
}
repos.mavenCentral()
repos.maven {
name 'sonatype-snapshots'
@ -407,6 +413,7 @@ class BuildPlugin implements Plugin<Project> {
systemProperty 'jna.nosys', 'true'
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
// TODO: remove setting logging level via system property
systemProperty 'es.logger.level', 'WARN'
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('tests.') ||

View File

@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
/**
* Runs LoggerUsageCheck on a set of directories.
*/
public class LoggerUsageTask extends LoggedExec {
/**
* We use a simple "marker" file that we touch when the task succeeds
* as the task output. This is compared against the modified time of the
* inputs (ie the jars/class files).
*/
private File successMarker = new File(project.buildDir, 'markers/loggerUsage')
private FileCollection classpath;
private List<File> classDirectories;
public LoggerUsageTask() {
project.afterEvaluate {
dependsOn(classpath)
description = "Runs LoggerUsageCheck on ${classDirectories}"
executable = new File(project.javaHome, 'bin/java')
if (classDirectories == null) {
classDirectories = []
if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) {
classDirectories += [project.sourceSets.main.output.classesDir]
dependsOn project.tasks.classes
}
if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) {
classDirectories += [project.sourceSets.test.output.classesDir]
dependsOn project.tasks.testClasses
}
}
doFirst({
args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker')
getClassDirectories().each {
args it.getAbsolutePath()
}
})
doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
}
}
@InputFiles
FileCollection getClasspath() {
return classpath
}
void setClasspath(FileCollection classpath) {
this.classpath = classpath
}
@InputFiles
List<File> getClassDirectories() {
return classDirectories
}
void setClassDirectories(List<File> classDirectories) {
this.classDirectories = classDirectories
}
@OutputFile
File getSuccessMarker() {
return successMarker
}
void setSuccessMarker(File successMarker) {
this.successMarker = successMarker
}
}
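The marker-file idiom described in the comment at the top of this class is what drives Gradle's incremental check: the task re-runs only when an input is newer than the marker output. A minimal self-contained sketch of the pattern, with assumed class and file names (not from this commit):

import org.gradle.api.DefaultTask
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.TaskAction

// Hedged sketch: Gradle skips the task when every @InputFiles entry is older
// than the @OutputFile marker, so touching the marker on success is enough.
class MarkerCheckTask extends DefaultTask {
    @InputFiles FileCollection sources                      // assumed input
    @OutputFile File marker = new File(project.buildDir, 'markers/check')

    @TaskAction
    void run() {
        // ... run the real check against 'sources' here ...
        marker.parentFile.mkdirs()
        marker.setText('', 'UTF-8')                         // touch on success
    }
}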

View File

@ -34,6 +34,7 @@ class PrecommitTasks {
configureForbiddenApis(project),
configureCheckstyle(project),
configureNamingConventions(project),
configureLoggerUsage(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('jarHell', JarHellTask.class),
@ -63,21 +64,21 @@ class PrecommitTasks {
project.forbiddenApis {
internalRuntimeForbidden = true
failOnUnsupportedJava = false
bundledSignatures = ['jdk-unsafe', 'jdk-deprecated']
signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')]
bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
}
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
if (mainForbidden != null) {
mainForbidden.configure {
bundledSignatures += 'jdk-system-out'
signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt')
}
}
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
if (testForbidden != null) {
testForbidden.configure {
signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
}
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@ -117,4 +118,18 @@ class PrecommitTasks {
}
return null
}
private static Task configureLoggerUsage(Project project) {
Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class)
project.configurations.create('loggerUsagePlugin')
project.dependencies.add('loggerUsagePlugin',
"org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}")
loggerUsageTask.configure {
classpath = project.configurations.loggerUsagePlugin
}
return loggerUsageTask
}
}

View File

@ -23,8 +23,6 @@ import org.gradle.api.Project
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import java.time.LocalDateTime
/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {
@ -34,6 +32,12 @@ class ClusterConfiguration {
@Input
int numNodes = 1
@Input
int numBwcNodes = 0
@Input
String bwcVersion = null
@Input
int httpPort = 0
@ -49,6 +53,15 @@ class ClusterConfiguration {
@Input
String jvmArgs = System.getProperty('tests.jvm.argline', '')
/**
* The seed node's ports file. When the cluster has more than one node we use a seed node
* to form the cluster. The file is null if no seed node is available yet.
*
* Note: this can only be null if the cluster has only one node or if the first node is not yet
* configured. All nodes but the first should see a non-null value.
*/
File seedNodePortsFile
/**
* A closure to call before the cluster is considered ready. The closure is passed the node info,
* as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
@ -119,4 +132,12 @@ class ClusterConfiguration {
}
extraConfigFiles.put(path, sourceFile)
}
/** Returns an address and port suitable for a URI to connect to this cluster's seed node over the transport protocol */
String seedNodeTransportUri() {
if (seedNodePortsFile != null) {
return seedNodePortsFile.readLines("UTF-8").get(0)
}
return null;
}
}

View File

@ -53,13 +53,59 @@ class ClusterFormationTasks {
// no need to add cluster formation tasks if the task won't run!
return
}
configureDistributionDependency(project, config.distribution)
List<Task> startTasks = []
File sharedDir = new File(project.buildDir, "cluster/shared")
// first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
// in theory this should not be necessary, but repositories are only deleted in the cluster state and not on disk,
// so snapshots survive failures / test runs and there is no simple way today to fix that.
Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
delete sharedDir
doLast {
sharedDir.mkdirs()
}
}
List<Task> startTasks = [cleanup]
List<NodeInfo> nodes = []
if (config.numNodes < config.numBwcNodes) {
throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
}
if (config.numBwcNodes > 0 && config.bwcVersion == null) {
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
project.configurations {
elasticsearchDistro
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
if (config.bwcVersion != null && config.numBwcNodes > 0) {
// if the cluster includes BWC nodes we also need to configure a dependency on the BWC version.
// this version uses the same distribution etc. and only differs in the version we depend on.
// from here on everything else works the same as for the current version; we fetch the BWC version
// from mirrors using gradle's built-in mechanism etc.
project.configurations {
elasticsearchBwcDistro
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
}
for (int i = 0; i < config.numNodes; ++i) {
NodeInfo node = new NodeInfo(config, i, project, task)
// we start N nodes and out of these N nodes there might be M bwc nodes.
// for each of those nodes we might have a different configuration
String elasticsearchVersion = VersionProperties.elasticsearch
Configuration configuration = project.configurations.elasticsearchDistro
if (i < config.numBwcNodes) {
elasticsearchVersion = config.bwcVersion
configuration = project.configurations.elasticsearchBwcDistro
}
NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
if (i == 0) {
if (config.seedNodePortsFile != null) {
// we might allow this to be set in the future, but for now we are the only authority to set this!
throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized")
}
config.seedNodePortsFile = node.transportPortsFile;
}
nodes.add(node)
startTasks.add(configureNode(project, task, node))
startTasks.add(configureNode(project, task, cleanup, node, configuration))
}
Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
@ -70,20 +116,14 @@ class ClusterFormationTasks {
}
/** Adds a dependency on the given distribution */
static void configureDistributionDependency(Project project, String distro) {
String elasticsearchVersion = VersionProperties.elasticsearch
static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
String packaging = distro
if (distro == 'tar') {
packaging = 'tar.gz'
} else if (distro == 'integ-test-zip') {
packaging = 'zip'
}
project.configurations {
elasticsearchDistro
}
project.dependencies {
elasticsearchDistro "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}"
}
project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}")
}
/**
@ -103,10 +143,10 @@ class ClusterFormationTasks {
*
* @return a task which starts the node.
*/
static Task configureNode(Project project, Task task, NodeInfo node) {
static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) {
// tasks are chained so their execution order is maintained
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: task.dependsOn.collect()) {
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
delete node.homeDir
delete node.cwd
doLast {
@ -115,7 +155,7 @@ class ClusterFormationTasks {
}
setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
@ -151,27 +191,28 @@ class ClusterFormationTasks {
}
/** Adds a task to extract the elasticsearch distribution */
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
/* project.configurations.elasticsearchDistro.singleFile will be an
external artifact if this is being run by a plugin not living in the
elasticsearch source tree. If this is a plugin built in the
elasticsearch source tree or this is a distro in the elasticsearch
source tree then this should be the version of elasticsearch built
by the source tree. If it isn't then Bad Things(TM) will happen. */
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) {
List extractDependsOn = [configuration, setup]
/* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the
elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in
the elasticsearch source tree then this should be the version of elasticsearch built by the source tree.
If it isn't then Bad Things(TM) will happen. */
Task extract
switch (node.config.distribution) {
case 'integ-test-zip':
case 'zip':
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
from {
project.zipTree(configuration.singleFile)
}
into node.baseDir
}
break;
case 'tar':
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
from {
project.tarTree(project.resources.gzip(project.configurations.elasticsearchDistro.singleFile))
project.tarTree(project.resources.gzip(configuration.singleFile))
}
into node.baseDir
}
@ -180,7 +221,7 @@ class ClusterFormationTasks {
File rpmDatabase = new File(node.baseDir, 'rpm-database')
File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
/* Delay reading the location of the rpm file until task execution */
Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
Object rpm = "${ -> configuration.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
'--dbpath', rpmDatabase,
@ -195,7 +236,7 @@ class ClusterFormationTasks {
case 'deb':
/* Delay reading the location of the deb file until task execution */
File debExtracted = new File(node.baseDir, 'deb-extracted')
Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
Object deb = "${ -> configuration.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'dpkg-deb', '-x', deb, debExtracted
doFirst {
@ -214,26 +255,28 @@ class ClusterFormationTasks {
Map esConfig = [
'cluster.name' : node.clusterName,
'pidfile' : node.pidFile,
'path.repo' : "${node.homeDir}/repo",
'path.shared_data' : "${node.homeDir}/../",
'path.repo' : "${node.sharedDir}/repo",
'path.shared_data' : "${node.sharedDir}/",
// Define a node attribute so we can test that it exists
'node.testattr' : 'test',
'repositories.url.allowed_urls': 'http://snapshot.test*'
]
if (node.config.numNodes == 1) {
esConfig['http.port'] = node.config.httpPort
esConfig['transport.tcp.port'] = node.config.transportPort
} else {
// TODO: fix multi node so it doesn't use hardcoded ports
esConfig['http.port'] = 9400 + node.nodeNum
esConfig['transport.tcp.port'] = 9500 + node.nodeNum
esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')
}
esConfig['http.port'] = node.config.httpPort
esConfig['transport.tcp.port'] = node.config.transportPort
esConfig.putAll(node.config.settings)
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
writeConfig.doFirst {
if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
resourceexists {
file(file: node.config.seedNodePortsFile.toString())
}
}
// the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
// host and join the cluster via that.
esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
}
File configFile = new File(node.confDir, 'elasticsearch.yml')
logger.info("Configuring ${configFile}")
configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
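Outside the Ant machinery, the same handshake can be sketched in plain Groovy (file location assumed for illustration; timeout and poll interval mirror the waitfor attributes above):

// Hedged sketch of the seed-node handshake: poll until the seed node has
// written its transport ports file, then feed its first line to every other
// node as the unicast discovery host.
def esConfig = [:]                                                         // stands in for the map built above
File portsFile = new File('build/cluster/shared/node0.transport.ports')    // assumed path
long deadline = System.currentTimeMillis() + 20_000                        // mirrors maxwait: 20s
while (!portsFile.exists() && System.currentTimeMillis() < deadline) {
    Thread.sleep(500)                                                      // mirrors checkevery: 500ms
}
if (portsFile.exists()) {
    String seed = portsFile.readLines('UTF-8').get(0)                      // e.g. 127.0.0.1:9300
    esConfig['discovery.zen.ping.unicast.hosts'] = "\"${seed}\""
}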

View File

@ -40,6 +40,9 @@ class NodeInfo {
/** root directory all node files and operations happen under */
File baseDir
/** shared data directory all nodes share */
File sharedDir
/** the pid file the node will use */
File pidFile
@ -89,14 +92,15 @@ class NodeInfo {
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
/** Creates a node to run as part of a cluster for the given task */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) {
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
this.config = config
this.nodeNum = nodeNum
this.sharedDir = sharedDir
clusterName = "${task.path.replace(':', '_').substring(1)}"
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
homeDir = homeDir(baseDir, config.distribution)
confDir = confDir(baseDir, config.distribution)
homeDir = homeDir(baseDir, config.distribution, nodeVersion)
confDir = confDir(baseDir, config.distribution, nodeVersion)
configFile = new File(confDir, 'elasticsearch.yml')
// even for rpm/deb, the logs are under home because we don't start with real services
File logsDir = new File(homeDir, 'logs')
@ -129,14 +133,15 @@ class NodeInfo {
'JAVA_HOME' : project.javaHome,
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
]
args.add("-Des.node.portsfile=true")
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
args.addAll("-E", "es.node.portsfile=true")
env.put('ES_JAVA_OPTS', config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" "))
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('es.')) {
args.add("-D${property.getKey()}=${property.getValue()}")
args.add("-E")
args.add("${property.getKey()}=${property.getValue()}")
}
}
args.add("-Des.path.conf=${confDir}")
args.addAll("-E", "es.path.conf=${confDir}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
@ -181,13 +186,13 @@ class NodeInfo {
}
/** Returns the directory elasticsearch home is contained in for the given distribution */
static File homeDir(File baseDir, String distro) {
static File homeDir(File baseDir, String distro, String nodeVersion) {
String path
switch (distro) {
case 'integ-test-zip':
case 'zip':
case 'tar':
path = "elasticsearch-${VersionProperties.elasticsearch}"
path = "elasticsearch-${nodeVersion}"
break
case 'rpm':
case 'deb':
@ -199,12 +204,12 @@ class NodeInfo {
return new File(baseDir, path)
}
static File confDir(File baseDir, String distro) {
static File confDir(File baseDir, String distro, String nodeVersion) {
switch (distro) {
case 'integ-test-zip':
case 'zip':
case 'tar':
return new File(homeDir(baseDir, distro), 'config')
return new File(homeDir(baseDir, distro, nodeVersion), 'config')
case 'rpm':
case 'deb':
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
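The launcher change above swaps "-Des.x=y" system properties for the new "-E name=value" flag and moves JVM properties into the environment. A small illustrative sketch with assumed values:

// Hedged illustration: node settings travel as "-E name=value" pairs while
// JVM system properties are joined into the ES_JAVA_OPTS environment variable.
def args = []
def env = [:]
def systemProperties = ['tests.logger.level': 'DEBUG']                    // assumed example
args.addAll('-E', 'es.node.portsfile=true')
args.addAll('-E', 'es.path.conf=/tmp/node0/config')                       // assumed path
env['ES_JAVA_OPTS'] = systemProperties.collect { k, v -> "-D${k}=${v}" }.join(' ')
assert args == ['-E', 'es.node.portsfile=true', '-E', 'es.path.conf=/tmp/node0/config']
assert env['ES_JAVA_OPTS'] == '-Dtests.logger.level=DEBUG'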

View File

@ -208,7 +208,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]SuggestResponse.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]TransportSuggestAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndex.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]DelegatingActionListener.java" checks="LineLength" />
@ -259,7 +258,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]update[/\\]UpdateRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]update[/\\]UpdateRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Bootstrap.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]BootstrapCLIParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNAKernel32Library.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANatives.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JVMCheck.java" checks="LineLength" />
@ -269,7 +267,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cache[/\\]recycler[/\\]PageCacheRecycler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]Requests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]node[/\\]NodeClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
@ -393,7 +390,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]ExtensionPoint.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutors.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsThreadPoolExecutor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedEsThreadPoolExecutor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadBarrier.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadContext.java" checks="LineLength" />
@ -464,7 +460,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]codec[/\\]PerFieldMappingPostingFormatCodec.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ElasticsearchConcurrentMergeScheduler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]Engine.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]EngineConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ShadowEngine.java" checks="LineLength" />
@ -617,7 +612,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]similarity[/\\]SimilarityService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardRepository.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardSnapshots.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]FsDirectoryService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreConfig.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]LegacyVerification.java" checks="LineLength" />
@ -896,7 +890,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestUtils.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]Suggesters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggester.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]ContextMapping.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoContextMapping.java" checks="LineLength" />
@ -907,12 +900,9 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]LinearInterpoatingScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellChecker.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggester.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestionBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]StupidBackoffScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]WordScorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggestParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggester.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
@ -1403,7 +1393,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]SearchWithRandomIOExceptionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportSearchFailuresIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportTwoNodesSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]builder[/\\]SearchSourceBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ChildQuerySearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ParentFieldLoadingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhasePluginIT.java" checks="LineLength" />
@ -1425,7 +1414,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]ExistsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]MultiMatchQueryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]SearchQueryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescoreBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]scroll[/\\]DuelScrollIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]scroll[/\\]SearchScrollIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]scroll[/\\]SearchScrollWithFailingNodesIT.java" checks="LineLength" />
@ -1436,12 +1424,10 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]CompletionSuggestSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]ContextCompletionSuggestSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]CustomSuggester.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]CustomSuggesterSearchIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CategoryContextMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]GeoContextMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]DirectCandidateGeneratorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellCheckerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]SmoothingModelTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]similarity[/\\]SimilarityIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]AbstractSnapshotIntegTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]BlobStoreFormatIT.java" checks="LineLength" />
@ -1597,7 +1583,6 @@
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]blobstore[/\\]MockDefaultS3OutputStream.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]AbstractS3SnapshotRestoreTest.java" checks="LineLength" />
<suppress files="plugins[/\\]store-smb[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]store[/\\]SmbDirectoryWrapper.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]BootstrapCliParserTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]ESPolicyUnitTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]EvilSecurityTests.java" checks="LineLength" />
<suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommandTests.java" checks="LineLength" />

View File

@ -0,0 +1,30 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

View File

@ -33,9 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()
java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
@defaultMessage Specify a location for the temp file/directory instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])
@ -48,9 +45,6 @@ java.io.ObjectInput
java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead
java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
@defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int)
java.net.Socket#<init>(java.lang.String,int)
@ -89,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use
java.lang.reflect.AccessibleObject#setAccessible(boolean)
java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean)
@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@defaultMessage this method needs special permission
java.lang.Thread#getAllStackTraces()
@ -112,8 +103,3 @@ java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET
java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests

View File

@ -1,5 +1,5 @@
elasticsearch = 5.0.0
lucene = 6.0.0-snapshot-bea235f
lucene = 6.0.0-snapshot-f0aa4fc
# optional dependencies
spatial4j = 0.6

View File

@ -49,7 +49,7 @@ dependencies {
compile 'org.elasticsearch:securesm:1.0'
// utilities
compile 'commons-cli:commons-cli:1.3.1'
compile 'net.sf.jopt-simple:jopt-simple:4.9'
compile 'com.carrotsearch:hppc:0.7.1'
// time handling, remove with java 8 time

View File

@ -787,8 +787,9 @@ public class MapperQueryParser extends QueryParser {
assert q instanceof BoostQuery == false;
return pq;
} else if (q instanceof MultiPhraseQuery) {
((MultiPhraseQuery) q).setSlop(slop);
return q;
MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
builder.setSlop(slop);
return builder.build();
} else {
return q;
}
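For context, Lucene 6 made MultiPhraseQuery immutable, which is why the slop can no longer be set in place. A minimal sketch of the copy-and-rebuild pattern the change above uses (field and terms assumed):

import org.apache.lucene.index.Term
import org.apache.lucene.search.MultiPhraseQuery

// Hedged sketch: copy the immutable query into a Builder, set the slop,
// and build a fresh instance.
MultiPhraseQuery original = new MultiPhraseQuery.Builder()
        .add(new Term('body', 'quick'))
        .add(new Term('body', 'fox'))
        .build()
MultiPhraseQuery sloppy = new MultiPhraseQuery.Builder(original)
        .setSlop(2)
        .build()
assert sloppy.getSlop() == 2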

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
/**
*
@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery {
flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof MultiPhraseQuery) {
MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
@ -77,7 +76,7 @@ public class CustomFieldQuery extends FieldQuery {
}
}
private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
if (currentPos == 0) {
// if we have more than 16 terms
int numTerms = 0;
@ -97,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery {
* we walk all possible ways and for each path down the MPQ we create a PhraseQuery; this is what FieldQuery supports.
* It seems expensive but most queries will be pretty small.
*/
if (currentPos == terms.size()) {
if (currentPos == terms.length) {
PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder();
queryBuilder.setSlop(orig.getSlop());
for (int i = 0; i < termsIdx.length; i++) {
queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
queryBuilder.add(terms[i][termsIdx[i]], pos[i]);
}
Query query = queryBuilder.build();
this.flatten(query, reader, flatQueries, 1F);
} else {
Term[] t = terms.get(currentPos);
Term[] t = terms[currentPos];
for (int i = 0; i < t.length; i++) {
termsIdx[currentPos] = i;
convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries);
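The recursion above enumerates every path through the term arrays; conceptually it is a cartesian product. A tiny sketch of the expansion with toy terms (not from this commit):

// Hedged sketch: an MPQ with term arrays [[quick, fast], [fox]] flattens into
// the phrase queries "quick fox" and "fast fox", one per path.
def terms = [['quick', 'fast'], ['fox']]
def phrases = terms.combinations().collect { it.join(' ') }
assert phrases.sort() == ['fast fox', 'quick fox']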

View File

@ -60,6 +60,8 @@ public class Version {
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_ID = 5000099;
@ -81,6 +83,8 @@ public class Version {
return V_5_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_2_ID:
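The new constant follows the usual ID packing, where major, minor, revision, and build each take two decimal digits; a worked decode, with the scheme inferred from the surrounding constants:

// Hedged sketch: V_2_2_1_ID = 2020199 decodes to 2.2.1, with the trailing 99
// conventionally marking a GA (non-alpha/beta) build.
int id = 2020199
int major = id.intdiv(1000000)
int minor = id.intdiv(10000) % 100
int revision = id.intdiv(100) % 100
int build = id % 100
assert "${major}.${minor}.${revision}" == '2.2.1' && build == 99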

View File

@ -213,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}
if (request.indices() != null && request.indices().length > 0) {
try {
indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices());
waitForCounter++;
} catch (IndexNotFoundException e) {
response.setStatus(ClusterHealthStatus.RED); // no indices, make sure its RED
@ -280,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
String[] concreteIndices;
try {
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
} catch (IndexNotFoundException e) {
// one of the specified indices is not there - treat it as RED.
ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,

View File

@ -231,8 +231,7 @@ public class NodeInfo extends BaseNodeResponse {
plugins.readFrom(in);
}
if (in.readBoolean()) {
ingest = new IngestInfo();
ingest.readFrom(in);
ingest = new IngestInfo(in);
}
}

View File

@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.indices.NodeIndicesStats;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsStats;
@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
@Nullable
private DiscoveryStats discoveryStats;
@Nullable
private IngestStats ingestStats;
NodeStats() {
}
@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
@Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http,
@Nullable AllCircuitBreakerStats breaker,
@Nullable ScriptStats scriptStats,
@Nullable DiscoveryStats discoveryStats) {
@Nullable DiscoveryStats discoveryStats,
@Nullable IngestStats ingestStats) {
super(node);
this.timestamp = timestamp;
this.indices = indices;
@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
this.breaker = breaker;
this.scriptStats = scriptStats;
this.discoveryStats = discoveryStats;
this.ingestStats = ingestStats;
}
public long getTimestamp() {
@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
return this.discoveryStats;
}
@Nullable
public IngestStats getIngestStats() {
return ingestStats;
}
public static NodeStats readNodeStats(StreamInput in) throws IOException {
NodeStats nodeInfo = new NodeStats();
nodeInfo.readFrom(in);
@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
scriptStats = in.readOptionalStreamable(ScriptStats::new);
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
ingestStats = in.readOptionalWritable(IngestStats::new);
}
@Override
@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
out.writeOptionalStreamable(breaker);
out.writeOptionalStreamable(scriptStats);
out.writeOptionalStreamable(discoveryStats);
out.writeOptionalWriteable(ingestStats);
}
@Override
@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
getDiscoveryStats().toXContent(builder, params);
}
if (getIngestStats() != null) {
getIngestStats().toXContent(builder, params);
}
return builder;
}
}
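The readOptionalWritable/writeOptionalWriteable pair above follows the presence-flag wire pattern: a boolean precedes the payload, and reads must mirror writes field for field. A self-contained sketch of the same idea with plain Java streams (payload type assumed):

import java.io.*

// Hedged sketch of the presence-flag pattern used for the new ingestStats field.
def baos = new ByteArrayOutputStream()
def out = new DataOutputStream(baos)
String ingest = 'ingest-stats-payload'                    // stand-in for IngestStats
out.writeBoolean(ingest != null)
if (ingest != null) { out.writeUTF(ingest) }

def input = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))
String read = input.readBoolean() ? input.readUTF() : null
assert read == 'ingest-stats-payload'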

View File

@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
private boolean breaker;
private boolean script;
private boolean discovery;
private boolean ingest;
public NodesStatsRequest() {
}
@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
this.breaker = true;
this.script = true;
this.discovery = true;
this.ingest = true;
return this;
}
@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
this.breaker = false;
this.script = false;
this.discovery = false;
this.ingest = false;
return this;
}
@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
return this;
}
public boolean ingest() {
return ingest;
}
/**
* Should ingest statistics be returned.
*/
public NodesStatsRequest ingest(boolean ingest) {
this.ingest = ingest;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
breaker = in.readBoolean();
script = in.readBoolean();
discovery = in.readBoolean();
ingest = in.readBoolean();
}
@Override
@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
out.writeBoolean(breaker);
out.writeBoolean(script);
out.writeBoolean(discovery);
out.writeBoolean(ingest);
}
}

View File

@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<Nodes
request.discovery(discovery);
return this;
}
/**
* Should ingest statistics be returned.
*/
public NodesStatsRequestBuilder ingest(boolean ingest) {
request.ingest(ingest);
return this;
}
}
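A hedged usage sketch of the new builder flag (client construction omitted; the client variable is assumed):

// Hypothetical call site: ask every node for its ingest pipeline statistics.
def response = client.admin().cluster().prepareNodesStats()
        .ingest(true)
        .get()
response.getNodes().each { node -> println node.getIngestStats() }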

View File

@ -80,7 +80,8 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
NodesStatsRequest request = nodeStatsRequest.request;
return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(),
request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery());
request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery(),
request.ingest());
}
@Override

View File

@ -67,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
index = Index.readIndex(in);
index = new Index(in);
shardId = in.readVInt();
shards = new ShardRouting[in.readVInt()];
for (int i = 0; i < shards.length; i++) {

View File

@ -59,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
@Override
protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -70,7 +70,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
@Override
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices());
Set<String> nodeIds = new HashSet<>();
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());

View File

@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
if (clusterBlockException != null) {
return clusterBlockException;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@ -106,7 +106,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
}
if (request.indices().length > 0) {
String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request);
String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request);
for (String filteredIndex : indices) {
IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
if (indexMetaData != null) {

View File

@ -99,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {
for (IndexShard indexShard : indexService) {

View File

@ -90,11 +90,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
Set<String> aliases = new HashSet<>();
for (AliasActions action : actions) {
//expand indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices());
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
//collect the aliases
Collections.addAll(aliases, action.aliases());
for (String index : concreteIndices) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
AliasAction finalAction = new AliasAction(action.aliasAction());
finalAction.index(index);
finalAction.alias(alias);

View File

@ -50,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
@Override
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -60,7 +60,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices);
listener.onResponse(new AliasesExistResponse(result));
}

View File

@ -53,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
@Override
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -63,7 +63,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
@SuppressWarnings("unchecked")
ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
listener.onResponse(new GetAliasesResponse(result));

View File

@ -33,7 +33,9 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -46,7 +48,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
private final MetaDataIndexStateService indexStateService;
private final DestructiveOperations destructiveOperations;
private volatile boolean closeIndexEnabled;
public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING =
Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope);
@Inject
public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
@@ -86,12 +89,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices);
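
Context for the setting change above: the old trailing boolean-plus-Setting.Scope arguments are replaced by varargs Setting.Property values. A minimal sketch of the new declaration style, using only the APIs visible in this diff (the wrapper class and helper method are illustrative, not part of the commit):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

class CloseEnableSettingSketch {
    // Property.Dynamic: updatable at runtime via the cluster settings API;
    // Property.NodeScope: the setting is read from node-level settings.
    static final Setting<Boolean> CLOSE_ENABLED =
            Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope);

    static boolean isCloseEnabled(Settings nodeSettings) {
        return CLOSE_ENABLED.get(nodeSettings); // returns the declared default (true) when unset
    }
}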

View File

@@ -31,10 +31,15 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* Delete index action.
*/
@@ -70,13 +75,13 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
if (concreteIndices.length == 0) {
final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
if (concreteIndices.isEmpty()) {
listener.onResponse(new DeleteIndexResponse(true));
return;
}
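
Wrapping the resolved indices in a HashSet above also dedupes them, so an index matched both directly and via a wildcard or alias is only deleted once. A tiny plain-JDK illustration of the idiom, with made-up index names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class DedupSketch {
    public static void main(String[] args) {
        String[] resolved = {"logs-2016.03.19", "logs-2016.03.19", "metrics-1"};
        Set<String> unique = new HashSet<>(Arrays.asList(resolved));
        System.out.println(unique.size()); // 2: the duplicate resolution collapses
    }
}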

View File

@@ -60,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) {
// make sure, through indices options, that the concrete indices call never throws IndexNotFoundException
IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed());
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices()));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices()));
}
@Override
@@ -68,7 +68,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
boolean exists;
try {
// Similar to the previous behaviour, but now aliases and wildcards are also supported.
indexNameExpressionResolver.concreteIndices(state, request);
indexNameExpressionResolver.concreteIndexNames(state, request);
exists = true;
} catch (IndexNotFoundException e) {
exists = false;

View File

@@ -57,12 +57,12 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
@Override
protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices());
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices());
if (concreteIndices.length == 0) {
listener.onResponse(new TypesExistsResponse(false));
return;

View File

@@ -46,10 +46,9 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
@Inject
public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
}
@Override

View File

@@ -60,7 +60,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
@Override
protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@@ -56,7 +56,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
@Override
protected void doExecute(GetFieldMappingsRequest request, final ActionListener<GetFieldMappingsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
final AtomicInteger indexCounter = new AtomicInteger();
final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<>(concreteIndices.length);

View File

@@ -52,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
@Override
protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@@ -32,9 +32,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@@ -65,6 +68,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
private String source;
private boolean updateAllTypes = false;
private Index concreteIndex;
public PutMappingRequest() {
}
@@ -90,6 +94,10 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
} else if (source.isEmpty()) {
validationException = addValidationError("mapping source is empty", validationException);
}
if (concreteIndex != null && (indices != null && indices.length > 0)) {
validationException = addValidationError("either concrete index or unresolved indices can be set, concrete index: ["
+ concreteIndex + "] and indices: " + Arrays.asList(indices), validationException);
}
return validationException;
}
@@ -102,6 +110,22 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
return this;
}
/**
* Sets a concrete index for this put mapping request.
*/
public PutMappingRequest setConcreteIndex(Index index) {
Objects.requireNonNull(index, "index must not be null");
this.concreteIndex = index;
return this;
}
/**
* Returns a concrete index for this mapping or <code>null</code> if no concrete index is defined
*/
public Index getConcreteIndex() {
return concreteIndex;
}
/**
* The indices the mapping will be applied to.
*/
@@ -259,6 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
source = in.readString();
updateAllTypes = in.readBoolean();
readTimeout(in);
concreteIndex = in.readOptionalWritable(Index::new);
}
@Override
@@ -270,5 +295,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
out.writeString(source);
out.writeBoolean(updateAllTypes);
writeTimeout(out);
out.writeOptionalWriteable(concreteIndex);
}
}
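
A hedged usage sketch of the new field: a put-mapping request may carry either unresolved index expressions or one already-resolved Index, and validate() rejects setting both at once. The index names, uuid, and mapping source below are invented for illustration:

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.index.Index;

class PutMappingSketch {
    static PutMappingRequest byExpression() {
        // unresolved expression, resolved later on the master
        return new PutMappingRequest("logs-*").type("doc").source("{\"properties\":{}}");
    }

    static PutMappingRequest byConcreteIndex() {
        // already-resolved index (name + uuid); indices() must stay unset
        return new PutMappingRequest().type("doc").source("{\"properties\":{}}")
                .setConcreteIndex(new Index("logs-2016.03.19", "hypothetical-uuid"));
    }
}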

View File

@@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import java.util.Map;
@@ -40,6 +41,11 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
public PutMappingRequestBuilder setConcreteIndex(Index index) {
request.setConcreteIndex(index);
return this;
}
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions.
* <p>

View File

@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -63,13 +64,19 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
String[] indices;
if (request.getConcreteIndex() == null) {
indices = indexNameExpressionResolver.concreteIndexNames(state, request);
} else {
indices = new String[] {request.getConcreteIndex().getName()};
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices);
}
@Override
protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) {
try {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()};
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices).type(request.type())

View File

@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -73,12 +74,12 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices);

View File

@@ -48,10 +48,9 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
@Inject
public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
}
@Override

View File

@@ -34,6 +34,7 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -61,7 +62,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
@Override
protected ClusterBlockException checkBlock(GetSettingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@@ -72,9 +73,9 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
@Override
protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
for (String concreteIndex : concreteIndices) {
for (Index concreteIndex : concreteIndices) {
IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex);
if (indexMetaData == null) {
continue;
@@ -93,7 +94,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
}
settings = settingsBuilder.build();
}
indexToSettingsBuilder.put(concreteIndex, settings);
indexToSettingsBuilder.put(concreteIndex.getName(), settings);
}
listener.onResponse(new GetSettingsResponse(indexToSettingsBuilder.build()));
}

View File

@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -65,7 +66,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
if (request.settings().getAsMap().size() == 1 && (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings()))) {
return null;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@@ -75,7 +76,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(concreteIndices)
.settings(request.settings())

View File

@@ -87,7 +87,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener<IndicesShardStoresResponse> listener) {
final RoutingTable routingTables = state.routingTable();
final RoutingNodes routingNodes = state.getRoutingNodes();
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
final Set<ShardId> shardIdsToFetch = new HashSet<>();
logger.trace("using cluster state version [{}] to determine shards", state.version());
@@ -115,7 +115,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
@Override
protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
private class AsyncShardStoresInfoFetches {

View File

@@ -107,7 +107,7 @@ public class ShardStats implements Streamable, ToXContent {
statePath = in.readString();
dataPath = in.readString();
isCustomDataPath = in.readBoolean();
seqNoStats = in.readOptionalStreamableReader(SeqNoStats::new);
seqNoStats = in.readOptionalWritable(SeqNoStats::new);
}
@Override
@@ -118,7 +118,7 @@ public class ShardStats implements Streamable, ToXContent {
out.writeString(statePath);
out.writeString(dataPath);
out.writeBoolean(isCustomDataPath);
out.writeOptionalWritable(seqNoStats);
out.writeOptionalWriteable(seqNoStats);
}
@Override
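
The rename above restores read/write symmetry (readOptionalWritable when deserializing, writeOptionalWriteable when serializing). The usual wire pattern behind such pairs is a presence flag followed by the payload; the sketch below illustrates that pattern with plain JDK I/O and is not ES's actual implementation:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

class OptionalWireSketch {
    // write side: one presence byte, then the payload only when the value is present
    static void writeOptionalLong(DataOutput out, Long value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeLong(value);
        }
    }

    // read side: the mirror image of the write side, hence the value of symmetric names
    static Long readOptionalLong(DataInput in) throws IOException {
        return in.readBoolean() ? in.readLong() : null;
    }
}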

View File

@@ -47,6 +47,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
@@ -245,17 +246,18 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
continue;
}
String concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
Index concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
MappingMetaData mappingMd = null;
if (metaData.hasIndex(concreteIndex)) {
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
final IndexMetaData indexMetaData = metaData.index(concreteIndex);
if (indexMetaData != null) {
mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
}
try {
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex.getName());
} catch (ElasticsearchParseException | RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), indexRequest.type(), indexRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@@ -263,9 +265,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
} else if (request instanceof DeleteRequest) {
try {
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex, (DeleteRequest)request);
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@@ -274,9 +276,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
} else if (request instanceof UpdateRequest) {
try {
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex, (UpdateRequest)request);
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@@ -294,7 +296,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ActionRequest request = bulkRequest.requests.get(i);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@@ -304,7 +306,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
list.add(new BulkItemRequest(i, request));
} else if (request instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@@ -314,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
list.add(new BulkItemRequest(i, request));
} else if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@@ -356,18 +358,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
public void onFailure(Throwable e) {
// create failures for all relevant requests
for (BulkItemRequest request : requests) {
final String indexName = concreteIndices.getConcreteIndex(request.index()).getName();
if (request.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()), indexRequest.type(), indexRequest.id(), e)));
new BulkItemResponse.Failure(indexName, indexRequest.type(), indexRequest.id(), e)));
} else if (request.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()), deleteRequest.type(), deleteRequest.id(), e)));
new BulkItemResponse.Failure(indexName, deleteRequest.type(), deleteRequest.id(), e)));
} else if (request.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()), updateRequest.type(), updateRequest.id(), e)));
new BulkItemResponse.Failure(indexName, updateRequest.type(), updateRequest.id(), e)));
}
}
if (counter.decrementAndGet() == 0) {
@@ -385,7 +388,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private boolean addFailureIfIndexIsUnavailable(DocumentRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
final ConcreteIndices concreteIndices,
final MetaData metaData) {
String concreteIndex = concreteIndices.getConcreteIndex(request.index());
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
Exception unavailableException = null;
if (concreteIndex == null) {
try {
@@ -397,9 +400,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
}
if (unavailableException == null) {
IndexMetaData indexMetaData = metaData.index(concreteIndex);
IndexMetaData indexMetaData = metaData.getIndexSafe(concreteIndex);
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
unavailableException = new IndexClosedException(metaData.index(request.index()).getIndex());
unavailableException = new IndexClosedException(concreteIndex);
}
}
if (unavailableException != null) {
@@ -425,19 +428,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private static class ConcreteIndices {
private final ClusterState state;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final Map<String, String> indices = new HashMap<>();
private final Map<String, Index> indices = new HashMap<>();
ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) {
this.state = state;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
String getConcreteIndex(String indexOrAlias) {
Index getConcreteIndex(String indexOrAlias) {
return indices.get(indexOrAlias);
}
String resolveIfAbsent(DocumentRequest request) {
String concreteIndex = indices.get(request.index());
Index resolveIfAbsent(DocumentRequest request) {
Index concreteIndex = indices.get(request.index());
if (concreteIndex == null) {
concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request);
indices.put(request.index(), concreteIndex);
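
ConcreteIndices above is a per-request memo of expression-to-Index resolutions, so each alias or expression in a bulk request is resolved at most once. The same resolve-if-absent idiom in generic plain-JDK form, with the resolver left abstract:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class ResolutionCache<K, V> {
    private final Map<K, V> cache = new HashMap<>();
    private final Function<K, V> resolver;

    ResolutionCache(Function<K, V> resolver) {
        this.resolver = resolver;
    }

    // computeIfAbsent invokes the resolver only on the first lookup of a key
    V resolveIfAbsent(K expression) {
        return cache.computeIfAbsent(expression, resolver);
    }
}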

View File

@@ -47,7 +47,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
@@ -75,17 +74,19 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
private final UpdateHelper updateHelper;
private final boolean allowIdGeneration;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver,
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver,
BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
this.updateHelper = updateHelper;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
this.mappingUpdatedAction = mappingUpdatedAction;
}
@Override

View File

@@ -60,10 +60,10 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
TransportCreateIndexAction createIndexAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, MappingUpdatedAction mappingUpdatedAction,
IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
mappingUpdatedAction, actionFilters, indexNameExpressionResolver,
actionFilters, indexNameExpressionResolver,
DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX);
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;

View File

@@ -28,6 +28,8 @@ import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.joda.time.DateTime;
import java.io.IOException;
@@ -146,6 +148,17 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
*/
protected abstract T valueOf(String value, String optionalFormat);
/**
* @param value
* The value to be converted to a String
* @param optionalFormat
* A string describing how to print the specified value. Whether
* this parameter is supported depends on the implementation. If
* optionalFormat is specified and the implementation doesn't
* support it, an {@link UnsupportedOperationException} is thrown.
*/
public abstract String stringValueOf(Object value, String optionalFormat);
/**
* Merges the provided stats into this stats instance.
*/
@@ -274,6 +287,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Long.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Long.toString(((Number) value).longValue());
} else {
throw new IllegalArgumentException("value must be a Long: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -327,6 +352,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Float.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Float.toString(((Number) value).floatValue());
} else {
throw new IllegalArgumentException("value must be a Float: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -380,6 +417,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Double.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Double.toString(((Number) value).doubleValue());
} else {
throw new IllegalArgumentException("value must be a Double: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -437,6 +486,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return new BytesRef(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof BytesRef) {
return ((BytesRef) value).utf8ToString();
} else {
throw new IllegalArgumentException("value must be a BytesRef: " + value);
}
}
@Override
protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(Fields.MIN_VALUE, getMinValueAsString());
@@ -490,6 +551,25 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return dateFormatter.parser().parseMillis(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
FormatDateTimeFormatter dateFormatter = this.dateFormatter;
if (optionalFormat != null) {
dateFormatter = Joda.forPattern(optionalFormat);
}
long millis;
if (value instanceof java.lang.Long) {
millis = ((java.lang.Long) value).longValue();
} else if (value instanceof DateTime) {
millis = ((DateTime) value).getMillis();
} else if (value instanceof BytesRef) {
millis = dateFormatter.parser().parseMillis(((BytesRef) value).utf8ToString());
} else {
throw new IllegalArgumentException("value must be either a DateTime or a long: " + value);
}
return dateFormatter.printer().print(millis);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -504,6 +584,28 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
public static class Ip extends Long {
public Ip(int maxDoc, int docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) {
super(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue);
}
protected Ip(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) {
super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue);
}
public Ip() {
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (value instanceof BytesRef) {
return super.stringValueOf(IpFieldMapper.ipToLong(((BytesRef) value).utf8ToString()), optionalFormat);
}
return super.stringValueOf(value, optionalFormat);
}
}
public static FieldStats read(StreamInput in) throws IOException {
FieldStats stats;
byte type = in.readByte();
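
For the Date.stringValueOf implementation above: Long, DateTime, and BytesRef inputs are all reduced to epoch millis, then printed through a Joda formatter, preferring the caller's optionalFormat over the field's own. A reduced plain Joda-Time sketch of that printing step (the pattern and date are made up):

import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

class DatePrintSketch {
    public static void main(String[] args) {
        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd"); // caller-supplied pattern
        long millis = new DateTime(2016, 3, 19, 13, 52).getMillis();     // Long/DateTime inputs reduce to millis
        System.out.println(fmt.print(millis));                           // prints 2016-03-19
    }
}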

View File

@@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
continue;
}
item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), item.index()));
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
if (item.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type())) {
responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(concreteSingleIndex, item.type(), item.id(),
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]"))));

View File

@@ -42,6 +42,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
@@ -584,14 +585,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
return this.versionType;
}
private Version getVersion(MetaData metaData, String concreteIndex) {
// this can go away in 3.0 but is here now for easy backporting - since in 2.x we need the version on the timestamp stuff
final IndexMetaData indexMetaData = metaData.getIndices().get(concreteIndex);
if (indexMetaData == null) {
throw new IndexNotFoundException(concreteIndex);
}
return Version.indexCreated(indexMetaData.getSettings());
}
public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
// resolve the routing if needed
@@ -600,8 +593,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
// resolve timestamp if provided externally
if (timestamp != null) {
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER,
getVersion(metaData, concreteIndex));
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
}
if (mappingMd != null) {
// might as well check for routing here
@@ -645,7 +637,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
// assigned again because mappingMd and
// mappingMd#timestamp() are not null
assert mappingMd != null;
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter());
}
}
}

View File

@@ -69,6 +69,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
private final TransportCreateIndexAction createIndexAction;
private final ClusterService clusterService;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
@@ -76,8 +77,9 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
this.mappingUpdatedAction = mappingUpdatedAction;
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
@@ -143,7 +145,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
IndexMetaData indexMetaData = metaData.getIndexSafe(request.shardId().getIndex());
MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
if (mappingMd != null && mappingMd.routing().required()) {
if (request.routing() == null) {
@@ -205,8 +207,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
if (update != null) {
final String indexName = shardId.getIndexName();
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
operation = prepareIndexOperationOnPrimary(request, indexShard);
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {

View File

@@ -112,7 +112,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
logger.error("failed to execute pipeline for a bulk request", throwable);
listener.onFailure(throwable);
} else {
long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
if (bulkRequest.requests().isEmpty()) {
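
The two conversions above are numerically identical; TimeUnit.NANOSECONDS.toMillis(n) simply names the source unit at the call site. A quick self-contained demonstration of the equivalence:

import java.util.concurrent.TimeUnit;

class TookMillisSketch {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(25);
        long elapsedNanos = System.nanoTime() - start;
        long viaConvert = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);
        long viaToMillis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);
        System.out.println(viaConvert == viaToMillis); // always true: same conversion, clearer call
    }
}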

View File

@@ -173,7 +173,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
PercolateRequest percolateRequest = (PercolateRequest) element;
String[] concreteIndices;
try {
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, percolateRequest);
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, percolateRequest);
} catch (IndexNotFoundException e) {
reducedResponses.set(slot, e);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));

View File

@@ -96,7 +96,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(),
startTime(), request.indices());
for (String index : concreteIndices) {

View File

@@ -393,9 +393,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
/**
* Delegates to
* {@link org.elasticsearch.search.suggest.SuggestBuilder#addSuggestion(org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder)}
* .
* Delegates to {@link SearchSourceBuilder#suggest(SuggestBuilder)}
*/
public SearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
sourceBuilder().suggest(suggestBuilder);

View File

@@ -64,7 +64,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
// optimize search type for cases where there is only one shard group to search on
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);

View File

@@ -20,10 +20,10 @@
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.io.IOException;
@@ -32,29 +32,29 @@ import java.io.IOException;
*/
public final class ShardSuggestRequest extends BroadcastShardRequest {
private BytesReference suggestSource;
private SuggestBuilder suggest;
public ShardSuggestRequest() {
}
ShardSuggestRequest(ShardId shardId, SuggestRequest request) {
super(shardId, request);
this.suggestSource = request.suggest();
this.suggest = request.suggest();
}
public BytesReference suggest() {
return suggestSource;
public SuggestBuilder suggest() {
return suggest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
suggestSource = in.readBytesReference();
suggest = SuggestBuilder.PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBytesReference(suggestSource);
suggest.writeTo(out);
}
}

View File

@@ -21,27 +21,25 @@ package org.elasticsearch.action.suggest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
/**
* A request to get suggestions for corrections of phrases. Best created with
* {@link org.elasticsearch.client.Requests#suggestRequest(String...)}.
* <p>
* The request requires the suggest query source to be set either using
* {@link #suggest(org.elasticsearch.common.bytes.BytesReference)} / {@link #suggest(org.elasticsearch.common.bytes.BytesReference)}
* or by using {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)}
* (Best created using the {link @org.elasticsearch.search.suggest.SuggestBuilders)}).
* The request requires the suggest query source to be set using
* {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)}
*
* @see SuggestResponse
* @see org.elasticsearch.client.Client#suggest(SuggestRequest)
@@ -56,7 +54,7 @@ public final class SuggestRequest extends BroadcastRequest<SuggestRequest> {
@Nullable
private String preference;
private BytesReference suggestSource;
private SuggestBuilder suggest;
public SuggestRequest() {
}
@@ -76,40 +74,21 @@ public final class SuggestRequest extends BroadcastRequest<SuggestRequest> {
}
/**
* The Phrase to get correction suggestions for
* The suggestion query to get correction suggestions for
*/
public BytesReference suggest() {
return suggestSource;
public SuggestBuilder suggest() {
return suggest;
}
/**
* set a new source for the suggest query
*/
public SuggestRequest suggest(BytesReference suggestSource) {
this.suggestSource = suggestSource;
public SuggestRequest suggest(SuggestBuilder suggest) {
Objects.requireNonNull(suggest, "suggest must not be null");
this.suggest = suggest;
return this;
}
/**
* set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder}
* for phrase and term suggestion lookup
*/
public SuggestRequest suggest(SuggestBuilder suggestBuilder) {
return suggest(suggestBuilder.buildAsBytes(Requests.CONTENT_TYPE));
}
/**
* set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder}
* for completion suggestion lookup
*/
public SuggestRequest suggest(SuggestBuilder.SuggestionBuilder suggestionBuilder) {
return suggest(suggestionBuilder.buildAsBytes(Requests.CONTENT_TYPE));
}
public SuggestRequest suggest(String source) {
return suggest(new BytesArray(source));
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
@@ -147,25 +126,29 @@ public final class SuggestRequest extends BroadcastRequest<SuggestRequest> {
super.readFrom(in);
routing = in.readOptionalString();
preference = in.readOptionalString();
suggest(in.readBytesReference());
suggest = SuggestBuilder.PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Objects.requireNonNull(suggest, "suggest must not be null");
super.writeTo(out);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBytesReference(suggestSource);
suggest.writeTo(out);
}
@Override
public String toString() {
Objects.requireNonNull(suggest, "suggest must not be null");
String sSource = "_na_";
try {
sSource = XContentHelper.convertToJson(suggestSource, false);
XContentBuilder builder = JsonXContent.contentBuilder();
builder = suggest.toXContent(builder, ToXContent.EMPTY_PARAMS);
sSource = builder.string();
} catch (Exception e) {
// ignore
}
return "[" + Arrays.toString(indices) + "]" + ", suggestSource[" + sSource + "]";
return "[" + Arrays.toString(indices) + "]" + ", suggest[" + sSource + "]";
}
}
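
A hedged sketch of building the new typed request: the raw-bytes suggest source is gone, so callers assemble a SuggestBuilder and hand it over directly, as this file's javadoc now says. The index name, global text, suggestion name, and the term-suggestion builder passed in are assumptions for illustration:

import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;

class SuggestRequestSketch {
    static SuggestRequest newRequest(SuggestionBuilder<?> termSuggestion) {
        SuggestBuilder suggest = new SuggestBuilder();
        suggest.setGlobalText("recipee");                  // text the suggesters should correct
        suggest.addSuggestion("spelling", termSuggestion); // name + typed builder, as in this diff
        return new SuggestRequest("articles").suggest(suggest);
    }
}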

View File

@@ -19,17 +19,10 @@
package org.elasticsearch.action.suggest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
import java.io.IOException;
import org.elasticsearch.search.suggest.SuggestionBuilder;
/**
* A suggest action request builder.
@@ -44,9 +37,11 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<Sugg
/**
* Add a definition for suggestions to the request
* @param name the name for the suggestion that will also be used in the response
* @param suggestion the suggestion configuration
*/
public <T> SuggestRequestBuilder addSuggestion(SuggestionBuilder<T> suggestion) {
suggest.addSuggestion(suggestion);
public SuggestRequestBuilder addSuggestion(String name, SuggestionBuilder<?> suggestion) {
suggest.addSuggestion(name, suggestion);
return this;
}
@@ -59,7 +54,7 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<Sugg
}
public SuggestRequestBuilder setSuggestText(String globalText) {
this.suggest.setText(globalText);
this.suggest.setGlobalText(globalText);
return this;
}
@@ -84,13 +79,7 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<Sugg
@Override
protected SuggestRequest beforeExecute(SuggestRequest request) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
suggest.toXContent(builder, ToXContent.EMPTY_PARAMS);
request.suggest(builder.bytes());
} catch (IOException e) {
throw new ElasticsearchException("Unable to build suggestion request", e);
}
request.suggest(suggest);
return request;
}
}

View File

@@ -32,17 +32,15 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.suggest.stats.ShardSuggestMetric;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestPhase;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.threadpool.ThreadPool;
@@ -58,15 +56,16 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
/**
* Defines the transport of a suggestion request across the cluster
*/
public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> {
public class TransportSuggestAction
extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> {
private final IndicesService indicesService;
private final SuggestPhase suggestPhase;
@Inject
public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, SuggestPhase suggestPhase, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, SuggestPhase suggestPhase,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
SuggestRequest::new, ShardSuggestRequest::new, ThreadPool.Names.SUGGEST);
this.indicesService = indicesService;
@@ -85,7 +84,8 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
@Override
protected GroupShardsIterator shards(ClusterState clusterState, SuggestRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
Map<String, Set<String>> routingMap =
indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
}
@@ -124,7 +124,8 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
}
}
return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(), successfulShards, failedShards, shardFailures);
return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(),
successfulShards, failedShards, shardFailures);
}
@Override
@ -134,16 +135,10 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
ShardSuggestMetric suggestMetric = indexShard.getSuggestMetric();
suggestMetric.preSuggest();
long startTime = System.nanoTime();
XContentParser parser = null;
try (Engine.Searcher searcher = indexShard.acquireSearcher("suggest")) {
BytesReference suggest = request.suggest();
if (suggest != null && suggest.length() > 0) {
parser = XContentFactory.xContent(suggest).createParser(suggest);
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("suggest content missing");
}
final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
indexService.fieldData(), request.shardId());
SuggestBuilder suggest = request.suggest();
if (suggest != null) {
final SuggestionSearchContext context = suggest.build(indexService.newQueryShardContext());
final Suggest result = suggestPhase.execute(context, searcher.searcher());
return new ShardSuggestResponse(request.shardId(), result);
}
@ -151,9 +146,6 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
} catch (Throwable ex) {
throw new ElasticsearchException("failed to execute suggest", ex);
} finally {
if (parser != null) {
parser.close();
}
suggestMetric.postSuggest(System.nanoTime() - startTime);
}
}

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
@ -39,7 +40,8 @@ import java.util.List;
*/
public final class AutoCreateIndex {
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER);
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver;
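
This is the first of many occurrences of the same mechanical migration in this commit: the positional "boolean dynamic, Setting.Scope scope" arguments give way to Setting.Property varargs. Side by side, with an illustrative key:

    // old style: trailing dynamic flag plus an explicit scope
    // Setting.boolSetting("example.flag", false, true, Setting.Scope.CLUSTER);
    // new style: properties as varargs; Property.Dynamic appears only where the
    // old call passed dynamic=true
    Setting<Boolean> EXAMPLE_FLAG =
        Setting.boolSetting("example.flag", false, Property.Dynamic, Property.NodeScope);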

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent {
/**
* Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
*/
public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> REQUIRES_NAME_SETTING =
Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);
private volatile boolean destructiveRequiresName;
@Inject

View File

@ -125,7 +125,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
throw blockException;
}
// update to concrete indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
blockException = checkRequestBlock(clusterState, request, concreteIndices);
if (blockException != null) {
throw blockException;

View File

@ -51,7 +51,6 @@ import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -241,7 +240,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
throw globalBlockException;
}
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices);
if (requestBlockException != null) {
throw requestBlockException;

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -37,7 +38,8 @@ import java.util.function.Supplier;
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeAction<Request, Response> {
public static final Setting<Boolean> FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> FORCE_LOCAL_SETTING =
Setting.boolSetting("action.master.force_local", false, Property.NodeScope);
private final boolean forceLocal;

View File

@ -50,7 +50,7 @@ public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequ
@Override
protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
doMasterOperation(request, concreteIndices, state, listener);
}

View File

@ -97,7 +97,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
@Override
public void onFailure(Throwable e) {
logger.trace("{}: got failure from {}", actionName, shardId);
int totalNumCopies = clusterState.getMetaData().index(shardId.getIndexName()).getNumberOfReplicas() + 1;
int totalNumCopies = clusterState.getMetaData().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
ShardResponse shardResponse = newShardResponse();
ReplicationResponse.ShardInfo.Failure[] failures;
if (TransportActions.isShardNotAvailableException(e)) {
@ -130,7 +130,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
*/
protected List<ShardId> shards(Request request, ClusterState clusterState) {
List<ShardId> shardIds = new ArrayList<>();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
for (String index : concreteIndices) {
IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index);
if (indexMetaData != null) {

View File

@ -103,7 +103,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected final ShardStateAction shardStateAction;
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
protected final MappingUpdatedAction mappingUpdatedAction;
final String transportReplicaAction;
final String transportPrimaryAction;
@ -113,7 +112,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
@ -121,7 +120,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.clusterService = clusterService;
this.indicesService = indicesService;
this.shardStateAction = shardStateAction;
this.mappingUpdatedAction = mappingUpdatedAction;
this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
@ -270,7 +268,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send response for " + actionName, e1);
logger.warn("Failed to send response for {}", e1, actionName);
}
}
});
@ -395,7 +393,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(t);
} catch (IOException responseException) {
logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
logger.warn("actual Exception", t);
}
}
@ -525,7 +523,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
private String concreteIndex(ClusterState state) {
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request).getName() : request.index();
}
private ShardRouting primary(ClusterState state) {
@ -1109,7 +1107,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
try {
channel.sendResponse(finalResponse);
} catch (IOException responseException) {
logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
}
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on all replicas [{}] for request [{}]", transportReplicaAction, shardId, replicaRequest);

View File

@ -138,7 +138,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
throw blockException;
}
}
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request).getName());
resolveRequest(observer.observedState(), request);
blockException = checkRequestBlock(observer.observedState(), request);
if (blockException != null) {

View File

@ -141,7 +141,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
String concreteSingleIndex;
if (resolveIndex(request)) {
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request);
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request).getName();
} else {
concreteSingleIndex = request.index();
}
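
concreteSingleIndex now resolves to an Index object rather than a bare String, which is why these call sites append .getName(). A hedged sketch of what the resolved object carries; the accessor names are assumptions about org.elasticsearch.index.Index at this point:

    Index resolved = indexNameExpressionResolver.concreteSingleIndex(clusterState, request);
    String name = resolved.getName(); // what routing tables and metadata are keyed on
    String uuid = resolved.getUUID(); // distinguishes an index from a deleted and recreated namesake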

View File

@ -72,7 +72,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()))));
continue;
}
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, (DocumentRequest) termVectorsRequest);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));

View File

@ -44,6 +44,7 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -671,9 +672,15 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
} else if ("detect_noop".equals(currentFieldName)) {
detectNoop(parser.booleanValue());
} else if ("fields".equals(currentFieldName)) {
List<Object> values = parser.list();
String[] fields = values.toArray(new String[values.size()]);
fields(fields);
List<Object> fields = null;
if (token == XContentParser.Token.START_ARRAY) {
fields = (List) parser.list();
} else if (token.isValue()) {
fields = Collections.singletonList(parser.text());
}
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else {
//here we don't have settings available, unable to throw deprecation exceptions
scriptParameterParser.token(currentFieldName, token, parser, ParseFieldMatcher.EMPTY);
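
The parsing change means the fields element of an update request body may now be either an array or a single string; both of the following bodies (field names illustrative) end up in the same fields(...) call:

    String arrayForm  = "{\"fields\": [\"title\", \"body\"]}"; // taken by the START_ARRAY branch
    String singleForm = "{\"fields\": \"title\"}";             // taken by the token.isValue() branch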

View File

@ -24,10 +24,10 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
@ -45,10 +45,9 @@ import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* Internal startup code.
*/
@ -189,9 +188,13 @@ final class Bootstrap {
node = new Node(nodeSettings);
}
private static Environment initialSettings(boolean foreground) {
private static Environment initialSettings(boolean foreground, String pidFile) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
return InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal);
Settings.Builder builder = Settings.builder();
if (Strings.hasLength(pidFile)) {
builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
}
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal);
}
private void start() {
@ -218,22 +221,18 @@ final class Bootstrap {
* This method is invoked by {@link Elasticsearch#main(String[])}
* to startup elasticsearch.
*/
static void init(String[] args) throws Throwable {
static void init(
final boolean foreground,
final String pidFile,
final Map<String, String> esSettings) throws Throwable {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
if (CliTool.ExitStatus.OK != status) {
exit(status.status());
}
elasticsearchSettings(esSettings);
INSTANCE = new Bootstrap();
boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground")));
Environment environment = initialSettings(foreground);
Environment environment = initialSettings(foreground, pidFile);
Settings settings = environment.settings();
LogConfigurator.configure(settings, true);
checkForCustomConfFile();
@ -297,6 +296,13 @@ final class Bootstrap {
}
}
@SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
private static void elasticsearchSettings(Map<String, String> esSettings) {
for (Map.Entry<String, String> esSetting : esSettings.entrySet()) {
System.setProperty(esSetting.getKey(), esSetting.getValue());
}
}
@SuppressForbidden(reason = "System#out")
private static void closeSystOut() {
System.out.close();
@ -307,14 +313,6 @@ final class Bootstrap {
System.err.close();
}
@SuppressForbidden(reason = "System#err")
private static void sysError(String line, boolean flush) {
System.err.println(line);
if (flush) {
System.err.flush();
}
}
private static void checkForCustomConfFile() {
String confFileSetting = System.getProperty("es.default.config");
checkUnsetAndMaybeExit(confFileSetting, "es.default.config");

View File

@ -1,183 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.elasticsearch.Build;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
final class BootstrapCLIParser extends CliTool {
private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
.cmds(Start.CMD, Version.CMD)
.build();
public BootstrapCLIParser() {
super(CONFIG);
}
public BootstrapCLIParser(Terminal terminal) {
super(CONFIG, terminal);
}
@Override
protected Command parse(String cmdName, CommandLine cli) throws Exception {
switch (cmdName.toLowerCase(Locale.ROOT)) {
case Start.NAME:
return Start.parse(terminal, cli);
case Version.NAME:
return Version.parse(terminal, cli);
default:
assert false : "should never get here, if the user enters an unknown command, an error message should be shown before parse is called";
return null;
}
}
static class Version extends CliTool.Command {
private static final String NAME = "version";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Version.class).build();
public static Command parse(Terminal terminal, CommandLine cli) {
return new Version(terminal);
}
public Version(Terminal terminal) {
super(terminal);
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+ ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ ", JVM: " + JvmInfo.jvmInfo().version());
return ExitStatus.OK_AND_EXIT;
}
}
static class Start extends CliTool.Command {
private static final String NAME = "start";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Start.class)
.options(
optionBuilder("d", "daemonize").hasArg(false).required(false),
optionBuilder("p", "pidfile").hasArg(true).required(false),
optionBuilder("V", "version").hasArg(false).required(false),
Option.builder("D").argName("property=value").valueSeparator('=').numberOfArgs(2)
)
.stopAtNonOption(true) // needed to parse the --foo.bar options, so this parser must be lenient
.build();
// TODO: don't use system properties as a way to do this, its horrible...
@SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
if (cli.hasOption("V")) {
return Version.parse(terminal, cli);
}
if (cli.hasOption("d")) {
System.setProperty("es.foreground", "false");
}
String pidFile = cli.getOptionValue("pidfile");
if (!Strings.isNullOrEmpty(pidFile)) {
System.setProperty("es.pidfile", pidFile);
}
if (cli.hasOption("D")) {
Properties properties = cli.getOptionProperties("D");
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
String key = (String) entry.getKey();
String propertyName = key.startsWith("es.") ? key : "es." + key;
System.setProperty(propertyName, entry.getValue().toString());
}
}
// hacky way to extract all the fancy extra args, there is no CLI tool helper for this
Iterator<String> iterator = cli.getArgList().iterator();
final Map<String, String> properties = new HashMap<>();
while (iterator.hasNext()) {
String arg = iterator.next();
if (!arg.startsWith("--")) {
if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
throw new UserError(ExitStatus.USAGE,
"Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
);
} else {
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
}
}
// if there is no = sign, we have to get the next argument as the value
arg = arg.replace("--", "");
if (arg.contains("=")) {
String[] splitArg = arg.split("=", 2);
String key = splitArg[0];
String value = splitArg[1];
properties.put("es." + key, value);
} else {
if (iterator.hasNext()) {
String value = iterator.next();
if (value.startsWith("--")) {
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
properties.put("es." + arg, value);
} else {
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
}
}
for (Map.Entry<String, String> entry : properties.entrySet()) {
System.setProperty(entry.getKey(), entry.getValue());
}
return new Start(terminal);
}
public Start(Terminal terminal) {
super(terminal);
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
return ExitStatus.OK;
}
}
}

View File

@ -20,7 +20,7 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Setting.Property;
public final class BootstrapSettings {
@ -29,10 +29,13 @@ public final class BootstrapSettings {
// TODO: remove this hack when insecure defaults are removed from java
public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope);
public static final Setting<Boolean> MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER);
public static final Setting<Boolean> CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER);
public static final Setting<Boolean> MLOCKALL_SETTING =
Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope);
public static final Setting<Boolean> SECCOMP_SETTING =
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
public static final Setting<Boolean> CTRLHANDLER_SETTING =
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
}

View File

@ -19,23 +19,94 @@
package org.elasticsearch.bootstrap;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.Build;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* This class starts elasticsearch.
*/
public final class Elasticsearch {
class Elasticsearch extends Command {
/** no instantiation */
private Elasticsearch() {}
private final OptionSpec<Void> versionOption;
private final OptionSpec<Void> daemonizeOption;
private final OptionSpec<String> pidfileOption;
private final OptionSpec<KeyValuePair> propertyOption;
// visible for testing
Elasticsearch() {
super("starts elasticsearch");
// TODO: in jopt-simple 5.0, make this mutually exclusive with all other options
versionOption = parser.acceptsAll(Arrays.asList("V", "version"),
"Prints elasticsearch version information and exits");
daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"),
"Starts Elasticsearch in the background");
// TODO: in jopt-simple 5.0 this option type can be a Path
pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
"Creates a pid file in the specified path on start")
.withRequiredArg();
propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class);
}
/**
* Main entry point for starting elasticsearch
*/
public static void main(String[] args) throws StartupError {
public static void main(final String[] args) throws Exception {
final Elasticsearch elasticsearch = new Elasticsearch();
int status = main(args, elasticsearch, Terminal.DEFAULT);
if (status != ExitCodes.OK) {
exit(status);
}
}
static int main(final String[] args, final Elasticsearch elasticsearch, final Terminal terminal) throws Exception {
return elasticsearch.main(args, terminal);
}
@Override
protected void execute(Terminal terminal, OptionSet options) throws Exception {
if (options.has(versionOption)) {
if (options.has(daemonizeOption) || options.has(pidfileOption)) {
throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");
}
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+ ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ ", JVM: " + JvmInfo.jvmInfo().version());
return;
}
final boolean daemonize = options.has(daemonizeOption);
final String pidFile = pidfileOption.value(options);
final Map<String, String> esSettings = new HashMap<>();
for (final KeyValuePair kvp : propertyOption.values(options)) {
if (!kvp.key.startsWith("es.")) {
throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]");
}
if (kvp.value.isEmpty()) {
throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty");
}
esSettings.put(kvp.key, kvp.value);
}
init(daemonize, pidFile, esSettings);
}
void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {
try {
Bootstrap.init(args);
} catch (Throwable t) {
Bootstrap.init(!daemonize, pidFile, esSettings);
} catch (final Throwable t) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
throw new StartupError(t);
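
Taken together, the option declarations above give the launcher roughly this surface; a hedged sketch driving the package-private testing entry point, with paths and setting values illustrative:

    // equivalent shell invocations (launcher path illustrative):
    //   bin/elasticsearch --version
    //   bin/elasticsearch -d -p /var/run/elasticsearch.pid -E es.cluster.name=prod
    int status = Elasticsearch.main(
        new String[] { "-d", "-p", "/var/run/elasticsearch.pid", "-E", "es.cluster.name=prod" },
        new Elasticsearch(), Terminal.DEFAULT);

Note the es. prefix on the -E value: per the check above, -E cluster.name=prod would be rejected with a usage error.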

View File

@ -76,7 +76,7 @@ class JNANatives {
softLimit = rlimit.rlim_cur.longValue();
hardLimit = rlimit.rlim_max.longValue();
} else {
logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError()));
logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError()));
}
}
} catch (UnsatisfiedLinkError e) {
@ -85,19 +85,20 @@ class JNANatives {
}
// mlockall failed for some reason
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg);
logger.warn("This can result in part of the JVM being swapped out.");
if (errno == JNACLibrary.ENOMEM) {
if (rlimitSuccess) {
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit));
if (Constants.LINUX) {
// give specific instructions for the linux case to make it easy
String user = System.getProperty("user.name");
logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
"\t# allow user '" + user + "' mlockall\n" +
"\t" + user + " soft memlock unlimited\n" +
"\t" + user + " hard memlock unlimited"
);
"\t# allow user '{}' mlockall\n" +
"\t{} soft memlock unlimited\n" +
"\t{} hard memlock unlimited",
user, user, user
);
logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
}
} else {
@ -155,7 +156,7 @@ class JNANatives {
// the amount of memory we wish to lock, plus a small overhead (1MB).
SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024));
if (!kernel.SetProcessWorkingSetSize(process, size, size)) {
logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError());
logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError());
} else {
JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation();
long address = 0;
@ -188,7 +189,7 @@ class JNANatives {
if (result) {
logger.debug("console ctrl handler correctly set");
} else {
logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:");
logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError());
}
} catch (UnsatisfiedLinkError e) {
// this will have already been logged by Kernel32Library, no need to repeat it

View File

@ -200,7 +200,7 @@ final class JVMCheck {
HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
if (bug != null && bug.check()) {
if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
} else {
throw new RuntimeException(bug.getErrorMessage());
}

View File

@ -394,7 +394,7 @@ final class Seccomp {
method = 0;
int errno1 = Native.getLastError();
if (logger.isDebugEnabled()) {
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
}
if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
int errno2 = Native.getLastError();

View File

@ -19,13 +19,13 @@
package org.elasticsearch.cache.recycler;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none;
/** A recycler of fixed-size pages. */
public class PageCacheRecycler extends AbstractComponent implements Releasable {
public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Type> TYPE_SETTING =
new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING =
Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
public static final Setting<Double> WEIGHT_BYTES_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope);
public static final Setting<Double> WEIGHT_LONG_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope);
public static final Setting<Double> WEIGHT_INT_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope);
// object pages are less useful to us so we give them a lower weight by default
public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_OBJECTS_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope);
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;

View File

@ -0,0 +1,112 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cli;
import java.io.IOException;
import java.util.Arrays;
import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;
/**
* An action to execute within a cli.
*/
public abstract class Command {
/** A description of the command, used in the help output. */
protected final String description;
/** The option parser for this command. */
protected final OptionParser parser = new OptionParser();
private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp();
private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output");
public Command(String description) {
this.description = description;
}
/** Parses options for this command from args and executes it. */
public final int main(String[] args, Terminal terminal) throws Exception {
try {
mainWithoutErrorHandling(args, terminal);
} catch (OptionException e) {
printHelp(terminal);
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
return ExitCodes.USAGE;
} catch (UserError e) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
return e.exitCode;
}
return ExitCodes.OK;
}
/**
* Executes the command, but all errors are thrown.
*/
void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception {
final OptionSet options = parser.parse(args);
if (options.has(helpOption)) {
printHelp(terminal);
return;
}
if (options.has(silentOption)) {
if (options.has(verboseOption)) {
// mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it
throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together");
}
terminal.setVerbosity(Terminal.Verbosity.SILENT);
} else if (options.has(verboseOption)) {
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
} else {
terminal.setVerbosity(Terminal.Verbosity.NORMAL);
}
execute(terminal, options);
}
/** Prints a help message for the command to the terminal. */
private void printHelp(Terminal terminal) throws IOException {
terminal.println(description);
terminal.println("");
printAdditionalHelp(terminal);
parser.printHelpOn(terminal.getWriter());
}
/** Prints additional help information, specific to the command */
protected void printAdditionalHelp(Terminal terminal) {}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
protected static void exit(int status) {
System.exit(status);
}
/**
* Executes this command.
*
* Any runtime user errors (like an input file that does not exist) should throw a {@link UserError}. */
protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;
}
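
A minimal hedged subclass showing the extension points (description feeds the help output, parser collects options, execute is the body); the command and option names are made up:

    class HelloCommand extends Command {
        private final OptionSpec<String> nameOption;
        HelloCommand() {
            super("prints a greeting");
            nameOption = parser.accepts("name", "who to greet").withRequiredArg().defaultsTo("world");
        }
        @Override
        protected void execute(Terminal terminal, OptionSet options) throws Exception {
            terminal.println("hello " + nameOption.value(options));
        }
    }

Everything else comes from the inherited main(String[], Terminal): the -h/-s/-v handling and the mapping of OptionException and UserError onto exit codes.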

View File

@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cli;
/**
* POSIX exit codes.
*/
public class ExitCodes {
public static final int OK = 0;
public static final int USAGE = 64; /* command line usage error */
public static final int DATA_ERROR = 65; /* data format error */
public static final int NO_INPUT = 66; /* cannot open input */
public static final int NO_USER = 67; /* addressee unknown */
public static final int NO_HOST = 68; /* host name unknown */
public static final int UNAVAILABLE = 69; /* service unavailable */
public static final int CODE_ERROR = 70; /* internal software error */
public static final int CANT_CREATE = 73; /* can't create (user) output file */
public static final int IO_ERROR = 74; /* input/output error */
public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */
public static final int PROTOCOL = 76; /* remote error in protocol */
public static final int NOPERM = 77; /* permission denied */
public static final int CONFIG = 78; /* configuration error */
private ExitCodes() { /* no instance, just constants */ }
}

View File

@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cli;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import joptsimple.NonOptionArgumentSpec;
import joptsimple.OptionSet;
/**
* A cli tool which is made up of multiple subcommands.
*/
public class MultiCommand extends Command {
protected final Map<String, Command> subcommands = new LinkedHashMap<>();
private final NonOptionArgumentSpec<String> arguments = parser.nonOptions("command");
public MultiCommand(String description) {
super(description);
parser.posixlyCorrect(true);
}
@Override
protected void printAdditionalHelp(Terminal terminal) {
if (subcommands.isEmpty()) {
throw new IllegalStateException("No subcommands configured");
}
terminal.println("Commands");
terminal.println("--------");
for (Map.Entry<String, Command> subcommand : subcommands.entrySet()) {
terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description);
}
terminal.println("");
}
@Override
protected void execute(Terminal terminal, OptionSet options) throws Exception {
if (subcommands.isEmpty()) {
throw new IllegalStateException("No subcommands configured");
}
String[] args = arguments.values(options).toArray(new String[0]);
if (args.length == 0) {
throw new UserError(ExitCodes.USAGE, "Missing command");
}
Command subcommand = subcommands.get(args[0]);
if (subcommand == null) {
throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]");
}
subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal);
}
}
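
Wiring is done by mutating the subcommands map before main runs; a sketch reusing the hypothetical command above:

    class ExampleMultiCli extends MultiCommand {
        ExampleMultiCli() {
            super("example multitool"); // printed at the top of the help output
            subcommands.put("hello", new HelloCommand());
        }
    }

The posixlyCorrect(true) call in the constructor makes the parser stop at the first non-option, so anything after the subcommand name, flags included, is forwarded untouched to that subcommand's mainWithoutErrorHandling.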

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.common.cli;
package org.elasticsearch.cli;
import java.io.BufferedReader;
import java.io.Console;
@ -29,7 +29,7 @@ import java.nio.charset.Charset;
import org.elasticsearch.common.SuppressForbidden;
/**
* A Terminal wraps access to reading input and writing output for a {@link CliTool}.
* A Terminal wraps access to reading input and writing output for a cli.
*
* The available methods are similar to those of {@link Console}, with the ability
* to read either normal text or a password, and the ability to print a line
@ -61,7 +61,7 @@ public abstract class Terminal {
}
/** Sets the verbosity of the terminal. */
void setVerbosity(Verbosity verbosity) {
public void setVerbosity(Verbosity verbosity) {
this.verbosity = verbosity;
}
@ -89,35 +89,35 @@ public abstract class Terminal {
private static class ConsoleTerminal extends Terminal {
private static final Console console = System.console();
private static final Console CONSOLE = System.console();
ConsoleTerminal() {
super(System.lineSeparator());
}
static boolean isSupported() {
return console != null;
return CONSOLE != null;
}
@Override
public PrintWriter getWriter() {
return console.writer();
return CONSOLE.writer();
}
@Override
public String readText(String prompt) {
return console.readLine("%s", prompt);
return CONSOLE.readLine("%s", prompt);
}
@Override
public char[] readSecret(String prompt) {
return console.readPassword("%s", prompt);
return CONSOLE.readPassword("%s", prompt);
}
}
private static class SystemTerminal extends Terminal {
private final PrintWriter writer = newWriter();
private static final PrintWriter WRITER = newWriter();
SystemTerminal() {
super(System.lineSeparator());
@ -130,7 +130,7 @@ public abstract class Terminal {
@Override
public PrintWriter getWriter() {
return writer;
return WRITER;
}
@Override

View File

@ -17,19 +17,19 @@
* under the License.
*/
package org.elasticsearch.common.cli;
package org.elasticsearch.cli;
/**
* An exception representing a user fixable problem in {@link CliTool} usage.
* An exception representing a user fixable problem in {@link Command} usage.
*/
public class UserError extends Exception {
/** The exit status the cli should use when catching this user error. */
public final CliTool.ExitStatus exitStatus;
public final int exitCode;
/** Constructs a UserError with an exit status and message to show the user. */
public UserError(CliTool.ExitStatus exitStatus, String msg) {
public UserError(int exitCode, String msg) {
super(msg);
this.exitStatus = exitStatus;
this.exitCode = exitCode;
}
}
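
With the exit status reduced to a plain int, call sites pair UserError with an ExitCodes constant; a sketch, with the file check purely illustrative (fully-qualified names stand in for imports):

    java.nio.file.Path config = java.nio.file.Paths.get("elasticsearch.yml"); // illustrative input
    if (java.nio.file.Files.exists(config) == false) {
        throw new UserError(ExitCodes.NO_INPUT, "config file [" + config + "] does not exist");
    }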

View File

@ -19,12 +19,8 @@
package org.elasticsearch.client;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
@ -87,6 +83,7 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.Map;
@ -114,7 +111,7 @@ public interface Client extends ElasticsearchClient, Releasable {
default:
throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]");
}
}, false, Setting.Scope.CLUSTER);
}, Property.NodeScope);
/**
* The admin client that can be used to perform administrative operations.

View File

@ -62,6 +62,7 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.search.suggest.SuggestBuilder;
/**
* A handy one stop shop for creating requests (make sure to import static this class).
@ -127,7 +128,7 @@ public class Requests {
/**
* Creates a suggest request for getting suggestions from provided <code>indices</code>.
* The suggest query has to be set using the JSON source using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(org.elasticsearch.common.bytes.BytesReference)}.
* The suggest query has to be set using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(SuggestBuilder)}.
* @param indices The indices to suggest from. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @see org.elasticsearch.client.Client#suggest(org.elasticsearch.action.suggest.SuggestRequest)
*/
@ -342,7 +343,8 @@ public class Requests {
/**
* Creates a cluster health request.
*
* @param indices The indices to provide additional cluster health information for. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @param indices The indices to provide additional cluster health information for.
* Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @return The cluster health request
* @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest)
*/

View File

@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent {
private volatile boolean closed;
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =
Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT =
Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME =
Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
Setting.boolSetting("client.transport.sniff", false, Property.NodeScope);
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
@ -119,7 +124,7 @@ public class TransportClientNodesService extends AbstractComponent {
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
if (logger.isDebugEnabled()) {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
}
if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
@ -318,7 +323,7 @@ public class TransportClientNodesService extends AbstractComponent {
transportService.connectToNode(node);
} catch (Throwable e) {
it.remove();
logger.debug("failed to connect to discovered node [" + node + "]", e);
logger.debug("failed to connect to discovered node [{}]", e, node);
}
}
}

View File

@ -135,8 +135,8 @@ public class ClusterChangedEvent {
List<Index> deleted = null;
for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
IndexMetaData index = cursor.value;
IndexMetaData current = state.metaData().index(index.getIndex().getName());
if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
IndexMetaData current = state.metaData().index(index.getIndex());
if (current == null) {
if (deleted == null) {
deleted = new ArrayList<>();
}

View File

@ -58,6 +58,7 @@ import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule {
public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard";
public static final String BALANCED_ALLOCATOR = "balanced"; // default
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING =
new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
Collections.unmodifiableList(Arrays.asList(
SameShardAllocationDecider.class,
@ -136,6 +138,7 @@ public class ClusterModule extends AbstractModule {
bind(AllocationService.class).asEagerSingleton();
bind(DiscoveryNodeService.class).asEagerSingleton();
bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton();
bind(NodeConnectionsService.class).asEagerSingleton();
bind(OperationRouting.class).asEagerSingleton();
bind(MetaDataCreateIndexService.class).asEagerSingleton();
bind(MetaDataDeleteIndexService.class).asEagerSingleton();

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
@ -37,7 +38,7 @@ public class ClusterName implements Streamable {
throw new IllegalArgumentException("[cluster.name] must not be empty");
}
return s;
}, false, Setting.Scope.CLUSTER);
}, Property.NodeScope);
public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern());

View File

@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.TaskManager;
import java.util.List;
@ -154,9 +153,4 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
* @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue
*/
TimeValue getMaxTaskWaitTime();
/**
* Returns task manager created in the cluster service
*/
TaskManager getTaskManager();
}

View File

@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@ -64,8 +65,12 @@ import java.util.concurrent.TimeUnit;
*/
public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING =
Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10),
Property.Dynamic, Property.NodeScope);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING =
Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15),
Property.Dynamic, Property.NodeScope);
private volatile TimeValue updateFrequency;

View File

@ -0,0 +1,160 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledFuture;
import static org.elasticsearch.common.settings.Setting.Property;
import static org.elasticsearch.common.settings.Setting.positiveTimeSetting;
/**
 * This component is responsible for connecting to nodes once they are added to the cluster state, and for disconnecting
 * from them when they are removed. It also periodically checks that all connections are still open and restores them if
 * needed. Note that this component is *not* responsible for removing nodes from the cluster if they disconnect or do not
 * respond to pings. That is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection
 * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}.
*/
public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {
public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);
private final ThreadPool threadPool;
private final TransportService transportService;
// map from a current node to the number of failed connection attempts; 0 means successfully connected.
// if a node doesn't appear in this map it shouldn't be monitored
private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap();
private final KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();
private final TimeValue reconnectInterval;
private volatile ScheduledFuture<?> backgroundFuture = null;
@Inject
public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
}
public void connectToAddedNodes(ClusterChangedEvent event) {
// TODO: do this in parallel (and wait)
for (final DiscoveryNode node : event.nodesDelta().addedNodes()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.put(node, 0);
assert current == null : "node " + node + " was added in event but already in internal nodes";
validateNodeConnected(node);
}
}
}
public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
for (final DiscoveryNode node : event.nodesDelta().removedNodes()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
Integer current = nodes.remove(node);
assert current != null : "node " + node + " was removed in event but not in internal nodes";
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
}
}
void validateNodeConnected(DiscoveryNode node) {
assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock";
if (lifecycle.stoppedOrClosed() ||
nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
// nothing to do
} else {
try {
// connecting to an already connected node is a noop
transportService.connectToNode(node);
nodes.put(node, 0);
} catch (Exception e) {
Integer nodeFailureCount = nodes.get(node);
assert nodeFailureCount != null : node + " didn't have a counter in nodes map";
nodeFailureCount = nodeFailureCount + 1;
// log the first failure, then every sixth failure after that (counts 1, 7, 13, ...)
if ((nodeFailureCount % 6) == 1) {
logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
}
nodes.put(node, nodeFailureCount);
}
}
}
class ConnectionChecker extends AbstractRunnable {
@Override
public void onFailure(Throwable t) {
logger.warn("unexpected error while checking for node reconnects", t);
}
@Override
protected void doRun() {
for (DiscoveryNode node : nodes.keySet()) {
try (Releasable ignored = nodeLocks.acquire(node)) {
validateNodeConnected(node);
}
}
}
@Override
public void onAfter() {
if (lifecycle.started()) {
backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}
@Override
protected void doStart() {
backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker());
}
@Override
protected void doStop() {
FutureUtils.cancel(backgroundFuture);
}
@Override
protected void doClose() {
}
}
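
Editor's note: one detail worth calling out in validateNodeConnected above is that the per-node failure counter doubles as a log throttle. Because the warning fires only when (count % 6) == 1, it appears on the 1st, 7th, 13th... failed attempt rather than on every retry. A self-contained sketch of the same arithmetic, in plain Java and independent of the class above:

public class LogThrottleDemo {
    public static void main(String[] args) {
        for (int failureCount = 1; failureCount <= 14; failureCount++) {
            if (failureCount % 6 == 1) {
                // fires for counts 1, 7 and 13 only
                System.out.println("warn: failed to connect (tried [" + failureCount + "] times)");
            }
        }
    }
}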

View File

@ -69,15 +69,17 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
private final State state;
private final SnapshotId snapshotId;
private final boolean includeGlobalState;
private final boolean partial;
private final ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
private final List<String> indices;
private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
private final long startTime;
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
this.snapshotId = snapshotId;
this.includeGlobalState = includeGlobalState;
this.partial = partial;
this.indices = indices;
this.startTime = startTime;
if (shards == null) {
@ -90,7 +92,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
}
public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards);
this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
}
public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
@ -121,6 +123,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
return includeGlobalState;
}
public boolean partial() {
return partial;
}
public long startTime() {
return startTime;
}
@ -133,6 +139,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
Entry entry = (Entry) o;
if (includeGlobalState != entry.includeGlobalState) return false;
if (partial != entry.partial) return false;
if (startTime != entry.startTime) return false;
if (!indices.equals(entry.indices)) return false;
if (!shards.equals(entry.shards)) return false;
@ -148,6 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + (includeGlobalState ? 1 : 0);
result = 31 * result + (partial ? 1 : 0);
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
result = 31 * result + waitingIndices.hashCode();
@ -360,6 +368,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
for (int i = 0; i < entries.length; i++) {
SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
boolean includeGlobalState = in.readBoolean();
boolean partial = in.readBoolean();
State state = State.fromValue(in.readByte());
int indices = in.readVInt();
List<String> indexBuilder = new ArrayList<>();
@ -375,7 +384,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
State shardState = State.fromValue(in.readByte());
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
}
entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
}
return new SnapshotsInProgress(entries);
}
@ -386,6 +395,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
for (Entry entry : entries) {
entry.snapshotId().writeTo(out);
out.writeBoolean(entry.includeGlobalState());
out.writeBoolean(entry.partial());
out.writeByte(entry.state().value());
out.writeVInt(entry.indices().size());
for (String index : entry.indices()) {
@ -406,6 +416,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots");
static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state");
static final XContentBuilderString PARTIAL = new XContentBuilderString("partial");
static final XContentBuilderString STATE = new XContentBuilderString("state");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis");
@ -431,6 +442,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository());
builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot());
builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState());
builder.field(Fields.PARTIAL, entry.partial());
builder.field(Fields.STATE, entry.state());
builder.startArray(Fields.INDICES);
{
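
Editor's note: the serialization hunks above keep readFrom and writeTo symmetric — the new partial boolean is written immediately after includeGlobalState and read back from the same slot. A minimal sketch of that invariant, using plain java.io streams in place of Elasticsearch's StreamInput/StreamOutput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WireOrderDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeBoolean(true);   // includeGlobalState
        out.writeBoolean(false);  // partial -- the new field occupies the same slot on both sides

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        boolean includeGlobalState = in.readBoolean();
        boolean partial = in.readBoolean();
        System.out.println(includeGlobalState + " " + partial); // prints: true false
    }
}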

Some files were not shown because too many files have changed in this diff.