Merge branch 'master' into pr/16598-register-filter-settings
# Conflicts:
#	core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
#	core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
#	core/src/main/java/org/elasticsearch/common/settings/Setting.java
commit 9acb0bb28c
@@ -116,6 +116,7 @@ subprojects {
    "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
    "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
    "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
    "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
  ]
  configurations.all {
    resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
@@ -68,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin {
    testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
    // we "upgrade" these optional deps to provided for plugins, since they will run
    // with a full elasticsearch server that includes optional deps
    provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
    provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
    provided "com.vividsolutions:jts:${project.versions.jts}"
    provided "log4j:log4j:${project.versions.log4j}"
    provided "log4j:apache-log4j-extras:${project.versions.log4j}"
@@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy {
    }

    Map generateSubstitutions() {
        def stringSnap = { version ->
            if (version.endsWith("-SNAPSHOT")) {
                return version.substring(0, version.length() - 9)
            }
            return version
        }
        return [
            'name': extension.name,
            'description': extension.description,
            'version': extension.version,
            'elasticsearchVersion': VersionProperties.elasticsearch,
            'version': stringSnap(extension.version),
            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
            'javaVersion': project.targetCompatibility as String,
            'isolated': extension.isolated as String,
            'classname': extension.classname
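The generateSubstitutions() hunk above swaps in snapshot-stripped versions before they are substituted into the plugin properties. A minimal Java sketch of the rule stringSnap applies (illustrative only, not part of the commit):

    // Drop a trailing "-SNAPSHOT" (9 characters), e.g. "5.0.0-SNAPSHOT" -> "5.0.0".
    static String stripSnapshot(String version) {
        final String suffix = "-SNAPSHOT";
        if (version.endsWith(suffix)) {
            return version.substring(0, version.length() - suffix.length());
        }
        return version;
    }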
@@ -0,0 +1,98 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs LoggerUsageCheck on a set of directories.
 */
public class LoggerUsageTask extends LoggedExec {

    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    private File successMarker = new File(project.buildDir, 'markers/loggerUsage')

    private FileCollection classpath;

    private List<File> classDirectories;

    public LoggerUsageTask() {
        project.afterEvaluate {
            dependsOn(classpath)
            description = "Runs LoggerUsageCheck on ${classDirectories}"
            executable = new File(project.javaHome, 'bin/java')
            if (classDirectories == null) {
                classDirectories = []
                if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) {
                    classDirectories += [project.sourceSets.main.output.classesDir]
                    dependsOn project.tasks.classes
                }
                if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) {
                    classDirectories += [project.sourceSets.test.output.classesDir]
                    dependsOn project.tasks.testClasses
                }
            }
            doFirst({
                args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker')
                getClassDirectories().each {
                    args it.getAbsolutePath()
                }
            })
            doLast({
                successMarker.parentFile.mkdirs()
                successMarker.setText("", 'UTF-8')
            })
        }
    }

    @InputFiles
    FileCollection getClasspath() {
        return classpath
    }

    void setClasspath(FileCollection classpath) {
        this.classpath = classpath
    }

    @InputFiles
    List<File> getClassDirectories() {
        return classDirectories
    }

    void setClassDirectories(List<File> classDirectories) {
        this.classDirectories = classDirectories
    }

    @OutputFile
    File getSuccessMarker() {
        return successMarker
    }

    void setSuccessMarker(File successMarker) {
        this.successMarker = successMarker
    }
}
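LoggerUsageTask above produces no natural output, so it touches a marker file on success and declares it with @OutputFile; the class directories and classpath are the @InputFiles. Gradle can then skip the task while the marker is up to date. A rough Java sketch of the timestamp comparison this pattern enables (Gradle's real up-to-date check also snapshots input contents; names here are illustrative):

    import java.io.File;
    import java.util.List;

    // The check can be skipped while the success marker is newer than every input.
    static boolean upToDate(File marker, List<File> inputs) {
        if (!marker.exists()) {
            return false;
        }
        long markerTime = marker.lastModified();
        for (File input : inputs) {
            if (input.lastModified() > markerTime) {
                return false; // an input changed after the last successful run
            }
        }
        return true;
    }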
@@ -34,6 +34,7 @@ class PrecommitTasks {
        configureForbiddenApis(project),
        configureCheckstyle(project),
        configureNamingConventions(project),
        configureLoggerUsage(project),
        project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
        project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
        project.tasks.create('jarHell', JarHellTask.class),
@@ -64,20 +65,21 @@ class PrecommitTasks {
            internalRuntimeForbidden = true
            failOnUnsupportedJava = false
            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated']
            signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')]
            signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
                              getClass().getResource('/forbidden/es-all-signatures.txt')]
            suppressAnnotations = ['**.SuppressForbidden']
        }
        Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
        if (mainForbidden != null) {
            mainForbidden.configure {
                bundledSignatures += 'jdk-system-out'
                signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
                signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt')
            }
        }
        Task testForbidden = project.tasks.findByName('forbiddenApisTest')
        if (testForbidden != null) {
            testForbidden.configure {
                signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
                signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
            }
        }
        Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@@ -117,4 +119,18 @@ class PrecommitTasks {
        }
        return null
    }

    private static Task configureLoggerUsage(Project project) {
        Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class)

        project.configurations.create('loggerUsagePlugin')
        project.dependencies.add('loggerUsagePlugin',
                "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}")

        loggerUsageTask.configure {
            classpath = project.configurations.loggerUsagePlugin
        }

        return loggerUsageTask
    }
}
@@ -49,6 +49,15 @@ class ClusterConfiguration {
    @Input
    String jvmArgs = System.getProperty('tests.jvm.argline', '')

    /**
     * The seed nodes port file. In the case the cluster has more than one node we use a seed node
     * to form the cluster. The file is null if there is no seed node yet available.
     *
     * Note: this can only be null if the cluster has only one node or if the first node is not yet
     * configured. All nodes but the first node should see a non null value.
     */
    File seedNodePortsFile

    /**
     * A closure to call before the cluster is considered ready. The closure is passed the node info,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
@@ -119,4 +128,12 @@ class ClusterConfiguration {
        }
        extraConfigFiles.put(path, sourceFile)
    }

    /** Returns an address and port suitable for a uri to connect to this clusters seed node over transport protocol*/
    String seedNodeTransportUri() {
        if (seedNodePortsFile != null) {
            return seedNodePortsFile.readLines("UTF-8").get(0)
        }
        return null;
    }
}
@@ -58,6 +58,13 @@ class ClusterFormationTasks {
        List<NodeInfo> nodes = []
        for (int i = 0; i < config.numNodes; ++i) {
            NodeInfo node = new NodeInfo(config, i, project, task)
            if (i == 0) {
                if (config.seedNodePortsFile != null) {
                    // we might allow this in the future to be set but for now we are the only authority to set this!
                    throw new GradleException("seedNodePortsFile has a non-null value but first node has not been intialized")
                }
                config.seedNodePortsFile = node.transportPortsFile;
            }
            nodes.add(node)
            startTasks.add(configureNode(project, task, node))
        }
@@ -220,20 +227,22 @@ class ClusterFormationTasks {
                'node.testattr' : 'test',
                'repositories.url.allowed_urls': 'http://snapshot.test*'
        ]
        if (node.config.numNodes == 1) {
            esConfig['http.port'] = node.config.httpPort
            esConfig['transport.tcp.port'] = node.config.transportPort
        } else {
            // TODO: fix multi node so it doesn't use hardcoded prots
            esConfig['http.port'] = 9400 + node.nodeNum
            esConfig['transport.tcp.port'] = 9500 + node.nodeNum
            esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')

        }
        esConfig.putAll(node.config.settings)

        Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
        writeConfig.doFirst {
            if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
                ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
                    resourceexists {
                        file(file: node.config.seedNodePortsFile.toString())
                    }
                }
                // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
                // host and join the cluster via that.
                esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
            }
            File configFile = new File(node.confDir, 'elasticsearch.yml')
            logger.info("Configuring ${configFile}")
            configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
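In the multi-node branch above, node N gets http port 9400 + N and transport port 9500 + N, and the initial unicast host list simply enumerates all hardcoded transport ports until the seed node's real port is known. A hypothetical Java sketch of the host list the collect/join expression computes:

    // Hardcoded transport ports: node i listens on 9500 + i.
    static String unicastHosts(int numNodes) {
        StringBuilder hosts = new StringBuilder();
        for (int i = 0; i < numNodes; i++) {
            if (i > 0) {
                hosts.append(',');
            }
            hosts.append("localhost:").append(9500 + i);
        }
        return hosts.toString(); // e.g. "localhost:9500,localhost:9501" for numNodes == 2
    }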
@@ -1486,7 +1486,6 @@
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DateRangeTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />
@@ -0,0 +1,30 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057

@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom

java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests

@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()
@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
@@ -33,23 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.Filter
org.apache.lucene.search.FilteredQuery
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
org.apache.lucene.search.QueryWrapperFilter
org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

@defaultMessage Specify a location for the temp file/directory instead.
java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])
@@ -62,9 +45,6 @@ java.io.ObjectInput

java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead

java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057

@defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int)
java.net.Socket#<init>(java.lang.String,int)
@@ -103,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use
java.lang.reflect.AccessibleObject#setAccessible(boolean)
java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean)

@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()

@defaultMessage this method needs special permission
java.lang.Thread#getAllStackTraces()

@@ -126,8 +103,3 @@ java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET

java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom

java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
@@ -1,8 +1,8 @@
elasticsearch = 5.0.0
lucene = 5.5.0
lucene = 6.0.0-snapshot-bea235f

# optional dependencies
spatial4j = 0.5
spatial4j = 0.6
jts = 1.13
jackson = 2.7.1
log4j = 1.2.17
@@ -42,13 +42,14 @@ dependencies {
  compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
  compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
  compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
  compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

  compile 'org.elasticsearch:securesm:1.0'

  // utilities
  compile 'commons-cli:commons-cli:1.3.1'
  compile 'net.sf.jopt-simple:jopt-simple:4.9'
  compile 'com.carrotsearch:hppc:0.7.1'

  // time handling, remove with java 8 time
@@ -71,7 +72,7 @@ dependencies {
  compile 'org.hdrhistogram:HdrHistogram:2.1.6'

  // lucene spatial
  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "com.vividsolutions:jts:${versions.jts}", optional

  // logging
@@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [
  'org.apache.commons.logging.Log',
  'org.apache.commons.logging.LogFactory',

  // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
  'org.apache.regexp.CharacterIterator',
  'org.apache.regexp.RE',
  'org.apache.regexp.REProgram',

  // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
  'org.apache.tomcat.jni.Buffer',
  'org.apache.tomcat.jni.Library',
@@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [
  'org.jboss.marshalling.MarshallingConfiguration',
  'org.jboss.marshalling.Unmarshaller',

  // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',

  // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
            if (boosts != null) {
                boost = boosts[i];
            }
            builder.append(ToStringUtils.boost(boost));
            if (boost != 1f) {
                builder.append('^').append(boost);
            }
            builder.append(", ");
        }
        if (terms.length > 0) {
            builder.setLength(builder.length() - 2);
        }
        builder.append("])");
        builder.append(ToStringUtils.boost(getBoost()));
        return builder.toString();
    }

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getFieldQuerySingle(field, queryText, quoted);
@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return super.getFieldQuery(field, queryText, slop);
@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    }

@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
                clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
            }
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
} else {
    return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getPrefixQuerySingle(field, termStr);
@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
        for (String token : tlist) {
            clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
        }
        return getBooleanQuery(clauses, true);
        return getBooleanQueryCoordDisabled(clauses);
    }
}

@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getWildcardQuerySingle(field, termStr);
@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
            }
            if (clauses.size() == 0) // happens for stopwords
                return null;
            return getBooleanQuery(clauses, true);
            return getBooleanQueryCoordDisabled(clauses);
        }
    } else {
        return getRegexpQuerySingle(field, termStr);
@@ -740,9 +741,23 @@ public class MapperQueryParser extends QueryParser {
        }
    }

    /**
     * @deprecated review all use of this, don't rely on coord
     */
    @Deprecated
    protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setDisableCoord(true);
        for (BooleanClause clause : clauses) {
            builder.add(clause);
        }
        return fixNegativeQueryIfNeeded(builder.build());
    }

    @Override
    protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
        Query q = super.getBooleanQuery(clauses, disableCoord);
    protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
        Query q = super.getBooleanQuery(clauses);
        if (q == null) {
            return null;
        }
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
        }
        pq = builder.build();
        //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
        assert q.getBoost() == 1f;
        assert q instanceof BoostQuery == false;
        return pq;
    } else if (q instanceof MultiPhraseQuery) {
@@ -26,8 +26,7 @@ import java.io.IOException;
/**
 * Abstract decorator class of a DocIdSetIterator
 * implementation that provides on-demand filter/validation
 * mechanism on an underlying DocIdSetIterator. See {@link
 * FilteredDocIdSet}.
 * mechanism on an underlying DocIdSetIterator.
 */
public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
    protected DocIdSetIterator _innerIter;
@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
        if (numTerms > 16) {
            for (Term[] currentPosTerm : terms) {
                for (Term term : currentPosTerm) {
                    super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
                    super.flatten(new TermQuery(term), reader, flatQueries, 1F);
                }
            }
            return;
@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
            queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
        }
        Query query = queryBuilder.build();
        this.flatten(query, reader, flatQueries, orig.getBoost());
        this.flatten(query, reader, flatQueries, 1F);
    } else {
        Term[] t = terms.get(currentPos);
        for (int i = 0; i < t.length; i++) {
@@ -35,212 +35,10 @@ import java.io.IOException;
@SuppressWarnings("deprecation")
public class Version {

    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
    // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator
    // AA values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
    // the (internal) format of the id is there so we can easily do after/before checks on the id

    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;

    public static final int V_0_18_0_ID = /*00*/180099;
    public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_1_ID = /*00*/180199;
    public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_2_ID = /*00*/180299;
    public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_3_ID = /*00*/180399;
    public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_4_ID = /*00*/180499;
    public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_5_ID = /*00*/180599;
    public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_6_ID = /*00*/180699;
    public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_7_ID = /*00*/180799;
    public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_18_8_ID = /*00*/180899;
    public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC1_ID = /*00*/190051;
    public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC2_ID = /*00*/190052;
    public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_RC3_ID = /*00*/190053;
    public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_19_0_ID = /*00*/190099;
    public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_1_ID = /*00*/190199;
    public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_2_ID = /*00*/190299;
    public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_3_ID = /*00*/190399;
    public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_4_ID = /*00*/190499;
    public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_5_ID = /*00*/190599;
    public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_6_ID = /*00*/190699;
    public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_7_ID = /*00*/190799;
    public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_8_ID = /*00*/190899;
    public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_9_ID = /*00*/190999;
    public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_10_ID = /*00*/191099;
    public static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_11_ID = /*00*/191199;
    public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_12_ID = /*00*/191299;
    public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_19_13_ID = /*00*/191399;
    public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_20_0_RC1_ID = /*00*/200051;
    public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_0_ID = /*00*/200099;
    public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_1_ID = /*00*/200199;
    public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_2_ID = /*00*/200299;
    public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_3_ID = /*00*/200399;
    public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_4_ID = /*00*/200499;
    public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_5_ID = /*00*/200599;
    public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
    public static final int V_0_20_6_ID = /*00*/200699;
    public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);

    public static final int V_0_90_0_Beta1_ID = /*00*/900001;
    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC1_ID = /*00*/900051;
    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
    public static final int V_0_90_0_RC2_ID = /*00*/900052;
    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_0_ID = /*00*/900099;
    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
    public static final int V_0_90_1_ID = /*00*/900199;
    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_2_ID = /*00*/900299;
    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
    public static final int V_0_90_3_ID = /*00*/900399;
    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_4_ID = /*00*/900499;
    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_5_ID = /*00*/900599;
    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
    public static final int V_0_90_6_ID = /*00*/900699;
    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_7_ID = /*00*/900799;
    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_0_90_8_ID = /*00*/900899;
    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_9_ID = /*00*/900999;
    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_10_ID = /*00*/901099;
    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_11_ID = /*00*/901199;
    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_12_ID = /*00*/901299;
    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_0_90_13_ID = /*00*/901399;
    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);

    public static final int V_1_0_0_Beta1_ID = 1000001;
    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
    public static final int V_1_0_0_Beta2_ID = 1000002;
    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC1_ID = 1000051;
    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_RC2_ID = 1000052;
    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_0_ID = 1000099;
    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_1_ID = 1000199;
    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_2_ID = 1000299;
    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_0_3_ID = 1000399;
    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
    public static final int V_1_1_0_ID = 1010099;
    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_1_ID = 1010199;
    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_1_2_ID = 1010299;
    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
    public static final int V_1_2_0_ID = 1020099;
    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_1_ID = 1020199;
    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_2_ID = 1020299;
    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_3_ID = 1020399;
    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_2_4_ID = 1020499;
    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
    public static final int V_1_3_0_ID = 1030099;
    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_1_ID = 1030199;
    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_2_ID = 1030299;
    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_3_ID = 1030399;
    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_4_ID = 1030499;
    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_5_ID = 1030599;
    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_6_ID = 1030699;
    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_7_ID = 1030799;
    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_8_ID = 1030899;
    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_3_9_ID = 1030999;
    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
    public static final int V_1_4_0_Beta1_ID = 1040001;
    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
    public static final int V_1_4_0_ID = 1040099;
    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_1_ID = 1040199;
    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_2_ID = 1040299;
    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
    public static final int V_1_4_3_ID = 1040399;
    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_4_ID = 1040499;
    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
    public static final int V_1_4_5_ID = 1040599;
    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_0_ID = 1050099;
    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_1_ID = 1050199;
    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_5_2_ID = 1050299;
    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_0_ID = 1060099;
    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_1_ID = 1060199;
    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_6_2_ID = 1060299;
    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_0_ID = 1070099;
    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_1_ID = 1070199;
    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_2_ID = 1070299;
    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_3_ID = 1070399;
    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_4_ID = 1070499;
    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
    public static final int V_1_7_5_ID = 1070599;
    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);

    public static final int V_2_0_0_beta1_ID = 2000001;
    public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -265,7 +63,7 @@ public class Version {
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_5_0_0_ID = 5000099;
    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final Version CURRENT = V_5_0_0;

    static {
@@ -303,198 +101,6 @@ public class Version {
                return V_2_0_0_beta2;
            case V_2_0_0_beta1_ID:
                return V_2_0_0_beta1;
            case V_1_7_5_ID:
                return V_1_7_5;
            case V_1_7_4_ID:
                return V_1_7_4;
            case V_1_7_3_ID:
                return V_1_7_3;
            case V_1_7_2_ID:
                return V_1_7_2;
            case V_1_7_1_ID:
                return V_1_7_1;
            case V_1_7_0_ID:
                return V_1_7_0;
            case V_1_6_2_ID:
                return V_1_6_2;
            case V_1_6_1_ID:
                return V_1_6_1;
            case V_1_6_0_ID:
                return V_1_6_0;
            case V_1_5_2_ID:
                return V_1_5_2;
            case V_1_5_1_ID:
                return V_1_5_1;
            case V_1_5_0_ID:
                return V_1_5_0;
            case V_1_4_5_ID:
                return V_1_4_5;
            case V_1_4_4_ID:
                return V_1_4_4;
            case V_1_4_3_ID:
                return V_1_4_3;
            case V_1_4_2_ID:
                return V_1_4_2;
            case V_1_4_1_ID:
                return V_1_4_1;
            case V_1_4_0_ID:
                return V_1_4_0;
            case V_1_4_0_Beta1_ID:
                return V_1_4_0_Beta1;
            case V_1_3_9_ID:
                return V_1_3_9;
            case V_1_3_8_ID:
                return V_1_3_8;
            case V_1_3_7_ID:
                return V_1_3_7;
            case V_1_3_6_ID:
                return V_1_3_6;
            case V_1_3_5_ID:
                return V_1_3_5;
            case V_1_3_4_ID:
                return V_1_3_4;
            case V_1_3_3_ID:
                return V_1_3_3;
            case V_1_3_2_ID:
                return V_1_3_2;
            case V_1_3_1_ID:
                return V_1_3_1;
            case V_1_3_0_ID:
                return V_1_3_0;
            case V_1_2_4_ID:
                return V_1_2_4;
            case V_1_2_3_ID:
                return V_1_2_3;
            case V_1_2_2_ID:
                return V_1_2_2;
            case V_1_2_1_ID:
                return V_1_2_1;
            case V_1_2_0_ID:
                return V_1_2_0;
            case V_1_1_2_ID:
                return V_1_1_2;
            case V_1_1_1_ID:
                return V_1_1_1;
            case V_1_1_0_ID:
                return V_1_1_0;
            case V_1_0_3_ID:
                return V_1_0_3;
            case V_1_0_2_ID:
                return V_1_0_2;
            case V_1_0_1_ID:
                return V_1_0_1;
            case V_1_0_0_ID:
                return V_1_0_0;
            case V_1_0_0_RC2_ID:
                return V_1_0_0_RC2;
            case V_1_0_0_RC1_ID:
                return V_1_0_0_RC1;
            case V_1_0_0_Beta2_ID:
                return V_1_0_0_Beta2;
            case V_1_0_0_Beta1_ID:
                return V_1_0_0_Beta1;
            case V_0_90_13_ID:
                return V_0_90_13;
            case V_0_90_12_ID:
                return V_0_90_12;
            case V_0_90_11_ID:
                return V_0_90_11;
            case V_0_90_10_ID:
                return V_0_90_10;
            case V_0_90_9_ID:
                return V_0_90_9;
            case V_0_90_8_ID:
                return V_0_90_8;
            case V_0_90_7_ID:
                return V_0_90_7;
            case V_0_90_6_ID:
                return V_0_90_6;
            case V_0_90_5_ID:
                return V_0_90_5;
            case V_0_90_4_ID:
                return V_0_90_4;
            case V_0_90_3_ID:
                return V_0_90_3;
            case V_0_90_2_ID:
                return V_0_90_2;
            case V_0_90_1_ID:
                return V_0_90_1;
            case V_0_90_0_ID:
                return V_0_90_0;
            case V_0_90_0_RC2_ID:
                return V_0_90_0_RC2;
            case V_0_90_0_RC1_ID:
                return V_0_90_0_RC1;
            case V_0_90_0_Beta1_ID:
                return V_0_90_0_Beta1;
            case V_0_20_6_ID:
                return V_0_20_6;
            case V_0_20_5_ID:
                return V_0_20_5;
            case V_0_20_4_ID:
                return V_0_20_4;
            case V_0_20_3_ID:
                return V_0_20_3;
            case V_0_20_2_ID:
                return V_0_20_2;
            case V_0_20_1_ID:
                return V_0_20_1;
            case V_0_20_0_ID:
                return V_0_20_0;
            case V_0_20_0_RC1_ID:
                return V_0_20_0_RC1;
            case V_0_19_0_RC1_ID:
                return V_0_19_0_RC1;
            case V_0_19_0_RC2_ID:
                return V_0_19_0_RC2;
            case V_0_19_0_RC3_ID:
                return V_0_19_0_RC3;
            case V_0_19_0_ID:
                return V_0_19_0;
            case V_0_19_1_ID:
                return V_0_19_1;
            case V_0_19_2_ID:
                return V_0_19_2;
            case V_0_19_3_ID:
                return V_0_19_3;
            case V_0_19_4_ID:
                return V_0_19_4;
            case V_0_19_5_ID:
                return V_0_19_5;
            case V_0_19_6_ID:
                return V_0_19_6;
            case V_0_19_7_ID:
                return V_0_19_7;
            case V_0_19_8_ID:
                return V_0_19_8;
            case V_0_19_9_ID:
                return V_0_19_9;
            case V_0_19_10_ID:
                return V_0_19_10;
            case V_0_19_11_ID:
                return V_0_19_11;
            case V_0_19_12_ID:
                return V_0_19_12;
            case V_0_19_13_ID:
                return V_0_19_13;
            case V_0_18_0_ID:
                return V_0_18_0;
            case V_0_18_1_ID:
                return V_0_18_1;
            case V_0_18_2_ID:
                return V_0_18_2;
            case V_0_18_3_ID:
                return V_0_18_3;
            case V_0_18_4_ID:
                return V_0_18_4;
            case V_0_18_5_ID:
                return V_0_18_5;
            case V_0_18_6_ID:
                return V_0_18_6;
            case V_0_18_7_ID:
                return V_0_18_7;
            case V_0_18_8_ID:
                return V_0_18_8;
            default:
                return new Version(id, org.apache.lucene.util.Version.LATEST);
        }
@@ -531,15 +137,23 @@ public class Version {
        if (!Strings.hasLength(version)) {
            return Version.CURRENT;
        }
        final boolean snapshot; // this is some BWC for 2.x and before indices
        if (snapshot = version.endsWith("-SNAPSHOT")) {
            version = version.substring(0, version.length() - 9);
        }
        String[] parts = version.split("\\.|\\-");
        if (parts.length < 3 || parts.length > 4) {
            throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
        }

        try {

            final int rawMajor = Integer.parseInt(parts[0]);
            if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
                throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
            }
            final int betaOffset = rawMajor < 5 ? 0 : 25;
            //we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
            final int major = Integer.parseInt(parts[0]) * 1000000;
            final int major = rawMajor * 1000000;
            final int minor = Integer.parseInt(parts[1]) * 10000;
            final int revision = Integer.parseInt(parts[2]) * 100;

@@ -547,11 +161,17 @@ public class Version {
            int build = 99;
            if (parts.length == 4) {
                String buildStr = parts[3];
                if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = Integer.parseInt(buildStr.substring(4));
                }
                if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                if (buildStr.startsWith("alpha")) {
                    assert rawMajor >= 5 : "major must be >= 5 but was " + major;
                    build = Integer.parseInt(buildStr.substring(5));
                    assert build < 25 : "expected a beta build but " + build + " >= 25";
                } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
                    build = betaOffset + Integer.parseInt(buildStr.substring(4));
                    assert build < 50 : "expected a beta build but " + build + " >= 50";
                } else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                    build = Integer.parseInt(buildStr.substring(2)) + 50;
                } else {
                    throw new IllegalArgumentException("unable to parse version " + version);
                }
            }

@@ -614,13 +234,16 @@ public class Version {
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(major).append('.').append(minor).append('.').append(revision);
        if (isBeta()) {
        if (isAlpha()) {
            sb.append("-alpha");
            sb.append(build);
        } else if (isBeta()) {
            if (major >= 2) {
                sb.append("-beta");
            } else {
                sb.append(".Beta");
            }
            sb.append(build);
            sb.append(major < 5 ? build : build-25);
        } else if (build < 99) {
            if (major >= 2) {
                sb.append("-rc");
@@ -656,7 +279,16 @@ public class Version {
    }

    public boolean isBeta() {
        return build < 50;
        return major < 5 ? build < 50 : build >= 25 && build < 50;
    }

    /**
     * Returns true iff this version is an alpha version
     * Note: This has been introduced in elasticsearch version 5. Previous versions will never
     * have an alpha version.
     */
    public boolean isAlpha() {
        return major < 5 ? false : build < 25;
    }

    public boolean isRC() {
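Putting the Version hunks together: the id packs major/minor/revision/build as XXYYZZAA, and from 5.0 on the AA band encodes the release stage (alpha below 25, beta 25-49, rc 50-98, 99 for a GA release). A hedged Java sketch of the arithmetic (the method name is illustrative, not the class's actual API):

    // Pack a version into the XXYYZZAA id described in the comment at the top of Version.
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1000000 + minor * 10000 + revision * 100 + build;
    }
    // For a 5.x version: versionId(5, 0, 0, 1)  == 5000001  // "5.0.0-alpha1" (build < 25)
    //                    versionId(5, 0, 0, 26) == 5000026  // "5.0.0-beta1"  (printed as build - 25)
    //                    versionId(5, 0, 0, 51) == 5000051  // an RC build    (50 <= build < 99)
    //                    versionId(5, 0, 0, 99) == 5000099  // "5.0.0" release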
@@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
        numberOfPendingTasks = in.readInt();
        timedOut = in.readBoolean();
        numberOfInFlightFetch = in.readInt();
        if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
            delayedUnassignedShards= in.readInt();
        }
        taskMaxWaitingTime = TimeValue.readTimeValue(in);
    }

@@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
        out.writeInt(numberOfPendingTasks);
        out.writeBoolean(timedOut);
        out.writeInt(numberOfInFlightFetch);
        if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
            out.writeInt(delayedUnassignedShards);
        }
        taskMaxWaitingTime.writeTo(out);
    }

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;
@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
@Nullable
|
||||
private PluginsAndModules plugins;
|
||||
|
||||
NodeInfo() {
|
||||
@Nullable
|
||||
private IngestInfo ingest;
|
||||
|
||||
public NodeInfo() {
|
||||
}
|
||||
|
||||
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
|
||||
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
|
||||
super(node);
|
||||
this.version = version;
|
||||
this.build = build;
|
||||
|
@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
this.transport = transport;
|
||||
this.http = http;
|
||||
this.plugins = plugins;
|
||||
this.ingest = ingest;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
return this.plugins;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public IngestInfo getIngest() {
|
||||
return ingest;
|
||||
}
|
||||
|
||||
public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
|
||||
NodeInfo nodeInfo = new NodeInfo();
|
||||
nodeInfo.readFrom(in);
|
||||
|
@ -220,6 +230,10 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
plugins = new PluginsAndModules();
|
||||
plugins.readFrom(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
ingest = new IngestInfo();
|
||||
ingest.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -285,5 +299,11 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
out.writeBoolean(true);
|
||||
plugins.writeTo(out);
|
||||
}
|
||||
if (ingest == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
ingest.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
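Note the symmetry between the readFrom and writeTo hunks: writeTo emits a presence boolean before the ingest payload and readFrom consumes the flag and payload in the same order, which is what keeps the stream aligned. The plugins field just above follows the identical pattern.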
@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
    private boolean transport = true;
    private boolean http = true;
    private boolean plugins = true;
    private boolean ingest = true;

    public NodesInfoRequest() {
    }

@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = false;
        http = false;
        plugins = false;
        ingest = false;
        return this;
    }

@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = true;
        http = true;
        plugins = true;
        ingest = true;
        return this;
    }

@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        return plugins;
    }

    /**
     * Should information about ingest be returned
     * @param ingest true if you want info
     */
    public NodesInfoRequest ingest(boolean ingest) {
        this.ingest = ingest;
        return this;
    }

    /**
     * @return true if information about ingest is requested
     */
    public boolean ingest() {
        return ingest;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        transport = in.readBoolean();
        http = in.readBoolean();
        plugins = in.readBoolean();
        ingest = in.readBoolean();
    }

    @Override

@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
        out.writeBoolean(transport);
        out.writeBoolean(http);
        out.writeBoolean(plugins);
        out.writeBoolean(ingest);
    }
}
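A hypothetical caller-side sketch of the new flag, composing with the existing clear()/all() style:

    // sketch only: ask every node for ingest info and nothing else
    NodesInfoRequest request = new NodesInfoRequest();
    request.clear();      // clear() now resets the ingest flag too
    request.ingest(true); // opt back in to just the ingest section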
@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
        request().plugins(plugins);
        return this;
    }

    /**
     * Should the node ingest info be returned.
     */
    public NodesInfoRequestBuilder setIngest(boolean ingest) {
        request().ingest(ingest);
        return this;
    }
}
@ -121,6 +121,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
            if (nodeInfo.getPlugins() != null) {
                nodeInfo.getPlugins().toXContent(builder, params);
            }
            if (nodeInfo.getIngest() != null) {
                nodeInfo.getIngest().toXContent(builder, params);
            }

            builder.endObject();
        }
@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
    protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
        NodesInfoRequest request = nodeRequest.request;
        return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
                request.transport(), request.http(), request.plugins());
                request.transport(), request.http(), request.plugins(), request.ingest());
    }

    @Override

@ -95,7 +95,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
        public NodeInfoRequest() {
        }

        NodeInfoRequest(String nodeId, NodesInfoRequest request) {
        public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
            super(nodeId);
            this.request = request;
        }
@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.indices.NodeIndicesStats;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.monitor.os.OsStats;

@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
    @Nullable
    private DiscoveryStats discoveryStats;

    @Nullable
    private IngestStats ingestStats;

    NodeStats() {
    }

@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
                     @Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http,
                     @Nullable AllCircuitBreakerStats breaker,
                     @Nullable ScriptStats scriptStats,
                     @Nullable DiscoveryStats discoveryStats) {
                     @Nullable DiscoveryStats discoveryStats,
                     @Nullable IngestStats ingestStats) {
        super(node);
        this.timestamp = timestamp;
        this.indices = indices;

@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        this.breaker = breaker;
        this.scriptStats = scriptStats;
        this.discoveryStats = discoveryStats;
        this.ingestStats = ingestStats;
    }

    public long getTimestamp() {

@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        return this.discoveryStats;
    }

    @Nullable
    public IngestStats getIngestStats() {
        return ingestStats;
    }

    public static NodeStats readNodeStats(StreamInput in) throws IOException {
        NodeStats nodeInfo = new NodeStats();
        nodeInfo.readFrom(in);

@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
        scriptStats = in.readOptionalStreamable(ScriptStats::new);
        discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));

        ingestStats = in.readOptionalWritable(IngestStats.PROTO);
    }

    @Override

@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        out.writeOptionalStreamable(breaker);
        out.writeOptionalStreamable(scriptStats);
        out.writeOptionalStreamable(discoveryStats);
        out.writeOptionalWriteable(ingestStats);
    }

    @Override

@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
            getDiscoveryStats().toXContent(builder, params);
        }

        if (getIngestStats() != null) {
            getIngestStats().toXContent(builder, params);
        }

        return builder;
    }
}
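Unlike NodeInfo, which spells the presence-flag pattern out by hand, NodeStats leans on the readOptionalWritable/writeOptionalWriteable helpers, with IngestStats.PROTO serving as the prototype that reads the payload back off the wire.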
@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
    private boolean breaker;
    private boolean script;
    private boolean discovery;
    private boolean ingest;

    public NodesStatsRequest() {
    }

@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        this.breaker = true;
        this.script = true;
        this.discovery = true;
        this.ingest = true;
        return this;
    }

@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        this.breaker = false;
        this.script = false;
        this.discovery = false;
        this.ingest = false;
        return this;
    }

@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        return this;
    }

    public boolean ingest() {
        return ingest;
    }

    /**
     * Should ingest statistics be returned.
     */
    public NodesStatsRequest ingest(boolean ingest) {
        this.ingest = ingest;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {

@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        breaker = in.readBoolean();
        script = in.readBoolean();
        discovery = in.readBoolean();
        ingest = in.readBoolean();
    }

    @Override

@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        out.writeBoolean(breaker);
        out.writeBoolean(script);
        out.writeBoolean(discovery);
        out.writeBoolean(ingest);
    }

}
@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<Nodes
        request.discovery(discovery);
        return this;
    }

    /**
     * Should ingest statistics be returned.
     */
    public NodesStatsRequestBuilder ingest(boolean ingest) {
        request.ingest(ingest);
        return this;
    }
}
@ -80,7 +80,8 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
    protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
        NodesStatsRequest request = nodeStatsRequest.request;
        return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(),
                request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery());
                request.fs(), request.transport(), request.http(), request.breaker(), request.script(), request.discovery(),
                request.ingest());
    }

    @Override
@ -53,12 +53,18 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
        return super.match(task) && task instanceof CancellableTask;
    }

    public CancelTasksRequest reason(String reason) {
    /**
     * Set the reason for canceling the task.
     */
    public CancelTasksRequest setReason(String reason) {
        this.reason = reason;
        return this;
    }

    public String reason() {
    /**
     * The reason for canceling the task.
     */
    public String getReason() {
        return reason;
    }
}
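With the accessors renamed to setReason/getReason (matching the set*/get* sweep through BaseTasksRequest further down), a cancellation request reads like the rest of the tasks API. A hypothetical usage sketch:

    // sketch only; TaskId(nodeId, id) identifies the task to cancel
    CancelTasksRequest request = new CancelTasksRequest();
    request.setTaskId(new TaskId("node1", 42));
    request.setReason("canceled by an administrator");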
@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
    }

    protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
        if (request.taskId().isSet() == false) {
        if (request.getTaskId().isSet() == false) {
            // we are only checking one task, we can optimize it
            CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
            CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept(task);
                } else {
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
                }
            } else {
                if (taskManager.getTask(request.taskId().getId()) != null) {
                if (taskManager.getTask(request.getTaskId().getId()) != null) {
                    // The task exists, but doesn't support cancellation
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
                } else {
                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
                }
            }
        } else {

@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
    @Override
    protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
        final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
        if (childNodes != null) {
            if (childNodes.isEmpty()) {
                logger.trace("cancelling task {} with no children", cancellableTask.getId());
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            } else {
                logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
                setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            }
        } else {
@ -31,31 +31,49 @@ import java.io.IOException;
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {

    private boolean detailed = false;
    private boolean waitForCompletion = false;

    /**
     * Should the detailed task information be returned.
     */
    public boolean detailed() {
    public boolean getDetailed() {
        return this.detailed;
    }

    /**
     * Should the detailed task information be returned.
     */
    public ListTasksRequest detailed(boolean detailed) {
    public ListTasksRequest setDetailed(boolean detailed) {
        this.detailed = detailed;
        return this;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public boolean getWaitForCompletion() {
        return waitForCompletion;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
        this.waitForCompletion = waitForCompletion;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        detailed = in.readBoolean();
        waitForCompletion = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(detailed);
        out.writeBoolean(waitForCompletion);
    }
}
@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksReques
     * Should detailed task information be returned.
     */
    public ListTasksRequestBuilder setDetailed(boolean detailed) {
        request.detailed(detailed);
        request.setDetailed(detailed);
        return this;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public final ListTasksRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
        request.setWaitForCompletion(waitForCompletion);
        return this;
    }
}
@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;

@ -29,18 +31,24 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.function.Consumer;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

/**
 *
 */
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {
    private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
    private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {

@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas

    @Override
    protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
        return task.taskInfo(clusterService.localNode(), request.detailed());
        return task.taskInfo(clusterService.localNode(), request.getDetailed());
    }

    @Override
    protected void processTasks(ListTasksRequest request, Consumer<Task> operation) {
        if (false == request.getWaitForCompletion()) {
            super.processTasks(request, operation);
            return;
        }
        // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
        TimeValue timeout = request.getTimeout();
        if (timeout == null) {
            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
        }
        long timeoutTime = System.nanoTime() + timeout.nanos();
        super.processTasks(request, operation.andThen((Task t) -> {
            while (System.nanoTime() - timeoutTime < 0) {
                if (taskManager.getTask(t.getId()) == null) {
                    return;
                }
                try {
                    Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
                } catch (InterruptedException e) {
                    throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
                }
            }
            throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
        }));
    }

    @Override
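A caller-side sketch of the new wait-for-completion path: processTasks wraps the per-task operation so every matched task is polled each 100ms (WAIT_FOR_COMPLETION_POLL) until it leaves the task manager, bounded by the request timeout (30s by default):

    // sketch only: exercise the flags added in this hunk
    ListTasksRequest request = new ListTasksRequest();
    request.setActions("indices:data/write/*");         // action mask filter
    request.setWaitForCompletion(true);                 // block until matched tasks finish
    request.setTimeout(TimeValue.timeValueSeconds(10)); // else DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT applies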
@ -98,8 +98,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

    @Override
    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
        List<ShardStats> shardsStats = new ArrayList<>();
        for (IndexService indexService : indicesService) {
            for (IndexShard indexShard : indexService) {
@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc

    @Override
    protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
        IndexService service = indicesService.indexService(shardRouting.getIndexName());
        IndexService service = indicesService.indexService(shardRouting.index());
        if (service != null) {
            IndexShard shard = service.getShardOrNull(shardRouting.id());
            boolean clearedAtLeastOne = false;
@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi

    @Override
    protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName());
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.index());
        IndexShard indexShard = indexService.getShard(shardRouting.id());
        return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
    }
@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;

@ -104,8 +105,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha

    @Override
    protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
        final IndexService indexService = indicesService.indexServiceSafe(request.index());
        final IndexShard indexShard = indexService.getShard(request.shardId().id());
        ShardId shardId = request.shardId();
        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        final IndexShard indexShard = indexService.getShard(shardId.getId());

        long[] preVersions = new long[request.items().length];
        VersionType[] preVersionTypes = new VersionType[request.items().length];
@ -112,7 +112,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
                logger.error("failed to execute pipeline for a bulk request", throwable);
                listener.onFailure(throwable);
            } else {
                long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS);
                long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
                BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
                ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
                if (bulkRequest.requests().isEmpty()) {
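The two TimeUnit expressions are equivalent; the new form simply reads in the direction of the conversion:

    long elapsedNanos = System.nanoTime() - ingestStartTimeInNanos;
    long before = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS); // old form
    long after = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);                        // new form, same value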
@ -20,6 +20,10 @@

package org.elasticsearch.action.ingest;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;

@ -27,24 +31,32 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.Map;

public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {

    private final PipelineStore pipelineStore;
    private final ClusterService clusterService;
    private final TransportNodesInfoAction nodesInfoAction;

    @Inject
    public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
                                      IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
                                      TransportNodesInfoAction nodesInfoAction) {
        super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
        this.clusterService = clusterService;
        this.nodesInfoAction = nodesInfoAction;
        this.pipelineStore = nodeService.getIngestService().getPipelineStore();
    }

@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip

    @Override
    protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
        pipelineStore.put(clusterService, request, listener);
        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
        nodesInfoRequest.clear();
        nodesInfoRequest.ingest(true);
        nodesInfoAction.execute(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
            @Override
            public void onResponse(NodesInfoResponse nodeInfos) {
                try {
                    Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
                    for (NodeInfo nodeInfo : nodeInfos) {
                        ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
                    }
                    pipelineStore.put(clusterService, ingestInfos, request, listener);
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        });
    }

    @Override
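Net effect of this hunk: instead of writing the pipeline straight into the cluster state, the master first fans out a cleared NodesInfoRequest with only the ingest flag set, collects each node's IngestInfo into a map keyed by DiscoveryNode, and hands that map to PipelineStore.put, presumably so the pipeline's processors can be validated against what every node actually supports before the pipeline is stored.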
@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget;

import java.io.IOException;

import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget;

/**
 * Represents a failure to search on a specific shard.
 */

@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
    @Override
    public int shardId() {
        if (shardTarget != null) {
            return shardTarget.shardId();
            return shardTarget.shardId().id();
        }
        return -1;
    }

@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            shardTarget = readSearchShardTarget(in);
            shardTarget = new SearchShardTarget(in);
        }
        reason = in.readString();
        status = RestStatus.readFrom(in);
@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
                throw new IllegalArgumentException("suggest content missing");
            }
            final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(),
                    indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id());
                    indexService.fieldData(), request.shardId());
            final Suggest result = suggestPhase.execute(context, searcher.searcher());
            return new ShardSuggestResponse(request.shardId(), result);
        }
@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;

@ -269,7 +270,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                try {
                    channel.sendResponse(e);
                } catch (Throwable e1) {
                    logger.warn("Failed to send response for " + actionName, e1);
                    logger.warn("Failed to send response for {}", e1, actionName);
                }
            }
        });

@ -372,18 +373,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
        }

        private void failReplicaIfNeeded(Throwable t) {
            String index = request.shardId().getIndex().getName();
            Index index = request.shardId().getIndex();
            int shardId = request.shardId().id();
            logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
            if (ignoreReplicaException(t) == false) {
                IndexService indexService = indicesService.indexService(index);
                if (indexService == null) {
                    logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                    return;
                }
                IndexShard indexShard = indexService.getShardOrNull(shardId);
                if (indexShard == null) {
                    logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
                    logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
                    return;
                }
                indexShard.failShard(actionName + " failed on replica", t);

@ -394,7 +395,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
            try {
                channel.sendResponse(t);
            } catch (IOException responseException) {
                logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
                logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
                logger.warn("actual Exception", t);
            }
        }

@ -1106,7 +1107,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
            try {
                channel.sendResponse(finalResponse);
            } catch (IOException responseException) {
                logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException);
                logger.warn("failed to send error message back to client for action [{}]", responseException, transportReplicaAction);
            }
            if (logger.isTraceEnabled()) {
                logger.trace("action [{}] completed on all replicas [{}] for request [{}]", transportReplicaAction, shardId, replicaRequest);
@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

@ -42,8 +43,8 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
    protected TimeValue timeout = DEFAULT_TIMEOUT;

    protected String index;
    // -1 means its not set, allows to explicitly direct a request to a specific shard
    protected int shardId = -1;
    // null means its not set, allows to explicitly direct a request to a specific shard
    protected ShardId shardId = null;

    private String concreteIndex;

@ -115,7 +116,11 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        index = in.readString();
        shardId = in.readInt();
        if (in.readBoolean()) {
            shardId = ShardId.readShardId(in);
        } else {
            shardId = null;
        }
        timeout = TimeValue.readTimeValue(in);
        concreteIndex = in.readOptionalString();
    }

@ -124,7 +129,7 @@ public abstract class InstanceShardOperationRequest<Request extends InstanceShar
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(index);
        out.writeInt(shardId);
        out.writeOptionalStreamable(shardId);
        timeout.writeTo(out);
        out.writeOptionalString(concreteIndex);
    }
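Switching the field from a raw int to a nullable ShardId also changes the wire format: the stream used to carry a bare writeInt(shardId), and now carries a presence boolean followed by the serialized ShardId (writeOptionalStreamable), mirrored on the read side by the readBoolean guard above.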
@ -172,7 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
            return;
        }

        request.shardId = shardIt.shardId().id();
        request.shardId = shardIt.shardId();
        DiscoveryNode node = nodes.get(shard.currentNodeId());
        transportService.sendRequest(node, shardActionName, request, transportOptions(), new BaseTransportResponseHandler<Response>() {
@ -71,7 +71,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
     * Sets the list of action masks for the actions that should be returned
     */
    @SuppressWarnings("unchecked")
    public final Request actions(String... actions) {
    public final Request setActions(String... actions) {
        this.actions = actions;
        return (Request) this;
    }

@ -79,16 +79,16 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    /**
     * Return the list of action masks for the actions that should be returned
     */
    public String[] actions() {
    public String[] getActions() {
        return actions;
    }

    public final String[] nodesIds() {
    public final String[] getNodesIds() {
        return nodesIds;
    }

    @SuppressWarnings("unchecked")
    public final Request nodesIds(String... nodesIds) {
    public final Request setNodesIds(String... nodesIds) {
        this.nodesIds = nodesIds;
        return (Request) this;
    }

@ -98,12 +98,12 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
     *
     * By default tasks with any ids are returned.
     */
    public TaskId taskId() {
    public TaskId getTaskId() {
        return taskId;
    }

    @SuppressWarnings("unchecked")
    public final Request taskId(TaskId taskId) {
    public final Request setTaskId(TaskId taskId) {
        this.taskId = taskId;
        return (Request) this;
    }

@ -112,29 +112,29 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    /**
     * Returns the parent task id that tasks should be filtered by
     */
    public TaskId parentTaskId() {
    public TaskId getParentTaskId() {
        return parentTaskId;
    }

    @SuppressWarnings("unchecked")
    public Request parentTaskId(TaskId parentTaskId) {
    public Request setParentTaskId(TaskId parentTaskId) {
        this.parentTaskId = parentTaskId;
        return (Request) this;
    }

    public TimeValue timeout() {
    public TimeValue getTimeout() {
        return this.timeout;
    }

    @SuppressWarnings("unchecked")
    public final Request timeout(TimeValue timeout) {
    public final Request setTimeout(TimeValue timeout) {
        this.timeout = timeout;
        return (Request) this;
    }

    @SuppressWarnings("unchecked")
    public final Request timeout(String timeout) {
    public final Request setTimeout(String timeout) {
        this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
        return (Request) this;
    }

@ -162,11 +162,11 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
    }

    public boolean match(Task task) {
        if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
        if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
            return false;
        }
        if (taskId().isSet() == false) {
            if(taskId().getId() != task.getId()) {
        if (getTaskId().isSet() == false) {
            if(getTaskId().getId() != task.getId()) {
                return false;
            }
        }
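The renamed setters keep the fluent, return-this style, so calls still chain across the base class. A hypothetical sketch:

    // sketch only: every set* returns (Request) this
    ListTasksRequest request = new ListTasksRequest()
            .setNodesIds("node1", "node2")
            .setActions("cluster:monitor/*")
            .setTimeout("10s"); // parsed via TimeValue.parseTimeValue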
@ -35,19 +35,19 @@ public class TasksRequestBuilder <Request extends BaseTasksRequest<Request>, Res

    @SuppressWarnings("unchecked")
    public final RequestBuilder setNodesIds(String... nodesIds) {
        request.nodesIds(nodesIds);
        request.setNodesIds(nodesIds);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setActions(String... actions) {
        request.actions(actions);
        request.setActions(actions);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        request.setTimeout(timeout);
        return (RequestBuilder) this;
    }
}
@ -124,25 +124,25 @@ public abstract class TransportTasksAction<
    }

    protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
        if (request.taskId().isSet()) {
            return clusterState.nodes().resolveNodesIds(request.nodesIds());
        if (request.getTaskId().isSet()) {
            return clusterState.nodes().resolveNodesIds(request.getNodesIds());
        } else {
            return new String[]{request.taskId().getNodeId()};
            return new String[]{request.getTaskId().getNodeId()};
        }
    }

    protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
        if (request.taskId().isSet() == false) {
        if (request.getTaskId().isSet() == false) {
            // we are only checking one task, we can optimize it
            Task task = taskManager.getTask(request.taskId().getId());
            Task task = taskManager.getTask(request.getTaskId().getId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept((OperationTask) task);
                } else {
                    throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
                    throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId());
                }
            } else {
                throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
                throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId());
            }
        } else {
            for (Task task : taskManager.getTasks().values()) {

@ -224,8 +224,8 @@ public abstract class TransportTasksAction<
            }
        } else {
            TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
            if (request.timeout() != null) {
                builder.withTimeout(request.timeout());
            if (request.getTimeout() != null) {
                builder.withTimeout(request.getTimeout());
            }
            builder.withCompress(transportCompress());
            for (int i = 0; i < nodesIds.length; i++) {
@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc

    @Override
    protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
        MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
        final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        final IndexShard indexShard = indexService.getShard(shardId.id());
        for (int i = 0; i < request.locations.size(); i++) {
            TermVectorsRequest termVectorsRequest = request.requests.get(i);
            try {
                IndexService indexService = indicesService.indexServiceSafe(request.index());
                IndexShard indexShard = indexService.getShard(shardId.id());
                TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
                termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
                response.add(request.locations.get(i), termVectorsResponse);
@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;

@ -147,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio

    @Override
    protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) {
        if (request.shardId() != -1) {
            return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt();
        if (request.getShardId() != null) {
            return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt();
        }
        ShardIterator shardIterator = clusterService.operationRouting()
                .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing());

@ -167,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
    }

    protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
        final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
        final IndexShard indexShard = indexService.getShard(request.shardId());
        final ShardId shardId = request.getShardId();
        final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        final IndexShard indexShard = indexService.getShard(shardId.getId());
        final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
        switch (result.operation()) {
            case UPSERT:

@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                    if (e instanceof VersionConflictEngineException) {
                        if (retryCount < request.retryOnConflict()) {
                            logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]",
                                    retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id());
                                    retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id());
                            threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                @Override
                                protected void doRun() {

@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                break;
            case NONE:
                UpdateResponse update = result.action();
                IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
                IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
                if (indexServiceOrNull != null) {
                    IndexShard shard = indexService.getShardOrNull(request.shardId());
                    IndexShard shard = indexService.getShardOrNull(shardId.getId());
                    if (shard != null) {
                        shard.noopUpdate(request.type());
                    }
@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;

@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
    }

    public UpdateRequest(String index, String type, String id) {
        this.index = index;
        super(index);
        this.type = type;
        this.id = id;
    }

@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
        return parent;
    }

    int shardId() {
    public ShardId getShardId() {
        return this.shardId;
    }
@ -19,15 +19,22 @@

package org.elasticsearch.bootstrap;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;

import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;

@ -40,13 +47,6 @@ import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;

/**

@ -222,11 +222,11 @@ final class Bootstrap {
        // Set the system property before anything has a chance to trigger its use
        initLoggerPrefix();

        BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
        CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
        BootstrapCliParser parser = new BootstrapCliParser();
        int status = parser.main(args, Terminal.DEFAULT);

        if (CliTool.ExitStatus.OK != status) {
            exit(status.status());
        if (parser.shouldRun() == false || status != ExitCodes.OK) {
            exit(status);
        }

        INSTANCE = new Bootstrap();

@ -307,14 +307,6 @@ final class Bootstrap {
        System.err.close();
    }

    @SuppressForbidden(reason = "System#err")
    private static void sysError(String line, boolean flush) {
        System.err.println(line);
        if (flush) {
            System.err.flush();
        }
    }

    private static void checkForCustomConfFile() {
        String confFileSetting = System.getProperty("es.default.config");
        checkUnsetAndMaybeExit(confFileSetting, "es.default.config");
@ -1,184 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.elasticsearch.Build;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;

final class BootstrapCLIParser extends CliTool {

    private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
            .cmds(Start.CMD, Version.CMD)
            .build();

    public BootstrapCLIParser() {
        super(CONFIG);
    }

    public BootstrapCLIParser(Terminal terminal) {
        super(CONFIG, terminal);
    }

    @Override
    protected Command parse(String cmdName, CommandLine cli) throws Exception {
        switch (cmdName.toLowerCase(Locale.ROOT)) {
            case Start.NAME:
                return Start.parse(terminal, cli);
            case Version.NAME:
                return Version.parse(terminal, cli);
            default:
                assert false : "should never get here, if the user enters an unknown command, an error message should be shown before parse is called";
                return null;
        }
    }

    static class Version extends CliTool.Command {

        private static final String NAME = "version";

        private static final CliToolConfig.Cmd CMD = cmd(NAME, Version.class).build();

        public static Command parse(Terminal terminal, CommandLine cli) {
            return new Version(terminal);
        }

        public Version(Terminal terminal) {
            super(terminal);
        }

        @Override
        public ExitStatus execute(Settings settings, Environment env) throws Exception {
            terminal.println("Version: " + org.elasticsearch.Version.CURRENT
                    + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
                    + ", JVM: " + JvmInfo.jvmInfo().version());
            return ExitStatus.OK_AND_EXIT;
        }
    }

    static class Start extends CliTool.Command {

        private static final String NAME = "start";

        private static final CliToolConfig.Cmd CMD = cmd(NAME, Start.class)
                .options(
                        optionBuilder("d", "daemonize").hasArg(false).required(false),
                        optionBuilder("p", "pidfile").hasArg(true).required(false),
                        optionBuilder("V", "version").hasArg(false).required(false),
                        Option.builder("D").argName("property=value").valueSeparator('=').numberOfArgs(2)
                )
                .stopAtNonOption(true) // needed to parse the --foo.bar options, so this parser must be lenient
                .build();

        // TODO: don't use system properties as a way to do this, its horrible...
        @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
        public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
            if (cli.hasOption("V")) {
                return Version.parse(terminal, cli);
            }

            if (cli.hasOption("d")) {
                System.setProperty("es.foreground", "false");
            }

            String pidFile = cli.getOptionValue("pidfile");
            if (!Strings.isNullOrEmpty(pidFile)) {
                System.setProperty("es.pidfile", pidFile);
            }

            if (cli.hasOption("D")) {
                Properties properties = cli.getOptionProperties("D");
                for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                    String key = (String) entry.getKey();
                    String propertyName = key.startsWith("es.") ? key : "es." + key;
                    System.setProperty(propertyName, entry.getValue().toString());
                }
            }

            // hacky way to extract all the fancy extra args, there is no CLI tool helper for this
            Iterator<String> iterator = cli.getArgList().iterator();
            final Map<String, String> properties = new HashMap<>();
            while (iterator.hasNext()) {
                String arg = iterator.next();
                if (!arg.startsWith("--")) {
                    if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
                        throw new UserError(ExitStatus.USAGE,
                                "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
                        );
                    } else {
                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
                    }
                }
                // if there is no = sign, we have to get the next argument
                arg = arg.replace("--", "");
                if (arg.contains("=")) {
                    String[] splitArg = arg.split("=", 2);
                    String key = splitArg[0];
                    String value = splitArg[1];
                    properties.put("es." + key, value);
                } else {
                    if (iterator.hasNext()) {
                        String value = iterator.next();
                        if (value.startsWith("--")) {
                            throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                        }
                        properties.put("es." + arg, value);
                    } else {
                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                    }
                }
            }
            for (Map.Entry<String, String> entry : properties.entrySet()) {
                System.setProperty(entry.getKey(), entry.getValue());
            }
            return new Start(terminal);
        }

        public Start(Terminal terminal) {
            super(terminal);
        }

        @Override
        public ExitStatus execute(Settings settings, Environment env) throws Exception {
            return ExitStatus.OK;
        }
    }

}
@@ -225,7 +225,7 @@ final class BootstrapCheck {

    static class MaxNumberOfThreadsCheck implements Check {

-       private final long maxNumberOfThreadsThreshold = 1 << 15;
+       private final long maxNumberOfThreadsThreshold = 1 << 11;

        @Override
        public boolean check() {
@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import java.util.Arrays;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.Build;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.common.Strings;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.monitor.jvm.JvmInfo;

final class BootstrapCliParser extends Command {

    private final OptionSpec<Void> versionOption;
    private final OptionSpec<Void> daemonizeOption;
    private final OptionSpec<String> pidfileOption;
    private final OptionSpec<String> propertyOption;
    private boolean shouldRun = false;

    BootstrapCliParser() {
        super("Starts elasticsearch");
        // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options
        versionOption = parser.acceptsAll(Arrays.asList("V", "version"),
            "Prints elasticsearch version information and exits");
        daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"),
            "Starts Elasticsearch in the background");
        // TODO: in jopt-simple 5.0 this option type can be a Path
        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
            "Creates a pid file in the specified path on start")
            .withRequiredArg();
        propertyOption = parser.accepts("D", "Configures an Elasticsearch setting")
            .withRequiredArg();
    }

    // TODO: don't use system properties as a way to do this, it's horrible...
    @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        if (options.has(versionOption)) {
            terminal.println("Version: " + org.elasticsearch.Version.CURRENT
                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
                + ", JVM: " + JvmInfo.jvmInfo().version());
            return;
        }

        // TODO: don't use sysprops for any of these! pass the args through to bootstrap...
        if (options.has(daemonizeOption)) {
            System.setProperty("es.foreground", "false");
        }
        String pidFile = pidfileOption.value(options);
        if (Strings.isNullOrEmpty(pidFile) == false) {
            System.setProperty("es.pidfile", pidFile);
        }

        for (String property : propertyOption.values(options)) {
            String[] keyValue = property.split("=", 2);
            if (keyValue.length != 2) {
                throw new UserError(ExitCodes.USAGE, "Malformed elasticsearch setting, must be of the form key=value");
            }
            String key = keyValue[0];
            if (key.startsWith("es.") == false) {
                key = "es." + key;
            }
            System.setProperty(key, keyValue[1]);
        }
        shouldRun = true;
    }

    boolean shouldRun() {
        return shouldRun;
    }
}
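For reference, the jopt-simple idiom BootstrapCliParser relies on can be exercised standalone. The sketch below is illustrative only (the class name and option descriptions are made up, not from this commit); it assumes jopt-simple is on the classpath and declares the same three option shapes used above.

```java
import java.util.Arrays;

import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

// Hypothetical demo class, not part of this commit.
public class JoptSimpleSketch {
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        // flag option (no argument), reachable as -d or --daemonize
        OptionSpec<Void> daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in the background");
        // option with a required argument
        OptionSpec<String> pidfile = parser.acceptsAll(Arrays.asList("p", "pidfile"), "pid file path").withRequiredArg();
        // repeatable -Dkey=value style option
        OptionSpec<String> property = parser.accepts("D", "key=value setting").withRequiredArg();

        OptionSet options = parser.parse("-d", "-p", "/tmp/es.pid", "-Dfoo=bar", "-Dbaz=qux");
        System.out.println(options.has(daemonize));   // true
        System.out.println(pidfile.value(options));   // /tmp/es.pid
        System.out.println(property.values(options)); // [foo=bar, baz=qux]
    }
}
```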
@@ -32,7 +32,7 @@ public final class Elasticsearch {
    /**
     * Main entry point for starting elasticsearch
     */
-   public static void main(String[] args) throws StartupError {
+   public static void main(String[] args) throws Exception {
        try {
            Bootstrap.init(args);
        } catch (Throwable t) {
@@ -76,7 +76,7 @@ class JNANatives {
                softLimit = rlimit.rlim_cur.longValue();
                hardLimit = rlimit.rlim_max.longValue();
            } else {
-               logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError()));
+               logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError()));
            }
        }
    } catch (UnsatisfiedLinkError e) {

@@ -85,18 +85,19 @@ class JNANatives {
        }

        // mlockall failed for some reason
-       logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
+       logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno, errMsg);
        logger.warn("This can result in part of the JVM being swapped out.");
        if (errno == JNACLibrary.ENOMEM) {
            if (rlimitSuccess) {
-               logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
+               logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit));
                if (Constants.LINUX) {
                    // give specific instructions for the linux case to make it easy
                    String user = System.getProperty("user.name");
                    logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
-                       "\t# allow user '" + user + "' mlockall\n" +
-                       "\t" + user + " soft memlock unlimited\n" +
-                       "\t" + user + " hard memlock unlimited"
+                       "\t# allow user '{}' mlockall\n" +
+                       "\t{} soft memlock unlimited\n" +
+                       "\t{} hard memlock unlimited",
+                       user, user, user
                    );
                    logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
                }

@@ -155,7 +156,7 @@ class JNANatives {
        // the amount of memory we wish to lock, plus a small overhead (1MB).
        SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024));
        if (!kernel.SetProcessWorkingSetSize(process, size, size)) {
-           logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError());
+           logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError());
        } else {
            JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation();
            long address = 0;

@@ -188,7 +189,7 @@ class JNANatives {
        if (result) {
            logger.debug("console ctrl handler correctly set");
        } else {
-           logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:");
+           logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError());
        }
    } catch (UnsatisfiedLinkError e) {
        // this will have already been logged by Kernel32Library, no need to repeat it

@@ -200,7 +200,7 @@ final class JVMCheck {
    HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
    if (bug != null && bug.check()) {
        if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
-           Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
+           Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
        } else {
            throw new RuntimeException(bug.getErrorMessage());
        }

@@ -394,7 +394,7 @@ final class Seccomp {
    method = 0;
    int errno1 = Native.getLastError();
    if (logger.isDebugEnabled()) {
-       logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)...");
+       logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
    }
    if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
        int errno2 = Native.getLastError();
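All of the logging edits above apply one pattern: replace eager string concatenation with `{}` placeholders so the message is only rendered when the level is enabled, and (in the overloads used in this diff) pass any Throwable before the format arguments. Schematically:

```java
// before: message string built even when the WARN level is disabled
logger.warn("failed to connect to node [" + node + "]", e);

// after: placeholders rendered lazily by the logger; Throwable slots in
// before the format arguments, as in the changed call sites above
logger.warn("failed to connect to node [{}]", e, node);
```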
@@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

import java.io.IOException;
import java.util.Arrays;

import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;

/**
 * An action to execute within a cli.
 */
public abstract class Command {

    /** A description of the command, used in the help output. */
    protected final String description;

    /** The option parser for this command. */
    protected final OptionParser parser = new OptionParser();

    private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp();
    private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
    private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output");

    public Command(String description) {
        this.description = description;
    }

    /** Parses options for this command from args and executes it. */
    public final int main(String[] args, Terminal terminal) throws Exception {
        try {
            mainWithoutErrorHandling(args, terminal);
        } catch (OptionException e) {
            printHelp(terminal);
            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
            return ExitCodes.USAGE;
        } catch (UserError e) {
            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
            return e.exitCode;
        }
        return ExitCodes.OK;
    }

    /**
     * Executes the command, but all errors are thrown.
     */
    void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception {
        final OptionSet options = parser.parse(args);

        if (options.has(helpOption)) {
            printHelp(terminal);
            return;
        }

        if (options.has(silentOption)) {
            if (options.has(verboseOption)) {
                // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it
                throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together");
            }
            terminal.setVerbosity(Terminal.Verbosity.SILENT);
        } else if (options.has(verboseOption)) {
            terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
        } else {
            terminal.setVerbosity(Terminal.Verbosity.NORMAL);
        }

        execute(terminal, options);
    }

    /** Prints a help message for the command to the terminal. */
    private void printHelp(Terminal terminal) throws IOException {
        terminal.println(description);
        terminal.println("");
        printAdditionalHelp(terminal);
        parser.printHelpOn(terminal.getWriter());
    }

    /** Prints additional help information, specific to the command */
    protected void printAdditionalHelp(Terminal terminal) {}

    @SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
    protected static void exit(int status) {
        System.exit(status);
    }

    /**
     * Executes this command.
     *
     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
    protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;
}
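A hypothetical subcommand shows the intended extension pattern: declare option specs against the protected `parser` in the constructor, read them in `execute()`, and signal user mistakes with `UserError`. The names below are illustrative, not from this commit:

```java
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;

// Illustrative only; not part of this commit.
class HelloCommand extends Command {

    private final OptionSpec<String> nameOption;

    HelloCommand() {
        super("Prints a greeting");
        nameOption = parser.accepts("name", "who to greet").withRequiredArg();
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        String name = nameOption.value(options); // null when --name was not given
        if (name == null) {
            throw new UserError(ExitCodes.USAGE, "Missing --name");
        }
        terminal.println("Hello " + name);
    }
}
```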
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

/**
 * POSIX exit codes.
 */
public class ExitCodes {
    public static final int OK = 0;
    public static final int USAGE = 64;          /* command line usage error */
    public static final int DATA_ERROR = 65;     /* data format error */
    public static final int NO_INPUT = 66;       /* cannot open input */
    public static final int NO_USER = 67;        /* addressee unknown */
    public static final int NO_HOST = 68;        /* host name unknown */
    public static final int UNAVAILABLE = 69;    /* service unavailable */
    public static final int CODE_ERROR = 70;     /* internal software error */
    public static final int CANT_CREATE = 73;    /* can't create (user) output file */
    public static final int IO_ERROR = 74;       /* input/output error */
    public static final int TEMP_FAILURE = 75;   /* temp failure; user is invited to retry */
    public static final int PROTOCOL = 76;       /* remote error in protocol */
    public static final int NOPERM = 77;         /* permission denied */
    public static final int CONFIG = 78;         /* configuration error */

    private ExitCodes() { /* no instance, just constants */ }
}
@@ -0,0 +1,71 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

import joptsimple.NonOptionArgumentSpec;
import joptsimple.OptionSet;

/**
 * A cli tool which is made up of multiple subcommands.
 */
public class MultiCommand extends Command {

    protected final Map<String, Command> subcommands = new LinkedHashMap<>();

    private final NonOptionArgumentSpec<String> arguments = parser.nonOptions("command");

    public MultiCommand(String description) {
        super(description);
        parser.posixlyCorrect(true);
    }

    @Override
    protected void printAdditionalHelp(Terminal terminal) {
        if (subcommands.isEmpty()) {
            throw new IllegalStateException("No subcommands configured");
        }
        terminal.println("Commands");
        terminal.println("--------");
        for (Map.Entry<String, Command> subcommand : subcommands.entrySet()) {
            terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description);
        }
        terminal.println("");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        if (subcommands.isEmpty()) {
            throw new IllegalStateException("No subcommands configured");
        }
        String[] args = arguments.values(options).toArray(new String[0]);
        if (args.length == 0) {
            throw new UserError(ExitCodes.USAGE, "Missing command");
        }
        Command subcommand = subcommands.get(args[0]);
        if (subcommand == null) {
            throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]");
        }
        subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal);
    }
}
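Wiring is then a matter of registering subcommands by name in a subclass constructor. A hypothetical sketch, reusing the HelloCommand example from earlier:

```java
// Illustrative only; dispatches "hello ..." to the HelloCommand sketch above.
class DemoCli extends MultiCommand {
    DemoCli() {
        super("A demo multi-command cli");
        subcommands.put("hello", new HelloCommand());
    }
}

// usage: returns a POSIX exit code, e.g. ExitCodes.OK on success
// int status = new DemoCli().main(new String[] {"hello", "--name", "world"}, terminal);
```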
@@ -17,18 +17,19 @@
 * under the License.
 */

-package org.elasticsearch.common.cli;
+package org.elasticsearch.cli;

import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.io.PrintWriter;
import java.nio.charset.Charset;

import org.elasticsearch.common.SuppressForbidden;

/**
- * A Terminal wraps access to reading input and writing output for a {@link CliTool}.
+ * A Terminal wraps access to reading input and writing output for a cli.
 *
 * The available methods are similar to those of {@link Console}, with the ability
 * to read either normal text or a password, and the ability to print a line

@@ -52,8 +53,15 @@ public abstract class Terminal {
    /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
    private Verbosity verbosity = Verbosity.NORMAL;

+   /** The newline used when calling println. */
+   private final String lineSeparator;
+
+   protected Terminal(String lineSeparator) {
+       this.lineSeparator = lineSeparator;
+   }
+
    /** Sets the verbosity of the terminal. */
-   void setVerbosity(Verbosity verbosity) {
+   public void setVerbosity(Verbosity verbosity) {
        this.verbosity = verbosity;
    }

@@ -63,8 +71,8 @@ public abstract class Terminal {
    /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
    public abstract char[] readSecret(String prompt);

-   /** Print a message directly to the terminal. */
-   protected abstract void doPrint(String msg);
+   /** Returns a Writer which can be used to write to the terminal directly. */
+   public abstract PrintWriter getWriter();

    /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
    public final void println(String msg) {

@@ -74,47 +82,60 @@ public abstract class Terminal {
    /** Prints a line to the terminal at {@code verbosity} level. */
    public final void println(Verbosity verbosity, String msg) {
        if (this.verbosity.ordinal() >= verbosity.ordinal()) {
-           doPrint(msg + System.lineSeparator());
+           getWriter().print(msg + lineSeparator);
+           getWriter().flush();
        }
    }

    private static class ConsoleTerminal extends Terminal {

-       private static final Console console = System.console();
+       private static final Console CONSOLE = System.console();

+       ConsoleTerminal() {
+           super(System.lineSeparator());
+       }
+
        static boolean isSupported() {
-           return console != null;
+           return CONSOLE != null;
        }

        @Override
-       public void doPrint(String msg) {
-           console.printf("%s", msg);
-           console.flush();
+       public PrintWriter getWriter() {
+           return CONSOLE.writer();
        }

        @Override
        public String readText(String prompt) {
-           return console.readLine("%s", prompt);
+           return CONSOLE.readLine("%s", prompt);
        }

        @Override
        public char[] readSecret(String prompt) {
-           return console.readPassword("%s", prompt);
+           return CONSOLE.readPassword("%s", prompt);
        }
    }

    private static class SystemTerminal extends Terminal {

+       private static final PrintWriter WRITER = newWriter();
+
+       SystemTerminal() {
+           super(System.lineSeparator());
+       }
+
+       @SuppressForbidden(reason = "Writer for System.out")
+       private static PrintWriter newWriter() {
+           return new PrintWriter(System.out);
+       }
+
        @Override
-       @SuppressForbidden(reason = "System#out")
-       public void doPrint(String msg) {
-           System.out.print(msg);
-           System.out.flush();
+       public PrintWriter getWriter() {
+           return WRITER;
        }

        @Override
        public String readText(String text) {
-           doPrint(text);
+           getWriter().print(text);
            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
            try {
                return reader.readLine();
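One practical consequence of swapping `doPrint(String)` for `getWriter()`: a Terminal can now hand out any PrintWriter, which makes an in-memory test double straightforward. A hypothetical sketch (not part of this commit):

```java
import java.io.PrintWriter;
import java.io.StringWriter;

import org.elasticsearch.cli.Terminal;

// Illustrative test double: captures everything println writes.
class CaptureTerminal extends Terminal {

    private final StringWriter buffer = new StringWriter();
    private final PrintWriter writer = new PrintWriter(buffer);

    CaptureTerminal() {
        super("\n"); // fixed separator keeps assertions platform independent
    }

    @Override
    public PrintWriter getWriter() {
        return writer;
    }

    @Override
    public String readText(String prompt) {
        return "";
    }

    @Override
    public char[] readSecret(String prompt) {
        return new char[0];
    }

    String output() {
        return buffer.toString();
    }
}
```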
@@ -17,19 +17,19 @@
 * under the License.
 */

-package org.elasticsearch.common.cli;
+package org.elasticsearch.cli;

/**
- * An exception representing a user fixable problem in {@link CliTool} usage.
+ * An exception representing a user fixable problem in {@link Command} usage.
 */
public class UserError extends Exception {

    /** The exit status the cli should use when catching this user error. */
-   public final CliTool.ExitStatus exitStatus;
+   public final int exitCode;

    /** Constructs a UserError with an exit status and message to show the user. */
-   public UserError(CliTool.ExitStatus exitStatus, String msg) {
+   public UserError(int exitCode, String msg) {
        super(msg);
-       this.exitStatus = exitStatus;
+       this.exitCode = exitCode;
    }
}
@@ -124,7 +124,7 @@ public class TransportClientNodesService extends AbstractComponent {
    this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);

    if (logger.isDebugEnabled()) {
-       logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
+       logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
    }

    if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {

@@ -323,7 +323,7 @@ public class TransportClientNodesService extends AbstractComponent {
                transportService.connectToNode(node);
            } catch (Throwable e) {
                it.remove();
-               logger.debug("failed to connect to discovered node [" + node + "]", e);
+               logger.debug("failed to connect to discovered node [{}]", e, node);
            }
        }
    }
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.index.Index;

import java.util.ArrayList;
import java.util.Collections;

@@ -120,7 +121,7 @@ public class ClusterChangedEvent {
    /**
     * Returns the indices deleted in this event
     */
-   public List<String> indicesDeleted() {
+   public List<Index> indicesDeleted() {
        // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected
        // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data;
        // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous

@@ -131,17 +132,18 @@ public class ClusterChangedEvent {
        if (metaDataChanged() == false || isNewCluster()) {
            return Collections.emptyList();
        }
-       List<String> deleted = null;
-       for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) {
-           String index = cursor.value;
-           if (!state.metaData().hasIndex(index)) {
+       List<Index> deleted = null;
+       for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
+           IndexMetaData index = cursor.value;
+           IndexMetaData current = state.metaData().index(index.getIndex().getName());
+           if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
                if (deleted == null) {
                    deleted = new ArrayList<>();
                }
-               deleted.add(index);
+               deleted.add(index.getIndex());
            }
        }
-       return deleted == null ? Collections.<String>emptyList() : deleted;
+       return deleted == null ? Collections.<Index>emptyList() : deleted;
    }

    /**
@@ -138,6 +138,7 @@ public class ClusterModule extends AbstractModule {
        bind(AllocationService.class).asEagerSingleton();
        bind(DiscoveryNodeService.class).asEagerSingleton();
        bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton();
+       bind(NodeConnectionsService.class).asEagerSingleton();
        bind(OperationRouting.class).asEagerSingleton();
        bind(MetaDataCreateIndexService.class).asEagerSingleton();
        bind(MetaDataDeleteIndexService.class).asEagerSingleton();
@@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.tasks.TaskManager;

import java.util.List;

@@ -154,9 +153,4 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
     * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue
     */
    TimeValue getMaxTaskWaitTime();
-
-   /**
-    * Returns task manager created in the cluster service
-    */
-   TaskManager getTaskManager();
}
@@ -0,0 +1,156 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cluster;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledFuture;

/**
 * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are
 * removed. Also, it periodically checks that all connections are still open and if needed restores them.
 * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond
 * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection
 * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}.
 */
public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {

    public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
        Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
    private final ThreadPool threadPool;
    private final TransportService transportService;

    // map between current node and the number of failed connection attempts. 0 means successfully connected.
    // if a node doesn't appear in this list it shouldn't be monitored
    private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap();

    private final KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();

    private final TimeValue reconnectInterval;

    private volatile ScheduledFuture<?> backgroundFuture = null;

    @Inject
    public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) {
        super(settings);
        this.threadPool = threadPool;
        this.transportService = transportService;
        this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings);
    }

    public void connectToAddedNodes(ClusterChangedEvent event) {

        // TODO: do this in parallel (and wait)
        for (final DiscoveryNode node : event.nodesDelta().addedNodes()) {
            try (Releasable ignored = nodeLocks.acquire(node)) {
                Integer current = nodes.put(node, 0);
                assert current == null : "node " + node + " was added in event but already in internal nodes";
                validateNodeConnected(node);
            }
        }
    }

    public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
        for (final DiscoveryNode node : event.nodesDelta().removedNodes()) {
            try (Releasable ignored = nodeLocks.acquire(node)) {
                Integer current = nodes.remove(node);
                assert current != null : "node " + node + " was removed in event but not in internal nodes";
                try {
                    transportService.disconnectFromNode(node);
                } catch (Throwable e) {
                    logger.warn("failed to disconnect from node [" + node + "]", e);
                }
            }
        }
    }

    void validateNodeConnected(DiscoveryNode node) {
        assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock";
        if (lifecycle.stoppedOrClosed() ||
            nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
            // nothing to do
        } else {
            try {
                // connecting to an already connected node is a noop
                transportService.connectToNode(node);
                nodes.put(node, 0);
            } catch (Exception e) {
                Integer nodeFailureCount = nodes.get(node);
                assert nodeFailureCount != null : node + " didn't have a counter in nodes map";
                nodeFailureCount = nodeFailureCount + 1;
                // log every 6th failure
                if ((nodeFailureCount % 6) == 1) {
                    logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount);
                }
                nodes.put(node, nodeFailureCount);
            }
        }
    }

    class ConnectionChecker extends AbstractRunnable {

        @Override
        public void onFailure(Throwable t) {
            logger.warn("unexpected error while checking for node reconnects", t);
        }

        protected void doRun() {
            for (DiscoveryNode node : nodes.keySet()) {
                try (Releasable ignored = nodeLocks.acquire(node)) {
                    validateNodeConnected(node);
                }
            }
        }

        @Override
        public void onAfter() {
            if (lifecycle.started()) {
                backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
            }
        }
    }

    @Override
    protected void doStart() {
        backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker());
    }

    @Override
    protected void doStop() {
        FutureUtils.cancel(backgroundFuture);
    }

    @Override
    protected void doClose() {

    }
}
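The reconnect interval above also illustrates the Setting pattern this branch converges on: a typed constant that parses and validates the raw string once. A sketch of how such a setting resolves, using the positiveTimeSetting signature exactly as it appears in this diff (the surrounding demo class is illustrative, not from this commit):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

// Illustrative only: resolve the setting against explicit and empty Settings.
public class SettingSketch {
    public static void main(String[] args) {
        Setting<TimeValue> interval = Setting.positiveTimeSetting(
            "cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);

        Settings configured = Settings.builder().put("cluster.nodes.reconnect_interval", "30s").build();
        System.out.println(interval.get(configured));     // 30s, parsed and validated as positive
        System.out.println(interval.get(Settings.EMPTY)); // 10s, the declared default
    }
}
```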
@@ -69,15 +69,17 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        private final State state;
        private final SnapshotId snapshotId;
        private final boolean includeGlobalState;
+       private final boolean partial;
        private final ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
        private final List<String> indices;
        private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
        private final long startTime;

-       public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
+       public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
            this.state = state;
            this.snapshotId = snapshotId;
            this.includeGlobalState = includeGlobalState;
+           this.partial = partial;
            this.indices = indices;
            this.startTime = startTime;
            if (shards == null) {

@@ -90,7 +92,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        }

        public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
-           this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards);
+           this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
        }

        public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {

@@ -121,6 +123,10 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
            return includeGlobalState;
        }

+       public boolean partial() {
+           return partial;
+       }
+
        public long startTime() {
            return startTime;
        }

@@ -133,6 +139,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
            Entry entry = (Entry) o;

            if (includeGlobalState != entry.includeGlobalState) return false;
+           if (partial != entry.partial) return false;
            if (startTime != entry.startTime) return false;
            if (!indices.equals(entry.indices)) return false;
            if (!shards.equals(entry.shards)) return false;

@@ -148,6 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
            int result = state.hashCode();
            result = 31 * result + snapshotId.hashCode();
            result = 31 * result + (includeGlobalState ? 1 : 0);
+           result = 31 * result + (partial ? 1 : 0);
            result = 31 * result + shards.hashCode();
            result = 31 * result + indices.hashCode();
            result = 31 * result + waitingIndices.hashCode();

@@ -360,6 +368,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        for (int i = 0; i < entries.length; i++) {
            SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
            boolean includeGlobalState = in.readBoolean();
+           boolean partial = in.readBoolean();
            State state = State.fromValue(in.readByte());
            int indices = in.readVInt();
            List<String> indexBuilder = new ArrayList<>();

@@ -375,7 +384,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
                State shardState = State.fromValue(in.readByte());
                builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
            }
-           entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
+           entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
        }
        return new SnapshotsInProgress(entries);
    }

@@ -386,6 +395,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        for (Entry entry : entries) {
            entry.snapshotId().writeTo(out);
            out.writeBoolean(entry.includeGlobalState());
+           out.writeBoolean(entry.partial());
            out.writeByte(entry.state().value());
            out.writeVInt(entry.indices().size());
            for (String index : entry.indices()) {

@@ -406,6 +416,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots");
        static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
        static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state");
+       static final XContentBuilderString PARTIAL = new XContentBuilderString("partial");
        static final XContentBuilderString STATE = new XContentBuilderString("state");
        static final XContentBuilderString INDICES = new XContentBuilderString("indices");
        static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis");

@@ -431,6 +442,7 @@ public class SnapshotsInProgress extends AbstractDiffable<Custom> implements Cus
        builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository());
        builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot());
        builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState());
+       builder.field(Fields.PARTIAL, entry.partial());
        builder.field(Fields.STATE, entry.state());
        builder.startArray(Fields.INDICES);
        {
@@ -81,13 +81,13 @@ public class NodeIndexDeletedAction extends AbstractComponent {
        transportService.sendRequest(clusterState.nodes().masterNode(),
            INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
        if (nodes.localNode().isDataNode() == false) {
-           logger.trace("[{}] not acking store deletion (not a data node)");
+           logger.trace("[{}] not acking store deletion (not a data node)", index);
            return;
        }
        threadPool.generic().execute(new AbstractRunnable() {
            @Override
            public void onFailure(Throwable t) {
-               logger.warn("[{}]failed to ack index store deleted for index", t, index);
+               logger.warn("[{}] failed to ack index store deleted for index", t, index);
            }

            @Override
@@ -151,7 +151,7 @@ public class ShardStateAction extends AbstractComponent {
    @Override
    public void onNewClusterState(ClusterState state) {
        if (logger.isTraceEnabled()) {
-           logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry);
+           logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry);
        }
        sendShardAction(actionName, observer, shardRoutingEntry, listener);
    }

@@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent {
        if (numberOfUnassignedShards > 0) {
            String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
            if (logger.isTraceEnabled()) {
-               logger.trace(reason + ", scheduling a reroute");
+               logger.trace("{}, scheduling a reroute", reason);
            }
            routingService.reroute(reason);
        }
@@ -686,7 +686,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
    }

    private boolean isEmptyOrTrivialWildcard(List<String> expressions) {
-       return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0)));
+       return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0))));
    }

    private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {
@@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;

@@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {

            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
-               boolean indexCreated = false;
+               Index createdIndex = null;
                String removalReason = null;
                try {
                    validate(request, currentState);

@@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                    // Set up everything, now locally create the index to see that things are ok, and apply
                    final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
                    // create the index here (on the master) to validate it can be created, as well as adding the mapping
-                   indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
-                   indexCreated = true;
+                   final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
+                   createdIndex = indexService.index();
                    // now add the mappings
-                   IndexService indexService = indicesService.indexServiceSafe(request.index());
                    MapperService mapperService = indexService.mapperService();
                    // first, add the default mapping
                    if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {

@@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                    removalReason = "cleaning up after validating index on master";
                    return updatedState;
                } finally {
-                   if (indexCreated) {
+                   if (createdIndex != null) {
                        // Index was already partially created - need to clean up
-                       indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index");
+                       indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index");
                    }
                }
            }
@@ -34,11 +34,12 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;

-import java.util.Arrays;
-import java.util.Collection;
+import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

@@ -67,7 +68,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
    }

    public void deleteIndices(final Request request, final Listener userListener) {
-       Collection<String> indices = Arrays.asList(request.indices);
+       Set<String> indices = Sets.newHashSet(request.indices);
        final DeleteIndexListener listener = new DeleteIndexListener(userListener);

        clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {

@@ -84,6 +85,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent {

            @Override
            public ClusterState execute(final ClusterState currentState) {
+               // Check if index deletion conflicts with any running snapshots
+               SnapshotsService.checkIndexDeletion(currentState, indices);
+
                RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
                MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
                ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
@@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;

@@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {

    @Override
    public ClusterState execute(final ClusterState currentState) {
-       List<String> indicesToClose = new ArrayList<>();
+       List<Index> indicesToClose = new ArrayList<>();
        Map<String, IndexService> indices = new HashMap<>();
        try {
            for (AliasAction aliasAction : request.actions()) {

@@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                        logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
                        continue;
                    }
-                   indicesToClose.add(indexMetaData.getIndex().getName());
+                   indicesToClose.add(indexMetaData.getIndex());
                }
                indices.put(indexMetaData.getIndex().getName(), indexService);
            }

@@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
            }
            return currentState;
        } finally {
-           for (String index : indicesToClose) {
+           for (Index index : indicesToClose) {
                indicesService.removeIndex(index, "created for alias processing");
            }
        }
@@ -38,10 +38,14 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.SnapshotsService;

import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;

/**
 * Service responsible for submitting open/close index requests

@@ -78,7 +82,7 @@ public class MetaDataIndexStateService extends AbstractComponent {

    @Override
    public ClusterState execute(ClusterState currentState) {
-       List<String> indicesToClose = new ArrayList<>();
+       Set<String> indicesToClose = new HashSet<>();
        for (String index : request.indices()) {
            IndexMetaData indexMetaData = currentState.metaData().index(index);
            if (indexMetaData == null) {

@@ -94,6 +98,11 @@ public class MetaDataIndexStateService extends AbstractComponent {
            return currentState;
        }

+       // Check if index closing conflicts with any running restores
+       RestoreService.checkIndexClosing(currentState, indicesToClose);
+       // Check if index closing conflicts with any running snapshots
+       SnapshotsService.checkIndexClosing(currentState, indicesToClose);
+
        logger.info("closing indices [{}]", indicesAsString);

        MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
@ -36,6 +36,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
|
@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||
|
||||
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
IndexMetaData indexMetaData = mdBuilder.get(index);
|
||||
IndexMetaData indexMetaData = mdBuilder.get(entry.getKey());
|
||||
if (indexMetaData == null) {
|
||||
// index got deleted on us, ignore...
|
||||
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index);
|
||||
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey());
|
||||
continue;
|
||||
}
|
||||
final Index index = indexMetaData.getIndex();
|
||||
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
|
||||
// the latest (based on order) update mapping one per node
|
||||
List<RefreshTask> allIndexTasks = entry.getValue();
|
||||
|
@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
if (indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
hasTaskWithRightUUID = true;
|
||||
} else {
|
||||
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
}
|
||||
}
|
||||
if (hasTaskWithRightUUID == false) {
|
||||
|
@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
|
||||
// construct the actual index if needed, and make sure the relevant mappings are there
|
||||
boolean removeIndex = false;
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
IndexService indexService = indicesService.indexService(indexMetaData.getIndex());
|
||||
if (indexService == null) {
|
||||
 // we need to create the index here, and add the current mapping to it, so we can merge
 indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());

@@ -208,47 +209,57 @@ public class MetaDataMappingService extends AbstractComponent {
     class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
         @Override
-        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
-            Set<String> indicesToClose = new HashSet<>();
+        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState,
+                                                                        List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
+            Set<Index> indicesToClose = new HashSet<>();
             BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
             try {
                 // precreate incoming indices;
                 for (PutMappingClusterStateUpdateRequest request : tasks) {
-                    // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
-                    for (String index : request.indices()) {
-                        final IndexMetaData indexMetaData = currentState.metaData().index(index);
-                        if (indexMetaData != null && indicesService.hasIndex(index) == false) {
-                            // if we don't have the index, we will throw exceptions later;
-                            indicesToClose.add(index);
-                            IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
-                            // add mappings for all types, we need them for cross-type validation
-                            for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                                indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
+                    final List<Index> indices = new ArrayList<>(request.indices().length);
+                    try {
+                        for (String index : request.indices()) {
+                            final IndexMetaData indexMetaData = currentState.metaData().index(index);
+                            if (indexMetaData != null) {
+                                if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
+                                    // if the index does not exists we create it once, add all types to the mapper service and
+                                    // close it later once we are done with mapping update
+                                    indicesToClose.add(indexMetaData.getIndex());
+                                    IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
+                                        Collections.emptyList());
+                                    // add mappings for all types, we need them for cross-type validation
+                                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
+                                        indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
+                                            MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
+                                    }
+                                }
+                                indices.add(indexMetaData.getIndex());
+                            } else {
+                                // we didn't find the index in the clusterstate - maybe it was deleted
+                                // NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing
+                                throw new IndexNotFoundException(index);
                             }
                         }
-                    }
-                }
-                for (PutMappingClusterStateUpdateRequest request : tasks) {
-                    try {
-                        currentState = applyRequest(currentState, request);
+                        currentState = applyRequest(currentState, request, indices);
                         builder.success(request);
                     } catch (Throwable t) {
                         builder.failure(request, t);
                     }
                 }

                 return builder.build(currentState);
             } finally {
-                for (String index : indicesToClose) {
+                for (Index index : indicesToClose) {
                     indicesService.removeIndex(index, "created for mapping processing");
                 }
             }
         }

-        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
+        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
+                                          List<Index> indices) throws IOException {
             String mappingType = request.type();
             CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
-            for (String index : request.indices()) {
+            final MetaData metaData = currentState.metaData();
+            for (Index index : indices) {
                 IndexService indexService = indicesService.indexServiceSafe(index);
                 // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                 DocumentMapper newMapper;
@@ -270,7 +281,7 @@ public class MetaDataMappingService extends AbstractComponent {
             // and a put mapping api call, so we don't which type did exist before.
             // Also the order of the mappings may be backwards.
             if (newMapper.parentFieldMapper().active()) {
-                IndexMetaData indexMetaData = currentState.metaData().index(index);
+                IndexMetaData indexMetaData = metaData.index(index);
                 for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                     if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
                         throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@@ -290,11 +301,11 @@ public class MetaDataMappingService extends AbstractComponent {
             if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
                 throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
             }
-            MetaData.Builder builder = MetaData.builder(currentState.metaData());
-            for (String index : request.indices()) {
+            MetaData.Builder builder = MetaData.builder(metaData);
+            for (Index index : indices) {
                 // do the actual merge here on the master, and update the mapping source
                 IndexService indexService = indicesService.indexService(index);
-                if (indexService == null) {
+                if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
                     continue;
                 }
@@ -326,7 +337,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 }
             }

-            IndexMetaData indexMetaData = currentState.metaData().index(index);
+            IndexMetaData indexMetaData = metaData.index(index);
             if (indexMetaData == null) {
                 throw new IndexNotFoundException(index);
             }
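
Aside: the executor above pre-creates any missing index services once per batch, uses them for mapping validation, and always removes them in the finally block. A minimal, self-contained sketch of that create/use/close-in-finally shape, with hypothetical create/remove stand-ins rather than the real IndicesService API:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class TemporaryResourcePattern {
        // Hypothetical stand-ins for IndicesService#createIndex / #removeIndex.
        static final Set<String> open = new HashSet<>();
        static void create(String id) { open.add(id); }
        static void remove(String id, String reason) { open.remove(id); }

        static void processBatch(List<String> requests) {
            Set<String> toClose = new HashSet<>(); // resources created only for this batch
            try {
                for (String id : requests) {
                    if (!open.contains(id)) {
                        toClose.add(id);   // remember what we pre-created
                        create(id);
                    }
                    // ... validate / merge mappings against the open resource here ...
                }
            } finally {
                for (String id : toClose) {
                    remove(id, "created for mapping processing"); // torn down even on failure
                }
            }
        }

        public static void main(String[] args) {
            processBatch(Arrays.asList("idx-1", "idx-2"));
            System.out.println(open.isEmpty()); // true: temporary services were closed
        }
    }
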
@@ -19,24 +19,40 @@

 package org.elasticsearch.cluster.node;

 import org.elasticsearch.Version;
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;

 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.CopyOnWriteArrayList;

 /**
  */
 public class DiscoveryNodeService extends AbstractComponent {

+    public static final Setting<Long> NODE_ID_SEED_SETTING =
+        // don't use node.id.seed so it won't be seen as an attribute
+        Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
     private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
+    private final Version version;

     @Inject
-    public DiscoveryNodeService(Settings settings) {
+    public DiscoveryNodeService(Settings settings, Version version) {
         super(settings);
+        this.version = version;
     }

+    public static String generateNodeId(Settings settings) {
+        Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
+        return Strings.randomBase64UUID(random);
+    }
+
     public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
@@ -44,7 +60,7 @@ public class DiscoveryNodeService extends AbstractComponent {
         return this;
     }

-    public Map<String, String> buildAttributes() {
+    public DiscoveryNode buildLocalNode(TransportAddress publishAddress) {
         Map<String, String> attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap());
         attributes.remove("name"); // name is extracted in other places
         if (attributes.containsKey("client")) {
@@ -76,10 +92,11 @@ public class DiscoveryNodeService extends AbstractComponent {
             }
         }

-        return attributes;
+        final String nodeId = generateNodeId(settings);
+        return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version);
     }

-    public static interface CustomAttributesProvider {
+    public interface CustomAttributesProvider {

         Map<String, String> buildAttributes();
     }
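
Aside: DiscoveryNodeService now owns NODE_ID_SEED_SETTING and derives the node id from a Random seeded by it, so the same seed always yields the same id (useful for deterministic tests). A rough, self-contained equivalent of the two lines in generateNodeId, with plain java.util.Random and Base64 standing in for Randomness.get and Strings.randomBase64UUID:

    import java.util.Base64;
    import java.util.Random;

    public class SeededNodeId {
        // Stand-in for Randomness.get(settings, NODE_ID_SEED_SETTING) followed by
        // Strings.randomBase64UUID(random): same seed, same id.
        static String generateNodeId(long seed) {
            Random random = new Random(seed);
            byte[] bytes = new byte[16];
            random.nextBytes(bytes);
            return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
        }

        public static void main(String[] args) {
            // With a fixed seed the id is reproducible across runs.
            System.out.println(generateNodeId(0L).equals(generateNodeId(0L))); // true
        }
    }
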
@@ -597,6 +597,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {

     }

+    /**
+     * Returns the number of routing nodes
+     */
+    public int size() {
+        return nodesToShards.size();
+    }
+
     public static final class UnassignedShards implements Iterable<ShardRouting> {

         private final RoutingNodes nodes;
@@ -20,7 +20,6 @@

 package org.elasticsearch.cluster.routing.allocation;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.util.ArrayUtil;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -36,13 +35,13 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.GatewayAllocator;

 import java.util.ArrayList;
 import java.util.Collections;
@@ -63,14 +62,17 @@ import java.util.stream.Collectors;
 public class AllocationService extends AbstractComponent {

     private final AllocationDeciders allocationDeciders;
+    private final GatewayAllocator gatewayAllocator;
+    private final ShardsAllocator shardsAllocator;
     private final ClusterInfoService clusterInfoService;
-    private final ShardsAllocators shardsAllocators;

     @Inject
-    public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
+    public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
+                             ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
         super(settings);
         this.allocationDeciders = allocationDeciders;
-        this.shardsAllocators = shardsAllocators;
+        this.gatewayAllocator = gatewayAllocator;
+        this.shardsAllocator = shardsAllocator;
         this.clusterInfoService = clusterInfoService;
     }
@@ -92,7 +94,7 @@ public class AllocationService extends AbstractComponent {
         if (!changed) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
-        shardsAllocators.applyStartedShards(allocation);
+        gatewayAllocator.applyStartedShards(allocation);
         if (withReroute) {
             reroute(allocation);
         }
@@ -192,7 +194,7 @@ public class AllocationService extends AbstractComponent {
         if (!changed) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
-        shardsAllocators.applyFailedShards(allocation);
+        gatewayAllocator.applyFailedShards(allocation);
         reroute(allocation);
         final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
         String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
@@ -306,14 +308,10 @@ public class AllocationService extends AbstractComponent {
         if (allocation.routingNodes().unassigned().size() > 0) {
             updateLeftDelayOfUnassignedShards(allocation, settings);

-            changed |= shardsAllocators.allocateUnassigned(allocation);
+            changed |= gatewayAllocator.allocateUnassigned(allocation);
         }

-        // move shards that no longer can be allocated
-        changed |= shardsAllocators.moveShards(allocation);
-
-        // rebalance
-        changed |= shardsAllocators.rebalance(allocation);
+        changed |= shardsAllocator.allocate(allocation);
         assert RoutingNodes.assertShardStats(allocation.routingNodes());
         return changed;
     }
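
Aside: with ShardsAllocators gone, reroute drives the two remaining collaborators directly. The GatewayAllocator gets first shot at unassigned shards (it knows about existing on-disk copies), then the single ShardsAllocator#allocate call assigns what is left, moves shards that can no longer stay, and rebalances. A compact sketch of that ordering under hypothetical stand-in interfaces:

    public class RerouteSketch {
        interface Allocation { boolean hasUnassigned(); }

        // Stand-ins for GatewayAllocator and the new single-method ShardsAllocator.
        interface GatewayAllocator { boolean allocateUnassigned(Allocation allocation); }
        interface ShardsAllocator { boolean allocate(Allocation allocation); }

        static boolean reroute(Allocation allocation, GatewayAllocator gateway, ShardsAllocator allocator) {
            boolean changed = false;
            if (allocation.hasUnassigned()) {
                changed |= gateway.allocateUnassigned(allocation); // recover known copies first
            }
            changed |= allocator.allocate(allocation);             // assign + move + rebalance in one call
            return changed;
        }

        public static void main(String[] args) {
            boolean changed = reroute(() -> true, a -> false, a -> true);
            System.out.println("changed = " + changed); // true
        }
    }
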
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.routing.allocation.allocator;

-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.IntroSorter;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -28,9 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
@@ -43,18 +40,14 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.PriorityComparator;

 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.IdentityHashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.function.Predicate;

 import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -108,27 +101,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
     }

     @Override
-    public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
-
-    @Override
-    public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ }
-
-    @Override
-    public boolean allocateUnassigned(RoutingAllocation allocation) {
-        final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
-        return balancer.allocateUnassigned();
-    }
-
-    @Override
-    public boolean rebalance(RoutingAllocation allocation) {
-        final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
-        return balancer.balance();
-    }
-
-    @Override
-    public boolean moveShards(RoutingAllocation allocation) {
-        final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
-        return balancer.moveShards();
+    public boolean allocate(RoutingAllocation allocation) {
+        if (allocation.routingNodes().size() == 0) {
+            /* with no nodes this is pointless */
+            return false;
+        }
+        final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
+        boolean changed = balancer.allocateUnassigned();
+        changed |= balancer.moveShards();
+        changed |= balancer.balance();
+        return changed;
     }

     /**
@@ -208,8 +190,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
     }

     private float weight(Balancer balancer, ModelNode node, String index, int numAdditionalShards) {
-        final float weightShard = (node.numShards() + numAdditionalShards - balancer.avgShardsPerNode());
-        final float weightIndex = (node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index));
+        final float weightShard = node.numShards() + numAdditionalShards - balancer.avgShardsPerNode();
+        final float weightIndex = node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index);
         return theta0 * weightShard + theta1 * weightIndex;
     }
@@ -221,7 +203,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
     public static class Balancer {
         private final ESLogger logger;
         private final Map<String, ModelNode> nodes = new HashMap<>();
-        private final HashSet<String> indices = new HashSet<>();
         private final RoutingAllocation allocation;
         private final RoutingNodes routingNodes;
         private final WeightFunction weight;
@@ -230,19 +211,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         private final MetaData metaData;
         private final float avgShardsPerNode;

-        private final Predicate<ShardRouting> assignedFilter = shard -> shard.assignedToNode();
-
         public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
             this.logger = logger;
             this.allocation = allocation;
             this.weight = weight;
             this.threshold = threshold;
             this.routingNodes = allocation.routingNodes();
-            for (RoutingNode node : routingNodes) {
-                nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
-            }
             metaData = routingNodes.metaData();
-            avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / nodes.size();
+            avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / routingNodes.size();
+            buildModelFromAssigned();
         }

         /**
@@ -276,17 +253,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             return new NodeSorter(nodesArray(), weight, this);
         }

-        private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("Start distributing Shards");
-            }
-            for (ObjectCursor<String> index : allocation.routingTable().indicesRouting().keys()) {
-                indices.add(index.value);
-            }
-            buildModelFromAssigned(routing.shards(assignedFilter));
-            return allocateUnassigned(unassigned);
-        }
-
         private static float absDelta(float lower, float higher) {
             assert higher >= lower : higher + " lt " + lower +" but was expected to be gte";
             return Math.abs(higher - lower);
@@ -300,12 +266,36 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         }

         /**
-         * Allocates all possible unassigned shards
+         * Balances the nodes on the cluster model according to the weight function.
+         * The actual balancing is delegated to {@link #balanceByWeights()}
          *
          * @return <code>true</code> if the current configuration has been
         *          changed, otherwise <code>false</code>
          */
-        final boolean allocateUnassigned() {
-            return balance(true);
+        private boolean balance() {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Start balancing cluster");
+            }
+            if (allocation.hasPendingAsyncFetch()) {
+                /*
+                 * see https://github.com/elastic/elasticsearch/issues/14387
+                 * if we allow rebalance operations while we are still fetching shard store data
+                 * we might end up with unnecessary rebalance operations which can be super confusion/frustrating
+                 * since once the fetches come back we might just move all the shards back again.
+                 * Therefore we only do a rebalance if we have fetched all information.
+                 */
+                logger.debug("skipping rebalance due to in-flight shard/store fetches");
+                return false;
+            }
+            if (allocation.deciders().canRebalance(allocation).type() != Type.YES) {
+                logger.trace("skipping rebalance as it is disabled");
+                return false;
+            }
+            if (nodes.size() < 2) { /* skip if we only have one node */
+                logger.trace("skipping rebalance as single node only");
+                return false;
+            }
+            return balanceByWeights();
         }

         /**
@@ -322,28 +312,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
          * @return <code>true</code> if the current configuration has been
          *         changed, otherwise <code>false</code>
          */
-        public boolean balance() {
-            return balance(false);
-        }
-
-        private boolean balance(boolean onlyAssign) {
-            if (this.nodes.isEmpty()) {
-                /* with no nodes this is pointless */
-                return false;
-            }
-            if (logger.isTraceEnabled()) {
-                if (onlyAssign) {
-                    logger.trace("Start balancing cluster");
-                } else {
-                    logger.trace("Start assigning unassigned shards");
-                }
-            }
-            final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
-            boolean changed = initialize(routingNodes, unassigned);
-            if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
-                NodeSorter sorter = newNodeSorter();
-                if (nodes.size() > 1) { /* skip if we only have one node */
-                    AllocationDeciders deciders = allocation.deciders();
+        private boolean balanceByWeights() {
+            boolean changed = false;
+            final NodeSorter sorter = newNodeSorter();
+            final AllocationDeciders deciders = allocation.deciders();
             final ModelNode[] modelNodes = sorter.modelNodes;
             final float[] weights = sorter.weights;
             for (String index : buildWeightOrderedIndices(sorter)) {
@@ -355,7 +327,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 for (int i = 0; i < modelNodes.length; i++) {
                     ModelNode modelNode = modelNodes[i];
                     if (modelNode.getIndex(index) != null
-                        || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(routingNodes), allocation).type() != Type.NO) {
+                        || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(), allocation).type() != Type.NO) {
                         // swap nodes at position i and relevantNodes
                         modelNodes[i] = modelNodes[relevantNodes];
                         modelNodes[relevantNodes] = modelNode;
@@ -437,8 +409,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                     }
                 }
             }
-                }
-            }
             return changed;
         }
@@ -456,7 +426,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
          * to the nodes we relocated them from.
          */
         private String[] buildWeightOrderedIndices(NodeSorter sorter) {
-            final String[] indices = this.indices.toArray(new String[this.indices.size()]);
+            final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class);
             final float[] deltas = new float[indices.length];
             for (int i = 0; i < deltas.length; i++) {
                 sorter.reset(indices[i]);
@@ -508,20 +478,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
          * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
          */
         public boolean moveShards() {
-            if (nodes.isEmpty()) {
-                /* with no nodes this is pointless */
-                return false;
-            }
-
-            // Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
+            // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
             // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
             // offloading the shards.
-            List<ShardRouting> shards = new ArrayList<>();
+            boolean changed = false;
             int index = 0;
             boolean found = true;
+            final NodeSorter sorter = newNodeSorter();
             while (found) {
                 found = false;
-                for (RoutingNode routingNode : routingNodes) {
+                for (RoutingNode routingNode : allocation.routingNodes()) {
                     if (index >= routingNode.size()) {
                         continue;
                     }
@@ -529,26 +495,26 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                     ShardRouting shardRouting = routingNode.get(index);
                     // we can only move started shards...
                     if (shardRouting.started()) {
-                        shards.add(shardRouting);
+                        final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
+                        assert sourceNode != null && sourceNode.containsShard(shardRouting);
+                        Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
+                        if (decision.type() == Decision.Type.NO) {
+                            changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
+                        }
                     }
                 }
                 index++;
             }
-            if (shards.isEmpty()) {
-                return false;
-            }

-            final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
-            boolean changed = initialize(routingNodes, unassigned);
-            if (changed == false) {
-                final NodeSorter sorter = newNodeSorter();
-                final ModelNode[] modelNodes = sorter.modelNodes;
-                for (ShardRouting shardRouting : shards) {
-                    final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
-                    assert sourceNode != null && sourceNode.containsShard(shardRouting);
-                    final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes);
-                    Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
-                    if (decision.type() == Decision.Type.NO) {
+            return changed;
+        }

+        /**
+         * Move started shard to the minimal eligible node with respect to the weight function
+         *
+         * @return <code>true</code> if the shard was moved successfully, otherwise <code>false</code>
+         */
+        private boolean moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) {
             logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
             sorter.reset(shardRouting.getIndexName());
             /*
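
Aside: the moveShards hunk above stops copying all started shards into a list and instead visits shard slot 0 on every node, then slot 1, and so on; under throttled shard movements this spreads the movement budget more fairly across the nodes that are offloading. A toy illustration of that interleaved visiting order, with nodes modeled as plain lists:

    import java.util.Arrays;
    import java.util.List;

    public class InterleavedIteration {
        public static void main(String[] args) {
            // Three "nodes" with different shard counts.
            List<List<String>> nodes = Arrays.asList(
                Arrays.asList("a0", "a1", "a2"),
                Arrays.asList("b0"),
                Arrays.asList("c0", "c1"));

            int index = 0;
            boolean found = true;
            while (found) {                  // same loop shape as the new moveShards
                found = false;
                for (List<String> node : nodes) {
                    if (index >= node.size()) {
                        continue;
                    }
                    found = true;
                    System.out.print(node.get(index) + " "); // visit slot `index` on every node
                }
                index++;
            }
            // prints: a0 b0 c0 a1 c1 a2
        }
    }
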
@@ -557,36 +523,24 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
              * This is not guaranteed to be balanced after this operation we still try best effort to
              * allocate on the minimal eligible node.
              */
-            boolean moved = false;
-            for (ModelNode currentNode : modelNodes) {
-                if (currentNode == sourceNode) {
-                    continue;
-                }
-                RoutingNode target = currentNode.getRoutingNode(routingNodes);
+            for (ModelNode currentNode : sorter.modelNodes) {
+                if (currentNode != sourceNode) {
+                    RoutingNode target = currentNode.getRoutingNode();
                     Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
                     Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
                     if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
-                        Decision sourceDecision = sourceNode.removeShard(shardRouting);
+                        sourceNode.removeShard(shardRouting);
                         ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-                        // re-add (now relocating shard) to source node
-                        sourceNode.addShard(shardRouting, sourceDecision);
-                        Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
-                        currentNode.addShard(targetRelocatingShard, targetDecision);
+                        currentNode.addShard(targetRelocatingShard);
                         if (logger.isTraceEnabled()) {
                             logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
                         }
-                        moved = true;
-                        changed = true;
-                        break;
+                        return true;
                     }
                 }
             }
-            if (moved == false) {
-                logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
-            }
-                }
-            }
-            return changed;
+            logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
+            return false;
         }
@@ -598,27 +552,31 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
          * on the target node which we respect during the allocation / balancing
          * process. In short, this method recreates the status-quo in the cluster.
          */
-        private void buildModelFromAssigned(Iterable<ShardRouting> shards) {
-            for (ShardRouting shard : shards) {
-                assert shard.assignedToNode();
-                /* we skip relocating shards here since we expect an initializing shard with the same id coming in */
-                if (shard.state() == RELOCATING) {
-                    continue;
-                }
-                ModelNode node = nodes.get(shard.currentNodeId());
-                assert node != null;
-                node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId()));
-                if (logger.isTraceEnabled()) {
-                    logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
-                }
-            }
-        }
+        private void buildModelFromAssigned() {
+            for (RoutingNode rn : routingNodes) {
+                ModelNode node = new ModelNode(rn);
+                nodes.put(rn.nodeId(), node);
+                for (ShardRouting shard : rn) {
+                    assert rn.nodeId().equals(shard.currentNodeId());
+                    /* we skip relocating shards here since we expect an initializing shard with the same id coming in */
+                    if (shard.state() != RELOCATING) {
+                        node.addShard(shard);
+                        if (logger.isTraceEnabled()) {
+                            logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
+                        }
+                    }
+                }
+            }
+        }

         /**
          * Allocates all given shards on the minimal eligible node for the shards index
          * with respect to the weight function. All given shards must be unassigned.
          * @return <code>true</code> if the current configuration has been
          *         changed, otherwise <code>false</code>
          */
-        private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) {
+        private boolean allocateUnassigned() {
+            RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
             assert !nodes.isEmpty();
             if (logger.isTraceEnabled()) {
                 logger.trace("Start allocating unassigned shards");
@@ -662,7 +620,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             int secondaryLength = 0;
             int primaryLength = primary.length;
             ArrayUtil.timSort(primary, comparator);
-            final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<ModelNode, Boolean>());
+            final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<>());
             do {
                 for (int i = 0; i < primaryLength; i++) {
                     ShardRouting shard = primary[i];
@@ -700,7 +658,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                      * don't check deciders
                      */
                     if (currentWeight <= minWeight) {
-                        Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(routingNodes), allocation);
+                        Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation);
                         NOUPDATE:
                         if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
                             if (currentWeight == minWeight) {
@@ -741,7 +699,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 }
                 assert decision != null && minNode != null || decision == null && minNode == null;
                 if (minNode != null) {
-                    minNode.addShard(shard, decision);
+                    minNode.addShard(shard);
                     if (decision.type() == Type.YES) {
                         if (logger.isTraceEnabled()) {
                             logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
@@ -750,7 +708,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                         changed = true;
                         continue; // don't add to ignoreUnassigned
                     } else {
-                        final RoutingNode node = minNode.getRoutingNode(routingNodes);
+                        final RoutingNode node = minNode.getRoutingNode();
                         if (deciders.canAllocate(node, allocation).type() != Type.YES) {
                             if (logger.isTraceEnabled()) {
                                 logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type());
@@ -796,10 +754,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             }
             ShardRouting candidate = null;
             final AllocationDeciders deciders = allocation.deciders();
-            for (ShardRouting shard : index.getAllShards()) {
+            for (ShardRouting shard : index) {
                 if (shard.started()) {
                     // skip initializing, unassigned and relocating shards we can't relocate them anyway
-                    Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(routingNodes), allocation);
+                    Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation);
                     Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
                     if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
                         && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
@@ -820,24 +778,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             }

             if (candidate != null) {
-
                 /* allocate on the model even if not throttled */
                 maxNode.removeShard(candidate);
-                minNode.addShard(candidate, decision);
+                minNode.addShard(candidate);
                 if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
                     if (logger.isTraceEnabled()) {
                         logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
                             minNode.getNodeId());
                     }
-                    /* now allocate on the cluster - if we are started we need to relocate the shard */
-                    if (candidate.started()) {
-                        /* now allocate on the cluster */
+                    /* now allocate on the cluster */
                     routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-
-                    } else {
-                        routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-                    }
                     return true;
-
                 }
             }
@@ -851,14 +802,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         }

         static class ModelNode implements Iterable<ModelIndex> {
-            private final String id;
             private final Map<String, ModelIndex> indices = new HashMap<>();
             private int numShards = 0;
-            // lazily calculated
-            private RoutingNode routingNode;
+            private final RoutingNode routingNode;

-            public ModelNode(String id) {
-                this.id = id;
+            public ModelNode(RoutingNode routingNode) {
+                this.routingNode = routingNode;
             }

             public ModelIndex getIndex(String indexId) {
@@ -866,13 +815,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             }

             public String getNodeId() {
-                return id;
+                return routingNode.nodeId();
             }

-            public RoutingNode getRoutingNode(RoutingNodes routingNodes) {
-                if (routingNode == null) {
-                    routingNode = routingNodes.node(id);
-                }
+            public RoutingNode getRoutingNode() {
                 return routingNode;
             }
@@ -893,33 +839,31 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 return -1;
             }

-            public void addShard(ShardRouting shard, Decision decision) {
+            public void addShard(ShardRouting shard) {
                 ModelIndex index = indices.get(shard.getIndexName());
                 if (index == null) {
                     index = new ModelIndex(shard.getIndexName());
                     indices.put(index.getIndexId(), index);
                 }
-                index.addShard(shard, decision);
+                index.addShard(shard);
                 numShards++;
             }

-            public Decision removeShard(ShardRouting shard) {
+            public void removeShard(ShardRouting shard) {
                 ModelIndex index = indices.get(shard.getIndexName());
-                Decision removed = null;
                 if (index != null) {
-                    removed = index.removeShard(shard);
-                    if (removed != null && index.numShards() == 0) {
+                    index.removeShard(shard);
+                    if (index.numShards() == 0) {
                         indices.remove(shard.getIndexName());
                     }
                 }
                 numShards--;
-                return removed;
             }

             @Override
             public String toString() {
                 StringBuilder sb = new StringBuilder();
-                sb.append("Node(").append(id).append(")");
+                sb.append("Node(").append(routingNode.nodeId()).append(")");
                 return sb.toString();
             }
@@ -935,9 +879,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards

         }

-        static final class ModelIndex {
+        static final class ModelIndex implements Iterable<ShardRouting> {
             private final String id;
-            private final Map<ShardRouting, Decision> shards = new HashMap<>();
+            private final Set<ShardRouting> shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node
             private int highestPrimary = -1;

             public ModelIndex(String id) {
@@ -947,7 +891,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             public int highestPrimary() {
                 if (highestPrimary == -1) {
                     int maxId = -1;
-                    for (ShardRouting shard : shards.keySet()) {
+                    for (ShardRouting shard : shards) {
                         if (shard.primary()) {
                             maxId = Math.max(maxId, shard.id());
                         }
@@ -965,24 +909,25 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 return shards.size();
             }

-            public Collection<ShardRouting> getAllShards() {
-                return shards.keySet();
+            @Override
+            public Iterator<ShardRouting> iterator() {
+                return shards.iterator();
             }

-            public Decision removeShard(ShardRouting shard) {
+            public void removeShard(ShardRouting shard) {
                 highestPrimary = -1;
-                return shards.remove(shard);
+                assert shards.contains(shard) : "Shard not allocated on current node: " + shard;
+                shards.remove(shard);
             }

-            public void addShard(ShardRouting shard, Decision decision) {
+            public void addShard(ShardRouting shard) {
                 highestPrimary = -1;
-                assert decision != null;
-                assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
-                shards.put(shard, decision);
+                assert !shards.contains(shard) : "Shard already allocated on current node: " + shard;
+                shards.add(shard);
             }

             public boolean containsShard(ShardRouting shard) {
-                return shards.containsKey(shard);
+                return shards.contains(shard);
             }
         }
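
Aside: the weight the balancer minimizes is unchanged by this diff apart from dropped parentheses: theta0 * (numShards(node) - avgShardsPerNode) + theta1 * (numShards(node, index) - avgShardsPerNode(index)). A worked example with made-up numbers (the theta values below are illustrative; in practice they derive from the cluster.routing.allocation.balance.* settings):

    public class WeightExample {
        // Same shape as the weight(...) method in the hunks above.
        static float weight(float theta0, float theta1,
                            int nodeShards, float avgShardsPerNode,
                            int nodeIndexShards, float avgIndexShardsPerNode) {
            final float weightShard = nodeShards - avgShardsPerNode;
            final float weightIndex = nodeIndexShards - avgIndexShardsPerNode;
            return theta0 * weightShard + theta1 * weightIndex;
        }

        public static void main(String[] args) {
            // Two nodes, 10 shards total -> avg 5 per node; index "i" has 4 shards -> avg 2.
            float crowded = weight(0.45f, 0.55f, 7, 5f, 3, 2f); // 0.45*2 + 0.55*1  =  1.45
            float light   = weight(0.45f, 0.55f, 3, 5f, 1, 2f); // 0.45*-2 + 0.55*-1 = -1.45
            System.out.println(crowded + " vs " + light); // higher weight = more overloaded node
        }
    }
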
@@ -19,56 +19,25 @@

 package org.elasticsearch.cluster.routing.allocation.allocator;

-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;

 /**
  * <p>
  * A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
  * The allocator makes basic decision where a shard instance will be allocated, if already allocated instances
- * need relocate to other nodes due to node failures or due to rebalancing decisions.
+ * need to relocate to other nodes due to node failures or due to rebalancing decisions.
  * </p>
  */
 public interface ShardsAllocator {

     /**
-     * Applies changes on started nodes based on the implemented algorithm. For example if a
-     * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
-     * this allocator might apply some cleanups on the node that used to hold the shard.
-     * @param allocation all started {@link ShardRouting shards}
-     */
-    void applyStartedShards(StartedRerouteAllocation allocation);
-
-    /**
-     * Applies changes on failed nodes based on the implemented algorithm.
-     * @param allocation all failed {@link ShardRouting shards}
-     */
-    void applyFailedShards(FailedRerouteAllocation allocation);
-
-    /**
-     * Assign all unassigned shards to nodes
+     * Allocates shards to nodes in the cluster. An implementation of this method should:
+     * - assign unassigned shards
+     * - relocate shards that cannot stay on a node anymore
+     * - relocate shards to find a good shard balance in the cluster
      *
      * @param allocation current node allocation
      * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
      */
-    boolean allocateUnassigned(RoutingAllocation allocation);
-
-    /**
-     * Rebalancing number of shards on all nodes
-     *
-     * @param allocation current node allocation
-     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
-     */
-    boolean rebalance(RoutingAllocation allocation);
-
-    /**
-     * Move started shards that can not be allocated to a node anymore
-     *
-     * @param allocation current node allocation
-     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
-     */
-    boolean moveShards(RoutingAllocation allocation);
+    boolean allocate(RoutingAllocation allocation);
 }
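
Aside: the slimmed-down interface leaves custom allocators one entry point and makes them responsible for all three phases themselves. A minimal sketch of an implementation under the new contract, with a hypothetical mirror of the interface so the snippet is self-contained:

    public class AllocatorContractSketch {
        // Mirror of the new single-method contract.
        interface ShardsAllocator {
            /** @return true if the allocation changed */
            boolean allocate(Allocation allocation);
        }

        interface Allocation { int unassignedCount(); }

        // An implementation is expected to cover all three phases itself:
        // assign unassigned shards, move shards that cannot remain, rebalance.
        static class NoopAllocator implements ShardsAllocator {
            @Override
            public boolean allocate(Allocation allocation) {
                boolean changed = false;
                // changed |= allocateUnassigned(allocation);
                // changed |= moveShards(allocation);
                // changed |= balance(allocation);
                return changed;
            }
        }

        public static void main(String[] args) {
            System.out.println(new NoopAllocator().allocate(() -> 0)); // false: nothing changed
        }
    }
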
@@ -1,100 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.routing.allocation.allocator;
-
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.gateway.GatewayAllocator;
-
-/**
- * The {@link ShardsAllocator} class offers methods for allocating shard within a cluster.
- * These methods include moving shards and re-balancing the cluster. It also allows management
- * of shards by their state.
- */
-public class ShardsAllocators extends AbstractComponent implements ShardsAllocator {
-
-    private final GatewayAllocator gatewayAllocator;
-    private final ShardsAllocator allocator;
-
-    public ShardsAllocators(GatewayAllocator allocator) {
-        this(Settings.Builder.EMPTY_SETTINGS, allocator);
-    }
-
-    public ShardsAllocators(Settings settings, GatewayAllocator allocator) {
-        this(settings, allocator, new BalancedShardsAllocator(settings));
-    }
-
-    @Inject
-    public ShardsAllocators(Settings settings, GatewayAllocator gatewayAllocator, ShardsAllocator allocator) {
-        super(settings);
-        this.gatewayAllocator = gatewayAllocator;
-        this.allocator = allocator;
-    }
-
-    @Override
-    public void applyStartedShards(StartedRerouteAllocation allocation) {
-        gatewayAllocator.applyStartedShards(allocation);
-        allocator.applyStartedShards(allocation);
-    }
-
-    @Override
-    public void applyFailedShards(FailedRerouteAllocation allocation) {
-        gatewayAllocator.applyFailedShards(allocation);
-        allocator.applyFailedShards(allocation);
-    }
-
-    @Override
-    public boolean allocateUnassigned(RoutingAllocation allocation) {
-        boolean changed = false;
-        changed |= gatewayAllocator.allocateUnassigned(allocation);
-        changed |= allocator.allocateUnassigned(allocation);
-        return changed;
-    }
-
-    protected long nanoTime() {
-        return System.nanoTime();
-    }
-
-    @Override
-    public boolean rebalance(RoutingAllocation allocation) {
-        if (allocation.hasPendingAsyncFetch() == false) {
-            /*
-             * see https://github.com/elastic/elasticsearch/issues/14387
-             * if we allow rebalance operations while we are still fetching shard store data
-             * we might end up with unnecessary rebalance operations which can be super confusion/frustrating
-             * since once the fetches come back we might just move all the shards back again.
-             * Therefore we only do a rebalance if we have fetched all information.
-             */
-            return allocator.rebalance(allocation);
-        } else {
-            logger.debug("skipping rebalance due to in-flight shard/store fetches");
-            return false;
-        }
-    }
-
-    @Override
-    public boolean moveShards(RoutingAllocation allocation) {
-        return allocator.moveShards(allocation);
-    }
-}
@@ -19,7 +19,6 @@

 package org.elasticsearch.cluster.service;

-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
@@ -32,19 +31,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.LocalNodeMasterListener;
+import org.elasticsearch.cluster.NodeConnectionsService;
 import org.elasticsearch.cluster.TimeoutClusterStateListener;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodeService;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.OperationRouting;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.Randomness;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -55,7 +53,6 @@ import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.Text;
-import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.CountDown;
@@ -66,9 +63,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
 import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
 import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportService;

 import java.util.ArrayList;
 import java.util.Collection;
@@ -79,8 +74,6 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Queue;
-import java.util.Random;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Future;
@@ -100,26 +93,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
     public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =
         Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30),
             Property.Dynamic, Property.NodeScope);
-    public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING =
-        Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);

     public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
-    public static final Setting<Long> NODE_ID_SEED_SETTING =
-        // don't use node.id.seed so it won't be seen as an attribute
-        Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);
     private final ThreadPool threadPool;

     private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;

     private final OperationRouting operationRouting;

-    private final TransportService transportService;
-
     private final ClusterSettings clusterSettings;
-    private final DiscoveryNodeService discoveryNodeService;
-    private final Version version;
-
-    private final TimeValue reconnectInterval;

     private TimeValue slowTaskLoggingThreshold;
@@ -144,47 +126,49 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

     private final ClusterBlocks.Builder initialBlocks;

-    private final TaskManager taskManager;
-
-    private volatile ScheduledFuture reconnectToNodes;
+    private NodeConnectionsService nodeConnectionsService;

     @Inject
-    public InternalClusterService(Settings settings, OperationRouting operationRouting, TransportService transportService,
-                                  ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
+    public InternalClusterService(Settings settings, OperationRouting operationRouting,
+                                  ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName) {
         super(settings);
         this.operationRouting = operationRouting;
-        this.transportService = transportService;
         this.threadPool = threadPool;
         this.clusterSettings = clusterSettings;
-        this.discoveryNodeService = discoveryNodeService;
-        this.version = version;

         // will be replaced on doStart.
         this.clusterState = ClusterState.builder(clusterName).build();

         this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);

-        this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings);
-
         this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);

         localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);

         initialBlocks = ClusterBlocks.builder();
-
-        taskManager = transportService.getTaskManager();
     }

     private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {
         this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
     }

-    public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
+    synchronized public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
         clusterStatePublisher = publisher;
     }

+    synchronized public void setLocalNode(DiscoveryNode localNode) {
+        assert clusterState.nodes().localNodeId() == null : "local node is already set";
+        DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id());
+        this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
+    }
+
+    synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
+        assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
+        this.nodeConnectionsService = nodeConnectionsService;
+    }
+
     @Override
-    public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+    synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
         if (lifecycle.started()) {
             throw new IllegalStateException("can't set initial block when started");
         }
@@ -192,12 +176,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
     }

     @Override
-    public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+    synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
         removeInitialStateBlock(block.id());
     }

     @Override
-    public void removeInitialStateBlock(int blockId) throws IllegalStateException {
+    synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException {
         if (lifecycle.started()) {
             throw new IllegalStateException("can't set initial block when started");
         }
@@ -205,26 +189,18 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
     }

     @Override
-    protected void doStart() {
+    synchronized protected void doStart() {
         Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
+        Objects.requireNonNull(clusterState.nodes().localNode(), "please set the local node before starting");
+        Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
         add(localNodeMasterListeners);
-        add(taskManager);
-        this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
         this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
-        this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
-        Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
-        // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
-        final String nodeId = generateNodeId(settings);
-        final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
-        DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
-        DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
-        this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
-        this.transportService.setLocalNode(localNode);
+        this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
     }

     @Override
-    protected void doStop() {
-        FutureUtils.cancel(this.reconnectToNodes);
+    synchronized protected void doStop() {
         for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
             onGoingTimeout.cancel();
             onGoingTimeout.listener.onClose();
@@ -234,7 +210,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
     }

     @Override
-    protected void doClose() {
+    synchronized protected void doClose() {
     }

     @Override
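
Aside: the doStart hunk above adds fail-fast requireNonNull checks, so the service refuses to start until its collaborators have been injected via the new synchronized setters. A minimal sketch of that wiring order, with plain Object stand-ins for the publisher, local node, and connection service types:

    import java.util.Objects;

    public class LifecycleWiring {
        // Hypothetical stand-ins for the collaborator types.
        private Object clusterStatePublisher;
        private Object localNode;
        private Object nodeConnectionsService;

        synchronized void setClusterStatePublisher(Object publisher) { this.clusterStatePublisher = publisher; }
        synchronized void setLocalNode(Object node) { this.localNode = node; }
        synchronized void setNodeConnectionsService(Object svc) { this.nodeConnectionsService = svc; }

        synchronized void doStart() {
            // Same fail-fast checks the diff adds to doStart().
            Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
            Objects.requireNonNull(localNode, "please set the local node before starting");
            Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
        }

        public static void main(String[] args) {
            LifecycleWiring service = new LifecycleWiring();
            service.setClusterStatePublisher(new Object());
            service.setLocalNode(new Object());
            service.setNodeConnectionsService(new Object());
            service.doStart(); // throws NullPointerException if any setter was skipped
        }
    }
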
@@ -404,11 +380,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         return updateTasksExecutor.getMaxTaskWaitTime();
     }

-    @Override
-    public TaskManager getTaskManager() {
-        return taskManager;
-    }
-
     /** asserts that the current thread is the cluster state update thread */
     public boolean assertClusterStateThread() {
         assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
@@ -461,18 +432,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
             return;
         }
         ClusterStateTaskExecutor.BatchResult<T> batchResult;
-        long startTimeNS = System.nanoTime();
+        long startTimeNS = currentTimeInNanos();
         try {
             List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
             batchResult = executor.execute(previousClusterState, inputs);
         } catch (Throwable e) {
-            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
+            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
-                StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
-                sb.append(previousClusterState.nodes().prettyPrint());
-                sb.append(previousClusterState.routingTable().prettyPrint());
-                sb.append(previousClusterState.getRoutingNodes().prettyPrint());
-                logger.trace(sb.toString(), e);
+                logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, previousClusterState.version(), source,
+                    previousClusterState.nodes().prettyPrint(), previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint());
             }
             warnAboutSlowTaskIfNeeded(executionTime, source);
             batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
@@ -513,8 +481,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source);
return;
}
@@ -555,9 +523,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);

if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
sb.append(newClusterState.prettyPrint());
logger.trace(sb.toString());
logger.trace("cluster state updated, source [{}]\n{}", source, newClusterState.prettyPrint());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
@@ -572,15 +538,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}

// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
nodeConnectionsService.connectToAddedNodes(clusterChangedEvent);

// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
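The inline connect loop above is folded into the new nodeConnectionsService collaborator; only two calls on it are visible in this diff. A rough sketch of that seam as inferred from the call sites (the interface name and shape here are illustrative, not the real class):

// sketch inferred from the two call sites in this diff; the real NodeConnectionsService carries more state
interface NodeConnections {
    // connect to every node in event.nodesDelta().addedNodes(), logging failures instead of throwing
    void connectToAddedNodes(ClusterChangedEvent event);
    // disconnect from every node in event.nodesDelta().removedNodes()
    void disconnectFromRemovedNodes(ClusterChangedEvent event);
}
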
@@ -616,13 +574,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}

for (DiscoveryNode node : nodesDelta.removedNodes()) {
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent);

newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);

@@ -653,21 +605,22 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
}

TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.getRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
// TODO: do we want to call updateTask.onFailure here?
}

}

// this one is overridden in tests so we can control time
protected long currentTimeInNanos() {return System.nanoTime();}

private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
if (listener instanceof AckedClusterStateTaskListener) {
return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
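The switch from System.nanoTime() to the protected currentTimeInNanos() is what makes these timings testable: a test subclass can substitute a manual clock, as the comment above says. A self-contained sketch of the pattern, with illustrative names:

// production code routes all time reads through an overridable method
class Timed {
    protected long currentTimeInNanos() { return System.nanoTime(); }

    long elapsedMillis(long startNanos) {
        return Math.max(0, (currentTimeInNanos() - startNanos) / 1_000_000);
    }
}

// a test subclass pins the clock and advances it explicitly
class FakeTimed extends Timed {
    long nanos = 0;
    @Override protected long currentTimeInNanos() { return nanos; }
}
// in a test: fake.nanos += 5_000_000; so fake.elapsedMillis(0) now reports exactly 5
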
@@ -781,7 +734,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
}
}

@@ -813,64 +766,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}

private class ReconnectToNodes implements Runnable {

private ConcurrentMap<DiscoveryNode, Integer> failureCount = ConcurrentCollections.newConcurrentMap();

@Override
public void run() {
// master node will check against all nodes if its alive with certain discoveries implementations,
// but we can't rely on that, so we check on it as well
for (DiscoveryNode node : clusterState.nodes()) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
if (!transportService.nodeConnected(node)) {
try {
transportService.connectToNode(node);
} catch (Exception e) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe its gone?
Integer nodeFailureCount = failureCount.get(node);
if (nodeFailureCount == null) {
nodeFailureCount = 1;
} else {
nodeFailureCount = nodeFailureCount + 1;
}
// log every 6th failure
if ((nodeFailureCount % 6) == 0) {
// reset the failure count...
nodeFailureCount = 0;
logger.warn("failed to reconnect to node {}", e, node);
}
failureCount.put(node, nodeFailureCount);
}
}
}
}
}
// go over and remove failed nodes that have been removed
DiscoveryNodes nodes = clusterState.nodes();
for (Iterator<DiscoveryNode> failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) {
DiscoveryNode failedNode = failedNodesIt.next();
if (!nodes.nodeExists(failedNode.id())) {
failedNodesIt.remove();
}
}
if (lifecycle.started()) {
reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}

public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
return Strings.randomBase64UUID(random);
}

private static class LocalNodeMasterListeners implements ClusterStateListener {

private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>();

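generateNodeId draws its Random from the settings-backed NODE_ID_SEED_SETTING, so pinning the seed yields a reproducible node id, which is what tests use it for. A sketch of that property; the setting key "node_id.seed" is an assumption here, the constant above is authoritative:

// the key "node_id.seed" is assumed; NODE_ID_SEED_SETTING is the source of truth
Settings settings = Settings.builder().put("node_id.seed", 42L).build();
String a = InternalClusterService.generateNodeId(settings);
String b = InternalClusterService.generateNodeId(settings);
assert a.equals(b); // same explicit seed -> same base64 UUID
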
@@ -18,26 +18,23 @@
*/
package org.elasticsearch.common;

import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;

import java.util.EnumSet;
import java.util.HashSet;

/**
* Holds a field that can be found in a request while parsing and its different variants, which may be deprecated.
*/
public class ParseField {

private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class));

private final String camelCaseName;
private final String underscoreName;
private final String[] deprecatedNames;
private String allReplacedWith = null;

static final EnumSet<Flag> EMPTY_FLAGS = EnumSet.noneOf(Flag.class);
static final EnumSet<Flag> STRICT_FLAGS = EnumSet.of(Flag.STRICT);

enum Flag {
STRICT
}

public ParseField(String value, String... deprecatedNames) {
camelCaseName = Strings.toCamelCase(value);
underscoreName = Strings.toUnderscoreCase(value);

@@ -80,19 +77,21 @@ public class ParseField {
return parseField;
}

boolean match(String currentFieldName, EnumSet<Flag> flags) {
boolean match(String currentFieldName, boolean strict) {
if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) {
return true;
}
String msg;
for (String depName : deprecatedNames) {
if (currentFieldName.equals(depName)) {
if (flags.contains(Flag.STRICT)) {
msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
if (allReplacedWith != null) {
msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
}
if (strict) {
throw new IllegalArgumentException(msg);
} else {
DEPRECATION_LOGGER.deprecated(msg);
}
return true;
}

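Behaviorally the rewritten match() is unchanged: a primary name matches in either underscore or camelCase form, and a deprecated name also matches but now either logs through the DeprecationLogger (lenient) or throws (strict), driven by a plain boolean instead of the flag set. A sketch with illustrative field names; match() is package-private, so callers go through ParseFieldMatcher:

ParseField field = new ParseField("fuzzy_rewrite", "fuzzy_rewrite_old"); // primary plus one deprecated name

ParseFieldMatcher.EMPTY.match("fuzzy_rewrite", field);      // true
ParseFieldMatcher.EMPTY.match("fuzzyRewrite", field);       // true, camelCase variant of the primary name
ParseFieldMatcher.EMPTY.match("fuzzy_rewrite_old", field);  // true, logs a deprecation warning
ParseFieldMatcher.STRICT.match("fuzzy_rewrite_old", field); // throws IllegalArgumentException
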
@@ -21,29 +21,28 @@ package org.elasticsearch.common;

import org.elasticsearch.common.settings.Settings;

import java.util.EnumSet;

/**
* Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField}
* against a field name and throw deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting.
*/
public class ParseFieldMatcher {
public static final String PARSE_STRICT = "index.query.parse.strict";
public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(ParseField.EMPTY_FLAGS);
public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(ParseField.STRICT_FLAGS);
public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false);
public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true);

private final EnumSet<ParseField.Flag> parseFlags;
private final boolean strict;

public ParseFieldMatcher(Settings settings) {
if (settings.getAsBoolean(PARSE_STRICT, false)) {
this.parseFlags = EnumSet.of(ParseField.Flag.STRICT);
} else {
this.parseFlags = ParseField.EMPTY_FLAGS;
}
this(settings.getAsBoolean(PARSE_STRICT, false));
}

public ParseFieldMatcher(EnumSet<ParseField.Flag> parseFlags) {
this.parseFlags = parseFlags;
public ParseFieldMatcher(boolean strict) {
this.strict = strict;
}

/** Should deprecated settings be rejected? */
public boolean isStrict() {
return strict;
}

/**

@@ -55,6 +54,6 @@ public class ParseFieldMatcher {
* @return true whenever the parse field that we are looking for was found, false otherwise
*/
public boolean match(String fieldName, ParseField parseField) {
return parseField.match(fieldName, parseFlags);
return parseField.match(fieldName, strict);
}
}

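Callers are unaffected by the EnumSet-to-boolean change: the settings-driven constructor and the two shared constants keep their meaning. A short usage sketch:

// strictness driven by settings, using the PARSE_STRICT key above
Settings settings = Settings.builder().put(ParseFieldMatcher.PARSE_STRICT, true).build();
ParseFieldMatcher matcher = new ParseFieldMatcher(settings);
assert matcher.isStrict();

// or the shared constants, which avoid allocating per-request matchers
ParseFieldMatcher lenient = ParseFieldMatcher.EMPTY;  // logs deprecated names
ParseFieldMatcher strict = ParseFieldMatcher.STRICT;  // rejects deprecated names
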
@@ -0,0 +1,33 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;


import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to suppress logging usage checks errors inside a whole class or a method.
*/
@Retention(RetentionPolicy.CLASS)
@Target({ ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.TYPE })
public @interface SuppressLoggerChecks {
String reason();
}

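RetentionPolicy.CLASS keeps the annotation in the class files, where the bytecode-level logger-usage check can read it, without any runtime reflection cost. A hypothetical, method-scoped opt-out (the method and its body are illustrative only):

// hypothetical example; the body is what the checker would otherwise flag
@SuppressLoggerChecks(reason = "message is assembled from compile-time constants only")
void logStartupBanner(ESLogger logger) {
    String banner = "starting " + "node"; // non-constant format string from the checker's perspective
    logger.info(banner);
}
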
@@ -93,7 +93,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
final String message = "[" + this.name + "] Data too large, data for [" +
fieldName + "] would be larger than limit of [" +
memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
logger.debug(message);
logger.debug("{}", message);
throw new CircuitBreakingException(message,
bytesNeeded, this.memoryBytesLimit);
}

@@ -81,7 +81,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker {
this.trippedCount.incrementAndGet();
final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" +
memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
logger.debug(message);
logger.debug("{}", message);
throw new CircuitBreakingException(message);
}

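Both circuit-breaker changes are the same one-line fix: the pre-built message moves from the format-string position into an argument. The logger-usage check wants a constant format string, and passing user-derived text (here a field name) as the format also risks any literal {} in it being treated as a placeholder. The pattern in isolation, as a sketch:

static void trip(ESLogger logger, String fieldName) {
    final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit";
    // logger.debug(message);   // flagged: non-constant format string built from user input
    logger.debug("{}", message); // accepted: constant format, the assembled message is just an argument
}
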
@@ -1,138 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.cli;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
* A helper command that checks if configured paths have been changed when running a CLI command.
* It is only executed in case of specified paths by the command and if the paths underlying filesystem
* supports posix permissions.
*
* If this is the case, a warn message is issued whenever an owner, a group or the file permissions is changed by
* the command being executed and not configured back to its prior state, which should be the task of the command
* being executed.
*
*/
public abstract class CheckFileCommand extends CliTool.Command {

public CheckFileCommand(Terminal terminal) {
super(terminal);
}

/**
* abstract method, which should implement the same logic as CliTool.Command.execute(), but is wrapped
*/
public abstract CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception;

/**
* Returns the array of paths, that should be checked if the permissions, user or groups have changed
* before and after execution of the command
*
*/
protected abstract Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception;

@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
Path[] paths = pathsForPermissionsCheck(settings, env);

if (paths == null || paths.length == 0) {
return doExecute(settings, env);
}

Map<Path, Set<PosixFilePermission>> permissions = new HashMap<>(paths.length);
Map<Path, String> owners = new HashMap<>(paths.length);
Map<Path, String> groups = new HashMap<>(paths.length);

if (paths != null && paths.length > 0) {
for (Path path : paths) {
try {
boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class);
if (supportsPosixPermissions) {
PosixFileAttributes attributes = Files.readAttributes(path, PosixFileAttributes.class);
permissions.put(path, attributes.permissions());
owners.put(path, attributes.owner().getName());
groups.put(path, attributes.group().getName());
}
} catch (IOException e) {
// silently swallow if not supported, no need to log things
}
}
}

CliTool.ExitStatus status = doExecute(settings, env);

// check if permissions differ
for (Map.Entry<Path, Set<PosixFilePermission>> entry : permissions.entrySet()) {
if (!Files.exists(entry.getKey())) {
continue;
}

Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed "
+ "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+ "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!");
}
}

// check if owner differs
for (Map.Entry<Path, String> entry : owners.entrySet()) {
if (!Files.exists(entry.getKey())) {
continue;
}

String ownerBeforeWrite = entry.getValue();
String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
}
}

// check if group differs
for (Map.Entry<Path, String> entry : groups.entrySet()) {
if (!Files.exists(entry.getKey())) {
continue;
}

String groupBeforeWrite = entry.getValue();
String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
if (!groupAfterWrite.equals(groupBeforeWrite)) {
terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
}
}

return status;
}
}

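The deleted CheckFileCommand built its before/after snapshot from plain java.nio.file attribute APIs; nothing Elasticsearch-specific is involved. The core of the check, reduced to a JDK-only sketch:

import java.nio.file.*;
import java.nio.file.attribute.*;

class PosixSnapshot {
    static String describe(Path path) throws Exception {
        // skip filesystems (e.g. Windows) that do not expose POSIX attributes
        if (!Files.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class)) {
            return "posix attributes unsupported";
        }
        PosixFileAttributes attrs = Files.readAttributes(path, PosixFileAttributes.class);
        return PosixFilePermissions.toString(attrs.permissions()) // e.g. "rw-r--r--"
                + " " + attrs.owner().getName()
                + " " + attrs.group().getName();
    }
}
// comparing describe(path) before and after a command runs reproduces the warning logic above
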
@@ -1,250 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.cli;

import org.apache.commons.cli.AlreadySelectedException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;

import java.util.Locale;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;

/**
* A base class for command-line interface tool.
*
* Two modes are supported:
*
* - Single command mode. The tool exposes a single command that can potentially accept arguments (eg. CLI options).
* - Multi command mode. The tool support multiple commands, each for different tasks, each potentially accepts arguments.
*
* In a multi-command mode. The first argument must be the command name. For example, the plugin manager
* can be seen as a multi-command tool with two possible commands: install and uninstall
*
* The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their
* potential options. The tool also comes with out of the box simple help support (the -h/--help option is
* automatically handled) where the help text is configured in a dedicated *.help files located in the same package
* as the tool.
*/
public abstract class CliTool {

// based on sysexits.h
public enum ExitStatus {
OK(0),
OK_AND_EXIT(0),
USAGE(64), /* command line usage error */
DATA_ERROR(65), /* data format error */
NO_INPUT(66), /* cannot open input */
NO_USER(67), /* addressee unknown */
NO_HOST(68), /* host name unknown */
UNAVAILABLE(69), /* service unavailable */
CODE_ERROR(70), /* internal software error */
CANT_CREATE(73), /* can't create (user) output file */
IO_ERROR(74), /* input/output error */
TEMP_FAILURE(75), /* temp failure; user is invited to retry */
PROTOCOL(76), /* remote error in protocol */
NOPERM(77), /* permission denied */
CONFIG(78); /* configuration error */

final int status;

ExitStatus(int status) {
this.status = status;
}

public int status() {
return status;
}
}

protected final Terminal terminal;
protected final Environment env;
protected final Settings settings;

private final CliToolConfig config;

protected CliTool(CliToolConfig config) {
this(config, Terminal.DEFAULT);
}

protected CliTool(CliToolConfig config, Terminal terminal) {
if (config.cmds().size() == 0) {
throw new IllegalArgumentException("At least one command must be configured");
}
this.config = config;
this.terminal = terminal;
env = InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal);
settings = env.settings();
}

public final ExitStatus execute(String... args) throws Exception {

// first lets see if the user requests tool help. We're doing it only if
// this is a multi-command tool. If it's a single command tool, the -h/--help
// option will be taken care of on the command level
if (!config.isSingle() && args.length > 0 && (args[0].equals("-h") || args[0].equals("--help"))) {
config.printUsage(terminal);
return ExitStatus.OK_AND_EXIT;
}

CliToolConfig.Cmd cmd;
if (config.isSingle()) {
cmd = config.single();
} else {

if (args.length == 0) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified");
config.printUsage(terminal);
return ExitStatus.USAGE;
}

String cmdName = args[0];
cmd = config.cmd(cmdName);
if (cmd == null) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. Use [-h] option to list available commands");
return ExitStatus.USAGE;
}

// we now remove the command name from the args
if (args.length == 1) {
args = new String[0];
} else {
String[] cmdArgs = new String[args.length - 1];
System.arraycopy(args, 1, cmdArgs, 0, cmdArgs.length);
args = cmdArgs;
}
}

try {
return parse(cmd, args).execute(settings, env);
} catch (UserError error) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage());
return error.exitStatus;
}
}

public Command parse(String cmdName, String[] args) throws Exception {
CliToolConfig.Cmd cmd = config.cmd(cmdName);
return parse(cmd, args);
}

public Command parse(CliToolConfig.Cmd cmd, String[] args) throws Exception {
CommandLineParser parser = new DefaultParser();
CommandLine cli = parser.parse(CliToolConfig.OptionsSource.HELP.options(), args, true);
if (cli.hasOption("h")) {
return helpCmd(cmd);
}
try {
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
} catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
// intentionally drop the stack trace here as these are really user errors,
// the stack trace into cli parsing lib is not important
throw new UserError(ExitStatus.USAGE, e.toString());
}

if (cli.hasOption("v")) {
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
} else if (cli.hasOption("s")) {
terminal.setVerbosity(Terminal.Verbosity.SILENT);
} else {
terminal.setVerbosity(Terminal.Verbosity.NORMAL);
}
return parse(cmd.name(), cli);
}

protected Command.Help helpCmd(CliToolConfig.Cmd cmd) {
return new Command.Help(cmd, terminal);
}

protected static Command.Exit exitCmd(ExitStatus status) {
return new Command.Exit(null, status, null);
}

protected static Command.Exit exitCmd(ExitStatus status, Terminal terminal, String msg, Object... args) {
return new Command.Exit(String.format(Locale.ROOT, msg, args), status, terminal);
}

protected abstract Command parse(String cmdName, CommandLine cli) throws Exception;

public static abstract class Command {

protected final Terminal terminal;

protected Command(Terminal terminal) {
this.terminal = terminal;
}

public abstract ExitStatus execute(Settings settings, Environment env) throws Exception;

public static class Help extends Command {

private final CliToolConfig.Cmd cmd;

private Help(CliToolConfig.Cmd cmd, Terminal terminal) {
super(terminal);
this.cmd = cmd;
}

@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
cmd.printUsage(terminal);
return ExitStatus.OK_AND_EXIT;
}
}

public static class Exit extends Command {
private final String msg;
private final ExitStatus status;

private Exit(String msg, ExitStatus status, Terminal terminal) {
super(terminal);
this.msg = msg;
this.status = status;
}

@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
if (msg != null) {
if (status != ExitStatus.OK) {
terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg);
} else {
terminal.println(msg);
}
}
return status;
}

public ExitStatus status() {
return status;
}
}
}

}

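For orientation while reading the removal: a minimal single-command tool on this base class looked roughly as follows. All names below are illustrative, not from the codebase:

class HelloTool extends CliTool {
    private static final CliToolConfig CONFIG = CliToolConfig.config("hello", HelloTool.class)
            .cmds(CliToolConfig.Builder.cmd("hello", Hello.class))
            .build();

    HelloTool() { super(CONFIG); }

    @Override
    protected Command parse(String cmdName, CommandLine cli) {
        return new Hello(terminal);
    }

    static class Hello extends Command {
        Hello(Terminal terminal) { super(terminal); }

        @Override
        public ExitStatus execute(Settings settings, Environment env) {
            terminal.println("hello"); // single-command mode: no command name on the CLI
            return ExitStatus.OK;
        }
    }
}
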
@@ -1,302 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.cli;

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
*
*/
public class CliToolConfig {

public static Builder config(String name, Class<? extends CliTool> toolType) {
return new Builder(name, toolType);
}

private final Class<? extends CliTool> toolType;
private final String name;
private final Map<String, Cmd> cmds;

private static final HelpPrinter helpPrinter = new HelpPrinter();

private CliToolConfig(String name, Class<? extends CliTool> toolType, Cmd[] cmds) {
this.name = name;
this.toolType = toolType;
final Map<String, Cmd> cmdsMapping = new HashMap<>();
for (int i = 0; i < cmds.length; i++) {
cmdsMapping.put(cmds[i].name, cmds[i]);
}
this.cmds = Collections.unmodifiableMap(cmdsMapping);
}

public boolean isSingle() {
return cmds.size() == 1;
}

public Cmd single() {
assert isSingle() : "Requesting single command on a multi-command tool";
return cmds.values().iterator().next();
}

public Class<? extends CliTool> toolType() {
return toolType;
}

public String name() {
return name;
}

public Collection<Cmd> cmds() {
return cmds.values();
}

public Cmd cmd(String name) {
return cmds.get(name);
}

public void printUsage(Terminal terminal) {
helpPrinter.print(this, terminal);
}

public static class Builder {

public static Cmd.Builder cmd(String name, Class<? extends CliTool.Command> cmdType) {
return new Cmd.Builder(name, cmdType);
}

public static OptionBuilder option(String shortName, String longName) {
return new OptionBuilder(shortName, longName);
}

public static Option.Builder optionBuilder(String shortName, String longName) {
return Option.builder(shortName).argName(longName).longOpt(longName);
}

public static OptionGroupBuilder optionGroup(boolean required) {
return new OptionGroupBuilder(required);
}

private final Class<? extends CliTool> toolType;
private final String name;
private Cmd[] cmds;

private Builder(String name, Class<? extends CliTool> toolType) {
this.name = name;
this.toolType = toolType;
}

public Builder cmds(Cmd.Builder... cmds) {
this.cmds = new Cmd[cmds.length];
for (int i = 0; i < cmds.length; i++) {
this.cmds[i] = cmds[i].build();
this.cmds[i].toolName = name;
}
return this;
}

public Builder cmds(Cmd... cmds) {
for (int i = 0; i < cmds.length; i++) {
cmds[i].toolName = name;
}
this.cmds = cmds;
return this;
}

public CliToolConfig build() {
return new CliToolConfig(name, toolType, cmds);
}
}

public static class Cmd {

private String toolName;
private final String name;
private final Class<? extends CliTool.Command> cmdType;
private final Options options;
private final boolean stopAtNonOption;

private Cmd(String name, Class<? extends CliTool.Command> cmdType, Options options, boolean stopAtNonOption) {
this.name = name;
this.cmdType = cmdType;
this.options = options;
this.stopAtNonOption = stopAtNonOption;
OptionsSource.VERBOSITY.populate(options);
}

public Class<? extends CliTool.Command> cmdType() {
return cmdType;
}

public String name() {
return name;
}

public Options options() {
return options;
}

public boolean isStopAtNonOption() {
return stopAtNonOption;
}

public void printUsage(Terminal terminal) {
helpPrinter.print(toolName, this, terminal);
}

public static class Builder {

private final String name;
private final Class<? extends CliTool.Command> cmdType;
private Options options = new Options();
private boolean stopAtNonOption = false;

private Builder(String name, Class<? extends CliTool.Command> cmdType) {
this.name = name;
this.cmdType = cmdType;
}

public Builder options(OptionBuilder... optionBuilder) {
for (int i = 0; i < optionBuilder.length; i++) {
options.addOption(optionBuilder[i].build());
}
return this;
}

public Builder options(Option.Builder... optionBuilders) {
for (int i = 0; i < optionBuilders.length; i++) {
options.addOption(optionBuilders[i].build());
}
return this;
}

public Builder optionGroups(OptionGroupBuilder... optionGroupBuilders) {
for (OptionGroupBuilder builder : optionGroupBuilders) {
options.addOptionGroup(builder.build());
}
return this;
}

/**
* @param stopAtNonOption if <tt>true</tt> an unrecognized argument stops
* the parsing and the remaining arguments are added to the
* args list. If <tt>false</tt> an unrecognized
* argument triggers a ParseException.
*/
public Builder stopAtNonOption(boolean stopAtNonOption) {
this.stopAtNonOption = stopAtNonOption;
return this;
}

public Cmd build() {
return new Cmd(name, cmdType, options, stopAtNonOption);
}
}
}

public static class OptionBuilder {

private final Option option;

private OptionBuilder(String shortName, String longName) {
option = new Option(shortName, "");
option.setLongOpt(longName);
option.setArgName(longName);
}

public OptionBuilder required(boolean required) {
option.setRequired(required);
return this;
}

public OptionBuilder hasArg(boolean optional) {
option.setOptionalArg(optional);
option.setArgs(1);
return this;
}

public Option build() {
return option;
}
}

public static class OptionGroupBuilder {

private OptionGroup group;

private OptionGroupBuilder(boolean required) {
group = new OptionGroup();
group.setRequired(required);
}

public OptionGroupBuilder options(OptionBuilder... optionBuilders) {
for (OptionBuilder builder : optionBuilders) {
group.addOption(builder.build());
}
return this;
}

public OptionGroup build() {
return group;
}

}

static abstract class OptionsSource {

static final OptionsSource HELP = new OptionsSource() {

@Override
void populate(Options options) {
options.addOption(new OptionBuilder("h", "help").required(false).build());
}
};

static final OptionsSource VERBOSITY = new OptionsSource() {
@Override
void populate(Options options) {
OptionGroup verbosityGroup = new OptionGroup();
verbosityGroup.setRequired(false);
verbosityGroup.addOption(new OptionBuilder("s", "silent").required(false).build());
verbosityGroup.addOption(new OptionBuilder("v", "verbose").required(false).build());
options.addOptionGroup(verbosityGroup);
}
};

private Options options;

Options options() {
if (options == null) {
options = new Options();
populate(options);
}
return options;
}

abstract void populate(Options options);

}
}

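Commands wired their flags through the nested builders above; for example, an install command with an optional --url flag taking one argument, plus a required choice between --batch and --interactive, would have been declared roughly like this (command and option names are illustrative, and InstallCommand is hypothetical):

CliToolConfig.Builder.cmd("install", InstallCommand.class)
        // hasArg(false) means the flag's single argument is mandatory when the flag is given
        .options(CliToolConfig.Builder.option("u", "url").required(false).hasArg(false))
        .optionGroups(CliToolConfig.Builder.optionGroup(true)
                .options(CliToolConfig.Builder.option("b", "batch"),
                         CliToolConfig.Builder.option("i", "interactive")));
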
@@ -1,57 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.cli;

import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.util.Callback;

import java.io.IOException;
import java.io.InputStream;

/**
*
*/
public class HelpPrinter {

private static final String HELP_FILE_EXT = ".help";

public void print(CliToolConfig config, Terminal terminal) {
print(config.toolType(), config.name(), terminal);
}

public void print(String toolName, CliToolConfig.Cmd cmd, Terminal terminal) {
print(cmd.cmdType(), toolName + "-" + cmd.name(), terminal);
}

private static void print(Class clazz, String name, final Terminal terminal) {
terminal.println(Terminal.Verbosity.SILENT, "");
try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) {
Streams.readAllLines(input, new Callback<String>() {
@Override
public void handle(String line) {
terminal.println(Terminal.Verbosity.SILENT, line);
}
});
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
terminal.println(Terminal.Verbosity.SILENT, "");
}
}

@@ -29,7 +29,7 @@ public class ShapesAvailability {
static {
boolean xSPATIAL4J_AVAILABLE;
try {
Class.forName("com.spatial4j.core.shape.impl.PointImpl");
Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");
xSPATIAL4J_AVAILABLE = true;
} catch (Throwable t) {
xSPATIAL4J_AVAILABLE = false;

@@ -19,9 +19,9 @@

package org.elasticsearch.common.geo;

import com.spatial4j.core.context.SpatialContext;
import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.ShapeCollection;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.ShapeCollection;

import java.util.List;

@@ -19,7 +19,7 @@

package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Circle;
import org.locationtech.spatial4j.shape.Circle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;

@@ -19,7 +19,7 @@

package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Rectangle;
import org.locationtech.spatial4j.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;

@@ -19,7 +19,7 @@

package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Shape;
import org.locationtech.spatial4j.shape.Shape;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.geo.XShapeCollection;

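Every geo hunk above is the same mechanical rename: Spatial4J moved to the LocationTech project with its 0.6 release, so the root package changed from com.spatial4j.core to org.locationtech.spatial4j, and the Maven group id moved the same way. Consuming code needs only an import swap, for example:

// before: import com.spatial4j.core.shape.Shape;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.shape.Shape;

class GeoProbe {
    static Shape world() {
        return SpatialContext.GEO.getWorldBounds(); // API surface unchanged by the relocation
    }
}
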
Some files were not shown because too many files have changed in this diff.