Merge branch 'master' into javadocs

Ryan Ernst 2015-11-19 10:43:43 -08:00
commit c3cb1fd08c
363 changed files with 6387 additions and 3171 deletions


@ -424,7 +424,7 @@ sudo bats $BATS/*rpm*.bats
If you wanted to retest all the release artifacts on a single VM you could:
-------------------------------------------------
gradle copyDepsToTestRoot
gradle prepareTestRoot
vagrant up trusty --provider virtualbox && vagrant ssh trusty
cd $TESTROOT
sudo bats $BATS/*.bats
@ -455,5 +455,9 @@ mvn -Dtests.coverage verify jacoco:report
== Debugging from an IDE
If you want to run elasticsearch from your IDE, you should execute gradle run
It opens a remote debugging port that you can connect with your IDE.
If you want to run elasticsearch from your IDE, the `gradle run` task
supports a remote debugging option:
---------------------------------------------------------------------------
gradle run --debug-jvm
---------------------------------------------------------------------------
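The JVM then suspends at startup until a debugger attaches; judging from the cluster formation changes further down in this commit, the debug agent listens on port 8000.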

Vagrantfile (vendored)

@ -251,10 +251,10 @@ def provision(config,
rm -rf /tmp/bats
}
cat \<\<VARS > /etc/profile.d/elasticsearch_vars.sh
export ZIP=/elasticsearch/distribution/zip/build/releases
export TAR=/elasticsearch/distribution/tar/build/releases
export RPM=/elasticsearch/distribution/rpm/build/releases
export DEB=/elasticsearch/distribution/deb/build/releases
export ZIP=/elasticsearch/distribution/zip/build/distributions
export TAR=/elasticsearch/distribution/tar/build/distributions
export RPM=/elasticsearch/distribution/rpm/build/distributions
export DEB=/elasticsearch/distribution/deb/build/distributions
export TESTROOT=/elasticsearch/qa/vagrant/build/testroot
export BATS=/elasticsearch/qa/vagrant/src/test/resources/packaging/scripts
VARS


@ -129,9 +129,6 @@ if (projectsPrefix.isEmpty()) {
vcs = 'Git'
}
}
tasks.cleanIdea {
delete '.idea'
}
}
// eclipse configuration
@ -170,5 +167,27 @@ task buildSrcEclipse(type: GradleBuild) {
}
tasks.eclipse.dependsOn(buildSrcEclipse)
task run(dependsOn: ':distribution:run')
task clean(type: GradleBuild) {
buildFile = 'buildSrc/build.gradle'
tasks = ['clean']
}
// we need to add the same --debug-jvm option as
// the real RunTask has, so we can pass it through
class Run extends DefaultTask {
boolean debug = false
@org.gradle.api.internal.tasks.options.Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
project.project(':distribution').run.clusterConfig.debug = enabled
}
}
task run(type: Run) {
dependsOn ':distribution:run'
description = 'Runs elasticsearch in the foreground'
group = 'Verification'
}
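Note that `@org.gradle.api.internal.tasks.options.Option` is internal Gradle API, hence the fully qualified annotation here; the same `--debug-jvm` option is declared on `RestIntegTestTask` and `RunTask` further down, so the flag behaves uniformly across the run tasks.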


@ -3,6 +3,9 @@ package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.ListenersList
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import groovy.xml.NamespaceBuilder
import groovy.xml.NamespaceBuilderSupport
import org.apache.tools.ant.BuildException
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.RuntimeConfigurable
import org.apache.tools.ant.UnknownElement
import org.gradle.api.DefaultTask
@ -180,39 +183,78 @@ class RandomizedTestingTask extends DefaultTask {
heartbeat: testLoggingConfig.slowTests.heartbeat,
dir: workingDir,
tempdir: new File(workingDir, 'temp'),
haltOnFailure: haltOnFailure,
haltOnFailure: true, // we want to capture when a build failed, but will decide whether to rethrow later
shuffleOnSlave: shuffleOnSlave
]
def junit4 = NamespaceBuilder.newInstance(ant, 'junit4')
junit4.junit4(attributes) {
classpath {
pathElement(path: classpath.asPath)
DefaultLogger listener = null
ByteArrayOutputStream antLoggingBuffer = null
if (logger.isInfoEnabled() == false) {
// in info logging, ant already outputs info level, so we see everything
// but on errors or when debugging, we want to see info level messages
// because junit4 emits jvm output with ant logging
if (testLoggingConfig.outputMode == TestLoggingConfiguration.OutputMode.ALWAYS) {
// we want all output, so just stream directly
listener = new DefaultLogger(
errorPrintStream: System.err,
outputPrintStream: System.out,
messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO)
} else {
// we want to buffer the info, and emit it if the test fails
antLoggingBuffer = new ByteArrayOutputStream()
PrintStream stream = new PrintStream(antLoggingBuffer, true, "UTF-8")
listener = new DefaultLogger(
errorPrintStream: stream,
outputPrintStream: stream,
messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO)
}
if (enableAssertions) {
jvmarg(value: '-ea')
}
if (enableSystemAssertions) {
jvmarg(value: '-esa')
}
for (String arg : jvmArgs) {
jvmarg(value: arg)
}
if (argLine != null) {
jvmarg(line: argLine)
}
fileset(dir: testClassesDir) {
for (String includePattern : patternSet.getIncludes()) {
include(name: includePattern)
project.ant.project.addBuildListener(listener)
}
NamespaceBuilderSupport junit4 = NamespaceBuilder.newInstance(ant, 'junit4')
try {
junit4.junit4(attributes) {
classpath {
pathElement(path: classpath.asPath)
}
for (String excludePattern : patternSet.getExcludes()) {
exclude(name: excludePattern)
if (enableAssertions) {
jvmarg(value: '-ea')
}
if (enableSystemAssertions) {
jvmarg(value: '-esa')
}
for (String arg : jvmArgs) {
jvmarg(value: arg)
}
if (argLine != null) {
jvmarg(line: argLine)
}
fileset(dir: testClassesDir) {
for (String includePattern : patternSet.getIncludes()) {
include(name: includePattern)
}
for (String excludePattern : patternSet.getExcludes()) {
exclude(name: excludePattern)
}
}
for (Map.Entry<String, String> prop : systemProperties) {
sysproperty key: prop.getKey(), value: prop.getValue()
}
makeListeners()
}
for (Map.Entry<String, String> prop : systemProperties) {
sysproperty key: prop.getKey(), value: prop.getValue()
} catch (BuildException e) {
if (antLoggingBuffer != null) {
logger.error('JUnit4 test failed, ant output was:')
logger.error(antLoggingBuffer.toString('UTF-8'))
}
makeListeners()
if (haltOnFailure) {
throw e;
}
}
if (listener != null) {
// remove the listener we added so other ant tasks don't have verbose logging!
project.ant.project.removeBuildListener(listener)
}
}
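The shape of this change is buffer-and-replay: capture ant's INFO-level output while the tests run, and only emit it if the build fails. A minimal self-contained sketch of the pattern (simplified; not the task's actual code):
---------------------------------------------------------------------------
// Buffer everything written to the sink; replay it only on failure.
boolean haltOnFailure = true
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
PrintStream sink = new PrintStream(buffer, true, 'UTF-8')
try {
    sink.println('verbose ant logging goes here') // stand-in for ant's INFO output
    throw new RuntimeException('simulated test failure')
} catch (RuntimeException e) {
    System.err.println('JUnit4 test failed, ant output was:')
    System.err.println(buffer.toString('UTF-8'))
    if (haltOnFailure) {
        throw e
    }
}
---------------------------------------------------------------------------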


@ -23,45 +23,163 @@ import com.carrotsearch.ant.tasks.junit4.JUnit4
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import org.gradle.logging.ProgressLogger
import org.gradle.logging.ProgressLoggerFactory
import org.junit.runner.Description
import java.util.concurrent.atomic.AtomicInteger
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.*
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static java.lang.Math.max
/**
* Adapts junit4's event listeners into gradle's ProgressLogger. Note that
* junit4 guarantees (via guava) that methods on this class won't be called by
* multiple threads simultaneously, which keeps the implementation simple.
*
* Every time a test finishes this class will update the logger. It will log
* the last finished test method on the logger line until the first suite
* finishes. Once the first suite finishes it always logs the last finished
* suite. This means that in test runs with a single suite the logger will be
* updated with the test name the whole time which is useful because these runs
* usually have longer individual tests. For test runs with lots of suites the
* majority of the time is spent showing the last suite that finished which is
* more useful for those test runs because test methods there tend to be very
* quick.
*/
class TestProgressLogger implements AggregatedEventListener {
/** Factory to build a progress logger when testing starts */
ProgressLoggerFactory factory
ProgressLogger progressLogger
int totalSuites;
AtomicInteger suitesCompleted = new AtomicInteger()
AtomicInteger testsCompleted = new AtomicInteger()
AtomicInteger testsFailed = new AtomicInteger()
AtomicInteger testsIgnored = new AtomicInteger()
int totalSuites
int totalSlaves
// sprintf formats used to align the integers we print
String suitesFormat
String slavesFormat
String testsFormat
// Counters incremented on test completion.
volatile int suitesCompleted = 0
volatile int testsCompleted = 0
volatile int testsFailed = 0
volatile int testsIgnored = 0
// Information about the last, most interesting event.
volatile String eventDescription
volatile int eventSlave
volatile long eventExecutionTime
/** Have we finished a whole suite yet? */
volatile boolean suiteFinished = false
/* Note that we probably overuse volatile here but it isn't hurting us and
lets us move things around without worrying about breaking things. */
@Subscribe
void onStart(AggregatedStartEvent e) throws IOException {
totalSuites = e.getSuiteCount();
totalSuites = e.suiteCount
totalSlaves = e.slaveCount
progressLogger = factory.newOperation(TestProgressLogger)
progressLogger.setDescription('Randomized test runner')
progressLogger.started()
progressLogger.progress('Starting JUnit4 with ' + e.getSlaveCount() + ' jvms')
progressLogger.progress(
"Starting JUnit4 for ${totalSuites} suites on ${totalSlaves} jvms")
suitesFormat = "%0${widthForTotal(totalSuites)}d"
slavesFormat = "%-${widthForTotal(totalSlaves)}s"
/* Just guess the number of tests because we can't figure it out from
here and it isn't worth doing anything fancy to prevent the console
from jumping around a little. 200 is a pretty wild guess for the
minimum but it makes REST tests output sanely. */
int totalNumberOfTestsGuess = max(200, totalSuites * 10)
testsFormat = "%0${widthForTotal(totalNumberOfTestsGuess)}d"
}
@Subscribe
void onTestResult(AggregatedTestResultEvent e) throws IOException {
testsCompleted++
switch (e.status) {
case ERROR:
case FAILURE:
testsFailed++
break
case IGNORED:
case IGNORED_ASSUMPTION:
testsIgnored++
break
case OK:
break
default:
throw new IllegalArgumentException(
"Unknown test status: [${e.status}]")
}
if (!suiteFinished) {
updateEventInfo(e)
}
log()
}
@Subscribe
void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException {
final int suitesCompleted = suitesCompleted.incrementAndGet();
final int testsCompleted = testsCompleted.addAndGet(e.getDescription().testCount())
final int testsFailed = testsFailed.addAndGet(e.getErrorCount() + e.getFailureCount())
final int testsIgnored = testsIgnored.addAndGet(e.getIgnoredCount())
Description description = e.getDescription()
String suiteName = description.getDisplayName();
suiteName = suiteName.substring(suiteName.lastIndexOf('.') + 1);
progressLogger.progress('Suites [' + suitesCompleted + '/' + totalSuites + '], Tests [' + testsCompleted + '|' + testsFailed + '|' + testsIgnored + '], ' + suiteName + ' on J' + e.getSlave().id + ' in ' + formatDurationInSeconds(e.getExecutionTime()))
suitesCompleted++
suiteFinished = true
updateEventInfo(e)
log()
}
/**
* Update the suite information with a junit4 event.
*/
private void updateEventInfo(Object e) {
eventDescription = simpleName(e.description.className)
if (e.description.methodName != null) {
eventDescription += "#${e.description.methodName}"
}
eventSlave = e.slave.id
eventExecutionTime = e.executionTime
}
/**
* Extract a Class#getSimpleName style name from Class#getName style
* string. We can't just use Class#getSimpleName because junit descriptions
* don't always set the class field but they always set the className
* field.
*/
private static String simpleName(String className) {
return className.substring(className.lastIndexOf('.') + 1)
}
private void log() {
/* Remember that instances of this class are only ever active on one
thread at a time so there really aren't race conditions here. It'd be
OK if there were because they'd only display an overcount
temporarily. */
String log = ''
if (totalSuites > 1) {
/* Skip printing the suites to save space when there is only a
single suite. This is nice because when there is only a single
suite we log the method name and those can be long. */
log += sprintf("Suites [${suitesFormat}/${suitesFormat}], ",
[suitesCompleted, totalSuites])
}
log += sprintf("Tests [${testsFormat}|%d|%d], ",
[testsCompleted, testsFailed, testsIgnored])
log += "in ${formatDurationInSeconds(eventExecutionTime)} "
if (totalSlaves > 1) {
/* Skip printing the slaves if there is only one of them. This is
nice because when there is only a single slave there is often
only a single suite and we could use the extra space to log the
test method names. */
log += "J${sprintf(slavesFormat, eventSlave)} "
}
log += "completed ${eventDescription}"
progressLogger.progress(log)
}
private static int widthForTotal(int total) {
return ((total - 1) as String).length()
}
@Override

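For illustration, in a run with 61 suites on 2 jvms, the line assembled by log() would look roughly like this (class and method names invented):
---------------------------------------------------------------------------
Suites [28/61], Tests [094|1|12], in 1.84s J0 completed SomeTests#testSomething
---------------------------------------------------------------------------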

@ -79,7 +79,7 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
forkedJvmCount = e.getSlaveCount();
jvmIdFormat = " J%-" + (1 + (int) Math.floor(Math.log10(forkedJvmCount))) + "d";
outStream = new LoggingOutputStream(logger: logger, level: LogLevel.ERROR, prefix: " 1> ")
outStream = new LoggingOutputStream(logger: logger, level: LogLevel.LIFECYCLE, prefix: " 1> ")
errStream = new LoggingOutputStream(logger: logger, level: LogLevel.ERROR, prefix: " 2> ")
for (String contains : config.stackTraceFilters.contains) {
@ -152,13 +152,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException {
if (isPassthrough()) {
SuiteStartedEvent evt = e.getSuiteStartedEvent();
emitSuiteStart(LogLevel.INFO, evt.getDescription());
emitSuiteStart(LogLevel.LIFECYCLE, evt.getDescription());
}
}
@Subscribe
void onOutput(PartialOutputEvent e) throws IOException {
if (isPassthrough() && logger.isInfoEnabled()) {
if (isPassthrough()) {
// We only allow passthrough output if there is one JVM.
switch (e.getEvent().getType()) {
case EventType.APPEND_STDERR:
@ -187,7 +187,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
@Subscribe
void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException {
try {
final int completed = suitesCompleted.incrementAndGet();
if (e.isSuccessful() && e.getTests().isEmpty()) {
@ -197,7 +196,8 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
suiteTimes.put(e.getDescription().getDisplayName(), e.getExecutionTime())
}
LogLevel level = e.isSuccessful() ? LogLevel.INFO : LogLevel.ERROR
LogLevel level = e.isSuccessful() && config.outputMode != OutputMode.ALWAYS ? LogLevel.INFO : LogLevel.LIFECYCLE
// We must emit buffered test and stream events (in case of failures).
if (!isPassthrough()) {
emitSuiteStart(level, e.getDescription())
@ -214,9 +214,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
}
emitSuiteEnd(level, e, completed)
} catch (Exception exc) {
logger.lifecycle('EXCEPTION: ', exc)
}
}
/** Suite prologue. */
@ -348,9 +345,9 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
errStream.flush()
}
/** Returns true if output should be logged immediately. Only relevant when running with INFO log level. */
/** Returns true if output should be logged immediately. */
boolean isPassthrough() {
return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS && logger.isInfoEnabled()
return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS
}
@Override


@ -18,6 +18,9 @@
*/
package org.elasticsearch.gradle
import java.time.ZonedDateTime
import java.time.ZoneOffset
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.*
@ -41,6 +44,7 @@ class BuildPlugin implements Plugin<Project> {
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
configureJarManifest(project) // jar config must be added before info broker
project.pluginManager.apply('nebula.info-broker')
project.pluginManager.apply('nebula.info-basic')
project.pluginManager.apply('nebula.info-java')
@ -54,7 +58,7 @@ class BuildPlugin implements Plugin<Project> {
configureConfigurations(project)
project.ext.versions = VersionProperties.versions
configureCompile(project)
configureJarManifest(project)
configureTest(project)
PrecommitTasks.configure(project)
}
@ -228,11 +232,17 @@ class BuildPlugin implements Plugin<Project> {
/** Adds additional manifest info to jars */
static void configureJarManifest(Project project) {
project.afterEvaluate {
project.tasks.withType(Jar) { Jar jarTask ->
manifest {
attributes('X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
'X-Compile-Lucene-Version': VersionProperties.lucene)
project.tasks.withType(Jar) { Jar jarTask ->
jarTask.doFirst {
// this doFirst is added before the info plugin, therefore it will run
// after the doFirst added by the info plugin, and we can override attributes
jarTask.manifest.attributes(
'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
'X-Compile-Lucene-Version': VersionProperties.lucene,
'Build-Date': ZonedDateTime.now(ZoneOffset.UTC))
if (jarTask.manifest.attributes.containsKey('Change') == false) {
logger.warn('Building without git revision id.')
jarTask.manifest.attributes('Change': 'N/A')
}
}
}
@ -297,7 +307,12 @@ class BuildPlugin implements Plugin<Project> {
regex(/^(\s+at )(org\.apache\.lucene\.util\.TestRule)/)
regex(/^(\s+at )(org\.apache\.lucene\.util\.AbstractBeforeAfterRule)/)
}
outputMode System.getProperty('tests.output', 'onerror')
if (System.getProperty('tests.class') != null && System.getProperty('tests.output') == null) {
// if you are debugging, you want to see the output!
outputMode 'always'
} else {
outputMode System.getProperty('tests.output', 'onerror')
}
}
balancers {


@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle
import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import org.gradle.internal.nativeintegration.filesystem.Chmod
import java.io.File
import javax.inject.Inject
/**
* Creates an empty directory.
*/
class EmptyDirTask extends DefaultTask {
@Input
Object dir
@Input
int dirMode = 0755
@TaskAction
void create() {
dir = dir as File
dir.mkdirs()
getChmod().chmod(dir, dirMode)
}
@Inject
Chmod getChmod() {
throw new UnsupportedOperationException()
}
}
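The @Inject getter that throws UnsupportedOperationException is Gradle's service-injection idiom: at runtime Gradle overrides the method to supply the real Chmod service. A hypothetical usage sketch:
---------------------------------------------------------------------------
task pluginsDir(type: org.elasticsearch.gradle.EmptyDirTask) {
    dir = "${buildDir}/generated-resources/plugins" // coerced to a File at execution time
    dirMode = 0755
}
---------------------------------------------------------------------------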


@ -0,0 +1,50 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle
import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import java.io.File
/**
* Creates a file and sets its contents to something.
*/
class FileContentsTask extends DefaultTask {
/**
* The file to be built. Must be of type File to make @OutputFile happy.
*/
@OutputFile
File file
@Input
Object contents
/**
* The file to be built. Takes any object and coerces it to a file.
*/
void setFile(Object file) {
this.file = file as File
}
@TaskAction
void setContents() {
file = file as File
file.text = contents.toString()
}
}
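A hypothetical usage sketch, writing the project version to a file:
---------------------------------------------------------------------------
task versionFile(type: org.elasticsearch.gradle.FileContentsTask) {
    file = "${buildDir}/version.txt"
    contents = project.version // any object; converted with toString() when written
}
---------------------------------------------------------------------------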


@ -18,9 +18,9 @@
*/
package org.elasticsearch.gradle.plugin
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.bundling.Zip
@ -33,26 +33,21 @@ class PluginBuildPlugin extends BuildPlugin {
@Override
void apply(Project project) {
super.apply(project)
// TODO: add target compatibility (java version) to elasticsearch properties and set for the project
configureDependencies(project)
// this afterEvaluate must happen before the afterEvaluate added by integTest configure,
// so that the file name resolution for installing the plugin will be set up
project.afterEvaluate {
project.jar.configure {
baseName project.pluginProperties.extension.name
}
project.bundlePlugin.configure {
baseName project.pluginProperties.extension.name
}
project.integTest.configure {
dependsOn project.bundlePlugin
cluster {
plugin project.pluginProperties.extension.name, project.bundlePlugin.outputs.files
}
}
String name = project.pluginProperties.extension.name
project.jar.baseName = name
project.bundlePlugin.baseName = name
project.integTest.dependsOn(project.bundlePlugin)
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.dependsOn(project.bundlePlugin)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
}
Task bundle = configureBundleTask(project)
RestIntegTestTask.configure(project)
RunTask.configure(project)
Task bundle = configureBundleTask(project)
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.configurations.getByName('default').extendsFrom = []
project.artifacts {


@ -21,6 +21,7 @@ package org.elasticsearch.gradle.precommit
import org.gradle.api.DefaultTask
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.OutputFiles
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.TaskAction
@ -36,6 +37,9 @@ class ForbiddenPatternsTask extends DefaultTask {
Map<String,String> patterns = new LinkedHashMap<>()
PatternFilterable filesFilter = new PatternSet()
@OutputFile
File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns")
ForbiddenPatternsTask() {
// we always include all source files, and exclude what should not be checked
filesFilter.include('**')
@ -94,6 +98,7 @@ class ForbiddenPatternsTask extends DefaultTask {
if (failures.isEmpty() == false) {
throw new IllegalArgumentException('Found invalid patterns:\n' + failures.join('\n'))
}
outputMarker.setText('done', 'UTF-8')
}
// iterate through patterns to find the right ones for nice error messages

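The marker file exists only to give the task a declared @OutputFile for Gradle's up-to-date checking; with no declared outputs the task would re-run on every build. The generic form of the idiom, as a sketch:
---------------------------------------------------------------------------
task myCheck {
    inputs.dir 'src'
    outputs.file "${buildDir}/markers/myCheck"
    doLast {
        // ... run the actual checks, throwing on any failure ...
        file("${buildDir}/markers/myCheck").text = 'done'
    }
}
---------------------------------------------------------------------------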

@ -39,6 +39,9 @@ class ClusterConfiguration {
@Input
boolean daemonize = true
@Input
boolean debug = false
@Input
String jvmArgs = System.getProperty('tests.jvm.argline', '')


@ -102,7 +102,7 @@ class ClusterFormationTasks {
String camelName = plugin.getKey().replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
String taskName = "${task.name}#install${camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1)}Plugin"
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${ -> new File(pluginsTmpDir, plugin.getValue().singleFile.getName()).toURI().toURL().toString() }"
String file = "${-> new File(pluginsTmpDir, plugin.getValue().singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(home, 'bin/plugin'), 'install', file]
setup = configureExecTask(taskName, project, setup, cwd, args)
}
@ -115,8 +115,11 @@ class ClusterFormationTasks {
Task start = configureStartTask("${task.name}#start", project, setup, cwd, config, clusterName, pidFile, home)
task.dependsOn(start)
Task stop = configureStopTask("${task.name}#stop", project, [], pidFile)
task.finalizedBy(stop)
if (config.daemonize) {
// if we are running in the background, make sure to stop the server when the task completes
Task stop = configureStopTask("${task.name}#stop", project, [], pidFile)
task.finalizedBy(stop)
}
}
/** Adds a task to extract the elasticsearch distribution */
@ -209,7 +212,7 @@ class ClusterFormationTasks {
static Task configureStartTask(String name, Project project, Task setup, File cwd, ClusterConfiguration config, String clusterName, File pidFile, File home) {
Map esEnv = [
'JAVA_HOME' : project.javaHome,
'ES_GC_OPTS': config.jvmArgs
'JAVA_OPTS': config.jvmArgs
]
List<String> esProps = config.systemProperties.collect { key, value -> "-D${key}=${value}" }
for (Map.Entry<String, String> property : System.properties.entrySet()) {
@ -232,6 +235,13 @@ class ClusterFormationTasks {
// this closure is converted into ant nodes by groovy's AntBuilder
Closure antRunner = {
// we must add debug options inside the closure so the config is read at execution time, as
// gradle task options are not processed until the end of the configuration phase
if (config.debug) {
println 'Running elasticsearch in debug mode, suspending until connected on port 8000'
esEnv['JAVA_OPTS'] += ' -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
}
exec(executable: executable, spawn: config.daemonize, dir: cwd, taskname: 'elasticsearch') {
esEnv.each { key, value -> env(key: key, value: value) }
(esArgs + esProps).each { arg(value: it) }
@ -248,6 +258,15 @@ class ClusterFormationTasks {
// this closure is the actual code to run elasticsearch
Closure elasticsearchRunner = {
// Command as string for logging
String esCommandString = "Elasticsearch command: ${executable} "
esCommandString += (esArgs + esProps).join(' ')
if (esEnv.isEmpty() == false) {
esCommandString += '\nenvironment:'
esEnv.each { k, v -> esCommandString += "\n ${k}: ${v}" }
}
logger.info(esCommandString)
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
if (logger.isInfoEnabled() || config.daemonize == false) {
// run with piping streams directly out (even stderr to stdout since gradle would capture it)
@ -265,11 +284,17 @@ class ClusterFormationTasks {
File logFile = new File(home, "logs/${clusterName}.log")
if (logFile.exists()) {
logFile.eachLine { line -> logger.error(line) }
} else {
logger.error("Couldn't start elasticsearch and couldn't find ${logFile}")
}
if (logger.isInfoEnabled() == false) {
// We already log the command at info level. No need to do it twice.
logger.error(esCommandString)
}
throw new GradleException('Failed to start elasticsearch')
}
}
Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
start.doLast(elasticsearchRunner)
return start

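Judging from the string building above, the info-level output would look roughly like this (paths illustrative):
---------------------------------------------------------------------------
Elasticsearch command: /path/to/distribution/bin/elasticsearch -Des.http.port=9200
environment:
  JAVA_HOME: /usr/lib/jvm/java-8
  JAVA_OPTS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000
---------------------------------------------------------------------------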

@ -22,6 +22,7 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil
@ -79,6 +80,14 @@ class RestIntegTestTask extends RandomizedTestingTask {
}
}
@Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
clusterConfig.debug = enabled;
}
@Input
void cluster(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig)


@ -1,12 +1,32 @@
package org.elasticsearch.gradle.test
import org.gradle.api.DefaultTask
import org.gradle.api.tasks.TaskAction
import org.gradle.api.Project
import org.gradle.api.internal.tasks.options.Option
class RunTask extends DefaultTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
RunTask() {
ClusterFormationTasks.setup(project, this, clusterConfig)
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}
}
@Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
clusterConfig.debug = enabled;
}
static void configure(Project project) {
RunTask task = project.tasks.create(
name: 'run',
type: RunTask,
description: "Runs elasticsearch with '${project.path}'",
group: 'Verification')
}
}


@ -20,10 +20,9 @@ package org.elasticsearch.gradle.vagrant
import com.carrotsearch.gradle.junit4.LoggingOutputStream
import org.gradle.api.GradleScriptException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.logging.Logger
import org.gradle.logging.ProgressLogger
import org.gradle.logging.ProgressLoggerFactory
import java.util.regex.Matcher
/**


@ -20,7 +20,6 @@ package org.elasticsearch.gradle.vagrant
import com.carrotsearch.gradle.junit4.LoggingOutputStream
import org.gradle.logging.ProgressLogger
import org.gradle.logging.ProgressLoggerFactory
/**
* Adapts an OutputStream being written to by vagrant into a ProgressLogger. It


@ -0,0 +1,4 @@
This directory contains templates that work around gradle-ospackage-plugin
trying to be helpful and adding templates for your os packaging scripts. We
have relatively nice scripts already so we just override the templates to be
mostly noops.


@ -0,0 +1,3 @@
<% files.each {file -> %><%= file
%>
<% } %>


@ -0,0 +1,2 @@
#!/bin/sh -e
<% commands.each {command -> %><%= command %><% } %>


@ -0,0 +1,2 @@
#!/bin/sh -e
<% commands.each {command -> %><%= command %><% } %>


@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.4.0-snapshot-1712973
lucene = 5.4.0-snapshot-1714615
# optional dependencies
spatial4j = 0.5


@ -33,9 +33,13 @@ import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
/**
* Information about a build of Elasticsearch.
*/
public class Build {
/**
* The current build of Elasticsearch. Filled with information scanned at
* startup from the jar.
*/
public static final Build CURRENT;
static {
@ -56,6 +60,14 @@ public class Build {
shortHash = "Unknown";
date = "Unknown";
}
if (shortHash == null) {
throw new IllegalStateException("Error finding the build shortHash. " +
"Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug.");
}
if (date == null) {
throw new IllegalStateException("Error finding the build date. " +
"Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug.");
}
CURRENT = new Build(shortHash, date);
}


@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;


@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;


@ -22,10 +22,9 @@ package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTableValidation;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
@ -36,38 +35,22 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import static org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth.readClusterIndexHealth;
/**
*
*/
public class ClusterHealthResponse extends ActionResponse implements Iterable<ClusterIndexHealth>, StatusToXContent {
public class ClusterHealthResponse extends ActionResponse implements StatusToXContent {
private String clusterName;
int numberOfNodes = 0;
int numberOfDataNodes = 0;
int activeShards = 0;
int relocatingShards = 0;
int activePrimaryShards = 0;
int initializingShards = 0;
int unassignedShards = 0;
int numberOfPendingTasks = 0;
int numberOfInFlightFetch = 0;
int delayedUnassignedShards = 0;
TimeValue taskMaxWaitingTime = TimeValue.timeValueMillis(0);
double activeShardsPercent = 100;
boolean timedOut = false;
ClusterHealthStatus status = ClusterHealthStatus.RED;
private List<String> validationFailures;
Map<String, ClusterIndexHealth> indices = new HashMap<>();
private int numberOfPendingTasks = 0;
private int numberOfInFlightFetch = 0;
private int delayedUnassignedShards = 0;
private TimeValue taskMaxWaitingTime = TimeValue.timeValueMillis(0);
private boolean timedOut = false;
private ClusterStateHealth clusterStateHealth;
private ClusterHealthStatus clusterHealthStatus;
ClusterHealthResponse() {
}
@ -87,107 +70,53 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
this.numberOfPendingTasks = numberOfPendingTasks;
this.numberOfInFlightFetch = numberOfInFlightFetch;
this.taskMaxWaitingTime = taskMaxWaitingTime;
RoutingTableValidation validation = clusterState.routingTable().validate(clusterState.metaData());
validationFailures = validation.failures();
numberOfNodes = clusterState.nodes().size();
numberOfDataNodes = clusterState.nodes().dataNodes().size();
for (String index : concreteIndices) {
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);
IndexMetaData indexMetaData = clusterState.metaData().index(index);
if (indexRoutingTable == null) {
continue;
}
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
indices.put(indexHealth.getIndex(), indexHealth);
}
status = ClusterHealthStatus.GREEN;
for (ClusterIndexHealth indexHealth : indices.values()) {
activePrimaryShards += indexHealth.getActivePrimaryShards();
activeShards += indexHealth.getActiveShards();
relocatingShards += indexHealth.getRelocatingShards();
initializingShards += indexHealth.getInitializingShards();
unassignedShards += indexHealth.getUnassignedShards();
if (indexHealth.getStatus() == ClusterHealthStatus.RED) {
status = ClusterHealthStatus.RED;
} else if (indexHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) {
status = ClusterHealthStatus.YELLOW;
}
}
if (!validationFailures.isEmpty()) {
status = ClusterHealthStatus.RED;
} else if (clusterState.blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) {
status = ClusterHealthStatus.RED;
}
// shortcut on green
if (status.equals(ClusterHealthStatus.GREEN)) {
this.activeShardsPercent = 100;
} else {
List<ShardRouting> shardRoutings = clusterState.getRoutingTable().allShards();
int activeShardCount = 0;
int totalShardCount = 0;
for (ShardRouting shardRouting : shardRoutings) {
if (shardRouting.active()) activeShardCount++;
totalShardCount++;
}
this.activeShardsPercent = (((double) activeShardCount) / totalShardCount) * 100;
}
this.clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
this.clusterHealthStatus = clusterStateHealth.getStatus();
}
public String getClusterName() {
return clusterName;
}
//package private for testing
ClusterStateHealth getClusterStateHealth() {
return clusterStateHealth;
}
/**
* The validation failures on the cluster level (without index validation failures).
*/
public List<String> getValidationFailures() {
return this.validationFailures;
return clusterStateHealth.getValidationFailures();
}
/**
* All the validation failures, including index level validation failures.
*/
public List<String> getAllValidationFailures() {
List<String> allFailures = new ArrayList<>(getValidationFailures());
for (ClusterIndexHealth indexHealth : indices.values()) {
allFailures.addAll(indexHealth.getValidationFailures());
}
return allFailures;
}
public int getActiveShards() {
return activeShards;
return clusterStateHealth.getActiveShards();
}
public int getRelocatingShards() {
return relocatingShards;
return clusterStateHealth.getRelocatingShards();
}
public int getActivePrimaryShards() {
return activePrimaryShards;
return clusterStateHealth.getActivePrimaryShards();
}
public int getInitializingShards() {
return initializingShards;
return clusterStateHealth.getInitializingShards();
}
public int getUnassignedShards() {
return unassignedShards;
return clusterStateHealth.getUnassignedShards();
}
public int getNumberOfNodes() {
return this.numberOfNodes;
return clusterStateHealth.getNumberOfNodes();
}
public int getNumberOfDataNodes() {
return this.numberOfDataNodes;
return clusterStateHealth.getNumberOfDataNodes();
}
public int getNumberOfPendingTasks() {
@ -214,12 +143,28 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
return this.timedOut;
}
public void setTimedOut(boolean timedOut) {
this.timedOut = timedOut;
}
public ClusterHealthStatus getStatus() {
return status;
return clusterHealthStatus;
}
/**
* Allows the derived cluster health status to be explicitly overridden.
*
* @param status The override status. Must not be null.
*/
public void setStatus(ClusterHealthStatus status) {
if (status == null) {
throw new IllegalArgumentException("'status' must not be null");
}
this.clusterHealthStatus = status;
}
public Map<String, ClusterIndexHealth> getIndices() {
return indices;
return clusterStateHealth.getIndices();
}
/**
@ -234,15 +179,9 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
* The percentage of active shards; it should be 100% in a green system
*/
public double getActiveShardsPercent() {
return activeShardsPercent;
return clusterStateHealth.getActiveShardsPercent();
}
@Override
public Iterator<ClusterIndexHealth> iterator() {
return indices.values().iterator();
}
public static ClusterHealthResponse readResponseFrom(StreamInput in) throws IOException {
ClusterHealthResponse response = new ClusterHealthResponse();
response.readFrom(in);
@ -253,36 +192,14 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
clusterName = in.readString();
activePrimaryShards = in.readVInt();
activeShards = in.readVInt();
relocatingShards = in.readVInt();
initializingShards = in.readVInt();
unassignedShards = in.readVInt();
numberOfNodes = in.readVInt();
numberOfDataNodes = in.readVInt();
clusterHealthStatus = ClusterHealthStatus.fromValue(in.readByte());
clusterStateHealth = ClusterStateHealth.readClusterHealth(in);
numberOfPendingTasks = in.readInt();
status = ClusterHealthStatus.fromValue(in.readByte());
int size = in.readVInt();
for (int i = 0; i < size; i++) {
ClusterIndexHealth indexHealth = readClusterIndexHealth(in);
indices.put(indexHealth.getIndex(), indexHealth);
}
timedOut = in.readBoolean();
size = in.readVInt();
if (size == 0) {
validationFailures = Collections.emptyList();
} else {
for (int i = 0; i < size; i++) {
validationFailures.add(in.readString());
}
}
numberOfInFlightFetch = in.readInt();
if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
delayedUnassignedShards= in.readInt();
}
activeShardsPercent = in.readDouble();
taskMaxWaitingTime = TimeValue.readTimeValue(in);
}
@ -290,31 +207,14 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(clusterName);
out.writeVInt(activePrimaryShards);
out.writeVInt(activeShards);
out.writeVInt(relocatingShards);
out.writeVInt(initializingShards);
out.writeVInt(unassignedShards);
out.writeVInt(numberOfNodes);
out.writeVInt(numberOfDataNodes);
out.writeByte(clusterHealthStatus.value());
clusterStateHealth.writeTo(out);
out.writeInt(numberOfPendingTasks);
out.writeByte(status.value());
out.writeVInt(indices.size());
for (ClusterIndexHealth indexHealth : this) {
indexHealth.writeTo(out);
}
out.writeBoolean(timedOut);
out.writeVInt(validationFailures.size());
for (String failure : validationFailures) {
out.writeString(failure);
}
out.writeInt(numberOfInFlightFetch);
if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
out.writeInt(delayedUnassignedShards);
}
out.writeDouble(activeShardsPercent);
taskMaxWaitingTime.writeTo(out);
}
@ -389,7 +289,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
// if we don't print index level information, still print the index validation failures
// so we know why the status is red
if (!outputIndices) {
for (ClusterIndexHealth indexHealth : indices.values()) {
for (ClusterIndexHealth indexHealth : clusterStateHealth.getIndices().values()) {
builder.startObject(indexHealth.getIndex());
if (!indexHealth.getValidationFailures().isEmpty()) {
@ -408,7 +308,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
if (outputIndices) {
builder.startObject(Fields.INDICES);
for (ClusterIndexHealth indexHealth : indices.values()) {
for (ClusterIndexHealth indexHealth : clusterStateHealth.getIndices().values()) {
builder.startObject(indexHealth.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
indexHealth.toXContent(builder, params);
builder.endObject();


@ -25,6 +25,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
@ -184,7 +185,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
// if the state is sufficient for what we were waiting for we don't need to mark this as timedOut.
// We may spend a long time waiting for events, such that we might already have reached a valid state.
// this should not mark the request as timed out
response.timedOut = timedOut && valid == false;
response.setTimedOut(timedOut && valid == false);
return response;
}
@ -204,7 +205,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
waitForCounter++;
} catch (IndexNotFoundException e) {
response.status = ClusterHealthStatus.RED; // no indices, make sure it's RED
response.setStatus(ClusterHealthStatus.RED); // no indices, make sure it's RED
// missing indices, wait a bit more...
}
}
@ -272,13 +273,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
} catch (IndexNotFoundException e) {
// one of the specified indices is not there - treat it as RED.
ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,
numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(System.currentTimeMillis(), settings, clusterState),
numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState),
pendingTaskTimeInQueue);
response.status = ClusterHealthStatus.RED;
response.setStatus(ClusterHealthStatus.RED);
return response;
}
return new ClusterHealthResponse(clusterName.value(), concreteIndices, clusterState, numberOfPendingTasks,
numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(System.currentTimeMillis(), settings, clusterState), pendingTaskTimeInQueue);
numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue);
}
}


@ -223,8 +223,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
http = HttpStats.readHttpStats(in);
}
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
scriptStats = in.readOptionalStreamable(new ScriptStats());
discoveryStats = in.readOptionalStreamable(new DiscoveryStats(null));
scriptStats = in.readOptionalStreamable(ScriptStats::new);
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
}


@ -162,7 +162,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public ClusterState execute(final ClusterState currentState) {
// now, reroute in case things that require it changed (e.g. number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "reroute after cluster update settings");
if (!routingResult.changed()) {
return currentState;
}


@ -41,6 +41,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
private String[] snapshots = Strings.EMPTY_ARRAY;
private boolean ignoreUnavailable;
public GetSnapshotsRequest() {
}
@ -112,11 +114,28 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
return this;
}
/**
* Set to true to ignore unavailable snapshots
*
* @return this request
*/
public GetSnapshotsRequest ignoreUnavailable(boolean ignoreUnavailable) {
this.ignoreUnavailable = ignoreUnavailable;
return this;
}
/**
* @return Whether snapshots should be ignored when unavailable (corrupt or temporarily not fetchable)
*/
public boolean ignoreUnavailable() {
return ignoreUnavailable;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repository = in.readString();
snapshots = in.readStringArray();
ignoreUnavailable = in.readBoolean();
}
@Override
@ -124,5 +143,6 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
super.writeTo(out);
out.writeString(repository);
out.writeStringArray(snapshots);
out.writeBoolean(ignoreUnavailable);
}
}


@ -84,4 +84,16 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde
request.snapshots(ArrayUtils.concat(request.snapshots(), snapshots));
return this;
}
/**
* Makes the request ignore unavailable snapshots
*
* @param ignoreUnavailable true to ignore unavailable snapshots.
* @return this builder
*/
public GetSnapshotsRequestBuilder setIgnoreUnavailable(boolean ignoreUnavailable) {
request.ignoreUnavailable(ignoreUnavailable);
return this;
}
}
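A client-side usage sketch (repository name hypothetical, assuming the usual `client` plumbing):
---------------------------------------------------------------------------
GetSnapshotsResponse response = client.admin().cluster()
        .prepareGetSnapshots("my_repository")
        .setIgnoreUnavailable(true) // skip corrupt or temporarily unfetchable snapshots
        .get();
---------------------------------------------------------------------------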


@ -74,7 +74,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
try {
List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
if (isAllSnapshots(request.snapshots())) {
List<Snapshot> snapshots = snapshotsService.snapshots(request.repository());
List<Snapshot> snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable());
for (Snapshot snapshot : snapshots) {
snapshotInfoBuilder.add(new SnapshotInfo(snapshot));
}


@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;


@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;


@ -19,8 +19,7 @@
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
@ -31,10 +30,8 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTableValidation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -43,7 +40,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -116,34 +112,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
ClusterHealthStatus clusterStatus = null;
if (clusterService.state().nodes().localNodeMaster()) {
// populate cluster status
clusterStatus = ClusterHealthStatus.GREEN;
for (IndexRoutingTable indexRoutingTable : clusterService.state().routingTable()) {
IndexMetaData indexMetaData = clusterService.state().metaData().index(indexRoutingTable.index());
if (indexRoutingTable == null) {
continue;
}
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
switch (indexHealth.getStatus()) {
case RED:
clusterStatus = ClusterHealthStatus.RED;
break;
case YELLOW:
if (clusterStatus != ClusterHealthStatus.RED) {
clusterStatus = ClusterHealthStatus.YELLOW;
}
break;
}
}
RoutingTableValidation validation = clusterService.state().routingTable().validate(clusterService.state().metaData());
if (!validation.failures().isEmpty()) {
clusterStatus = ClusterHealthStatus.RED;
} else if (clusterService.state().blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) {
clusterStatus = ClusterHealthStatus.RED;
}
clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus();
}
return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]));


@ -23,6 +23,7 @@ import org.elasticsearch.action.Action;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
/**
* Request builder for {@link IndicesShardStoresRequest}
@ -53,7 +54,7 @@ public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequ
/**
* Set statuses to filter shards to get stores info on.
* @param shardStatuses acceptable values are "green", "yellow", "red" and "all"
* see {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus} for details
* see {@link ClusterHealthStatus} for details
*/
public IndicesShardStoreRequestBuilder setShardStatuses(String... shardStatuses) {
request.shardStatuses(shardStatuses);


@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.indices.shards;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.Strings;


@ -21,8 +21,8 @@ package org.elasticsearch.action.admin.indices.shards;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterShardHealth;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;


@ -553,10 +553,10 @@ public class CommonStats implements Streamable, ToXContent {
if (in.readBoolean()) {
segments = SegmentsStats.readSegmentsStats(in);
}
translog = in.readOptionalStreamable(new TranslogStats());
suggest = in.readOptionalStreamable(new SuggestStats());
requestCache = in.readOptionalStreamable(new RequestCacheStats());
recoveryStats = in.readOptionalStreamable(new RecoveryStats());
translog = in.readOptionalStreamable(TranslogStats::new);
suggest = in.readOptionalStreamable(SuggestStats::new);
requestCache = in.readOptionalStreamable(RequestCacheStats::new);
recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
}
@Override


@ -132,7 +132,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
indexShard.delete(delete);
// update the request with teh version so it will go to the replicas
// update the request with the version so it will go to the replicas
request.versionType(delete.versionType().versionTypeForReplicationAndRecovery());
request.version(delete.version());


@ -122,9 +122,11 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
/**
* @param value The string to be parsed
* @return The concrete object represented by the string argument
* @param optionalFormat A string describing how to parse the specified value. Whether this parameter is supported
* depends on the implementation. If optionalFormat is specified and the implementation
* doesn't support it an {@link UnsupportedOperationException} is thrown
*/
protected abstract T valueOf(String value);
protected abstract T valueOf(String value, String optionalFormat);
/**
* Merges the provided stats into this stats instance.
@ -153,7 +155,7 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
*/
public boolean match(IndexConstraint constraint) {
int cmp;
T value = valueOf(constraint.getValue());
T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
if (constraint.getProperty() == IndexConstraint.Property.MIN) {
cmp = minValue.compareTo(value);
} else if (constraint.getProperty() == IndexConstraint.Property.MAX) {
@ -245,7 +247,10 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
@Override
protected java.lang.Long valueOf(String value) {
protected java.lang.Long valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return java.lang.Long.valueOf(value);
}
@@ -295,7 +300,10 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
@Override
protected java.lang.Float valueOf(String value) {
protected java.lang.Float valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return java.lang.Float.valueOf(value);
}
@@ -345,7 +353,10 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
@Override
protected java.lang.Double valueOf(String value) {
protected java.lang.Double valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return java.lang.Double.valueOf(value);
}
@@ -399,7 +410,10 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
@Override
protected BytesRef valueOf(String value) {
protected BytesRef valueOf(String value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
return new BytesRef(value);
}
@@ -448,7 +462,11 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
@Override
protected java.lang.Long valueOf(String value) {
protected java.lang.Long valueOf(String value, String optionalFormat) {
FormatDateTimeFormatter dateFormatter = this.dateFormatter;
if (optionalFormat != null) {
dateFormatter = Joda.forPattern(optionalFormat);
}
return dateFormatter.parser().parseMillis(value);
}
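To illustrate the date-aware overload above, a short hedged sketch of the parsing chain it relies on (the pattern and value are made-up examples):

-------------------------------------------------
// Joda.forPattern(...).parser().parseMillis(...) is the same chain used above.
FormatDateTimeFormatter formatter = Joda.forPattern("yyyy-MM-dd");
long millis = formatter.parser().parseMillis("2015-11-19"); // epoch millis for that day
-------------------------------------------------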

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
@@ -121,22 +122,24 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
currentName = parser.currentName();
} else if (fieldToken == Token.START_OBJECT) {
IndexConstraint.Property property = IndexConstraint.Property.parse(currentName);
Token propertyToken = parser.nextToken();
if (propertyToken != Token.FIELD_NAME) {
throw new IllegalArgumentException("unexpected token [" + propertyToken + "]");
}
IndexConstraint.Comparison comparison = IndexConstraint.Comparison.parse(parser.currentName());
propertyToken = parser.nextToken();
if (propertyToken.isValue() == false) {
throw new IllegalArgumentException("unexpected token [" + propertyToken + "]");
}
String value = parser.text();
indexConstraints.add(new IndexConstraint(field, property, comparison, value));
propertyToken = parser.nextToken();
if (propertyToken != Token.END_OBJECT) {
throw new IllegalArgumentException("unexpected token [" + propertyToken + "]");
String value = null;
String optionalFormat = null;
IndexConstraint.Comparison comparison = null;
for (Token propertyToken = parser.nextToken(); propertyToken != Token.END_OBJECT; propertyToken = parser.nextToken()) {
if (propertyToken.isValue()) {
if ("format".equals(parser.currentName())) {
optionalFormat = parser.text();
} else {
comparison = IndexConstraint.Comparison.parse(parser.currentName());
value = parser.text();
}
} else {
if (propertyToken != Token.FIELD_NAME) {
throw new IllegalArgumentException("unexpected token [" + propertyToken + "]");
}
}
}
indexConstraints.add(new IndexConstraint(field, property, comparison, value, optionalFormat));
} else {
throw new IllegalArgumentException("unexpected token [" + fieldToken + "]");
}
@@ -189,6 +192,9 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
out.writeByte(indexConstraint.getProperty().getId());
out.writeByte(indexConstraint.getComparison().getId());
out.writeString(indexConstraint.getValue());
if (out.getVersion().onOrAfter(Version.V_2_0_1)) {
out.writeOptionalString(indexConstraint.getOptionalFormat());
}
}
out.writeString(level);
}
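The version gate above must mirror the read side in IndexConstraint below; condensed, the backward-compatibility pairing used in this commit is:

-------------------------------------------------
// Write side: only emit the new field to nodes that can read it.
if (out.getVersion().onOrAfter(Version.V_2_0_1)) {
    out.writeOptionalString(indexConstraint.getOptionalFormat());
}
// Read side: fall back to null when the sender predates the field.
String optionalFormat = in.getVersion().onOrAfter(Version.V_2_0_1) ? in.readOptionalString() : null;
-------------------------------------------------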

View File

@@ -19,10 +19,12 @@
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
public class IndexConstraint {
@@ -30,37 +32,68 @@ public class IndexConstraint {
private final Property property;
private final Comparison comparison;
private final String value;
private final String optionalFormat;
IndexConstraint(StreamInput input) throws IOException {
this.field = input.readString();
this.property = Property.read(input.readByte());
this.comparison = Comparison.read(input.readByte());
this.value = input.readString();
if (input.getVersion().onOrAfter(Version.V_2_0_1)) {
this.optionalFormat = input.readOptionalString();
} else {
this.optionalFormat = null;
}
}
public IndexConstraint(String field, Property property, Comparison comparison, String value) {
this.field = field;
this.property = property;
this.comparison = comparison;
this.value = value;
this(field, property, comparison, value, null);
}
public IndexConstraint(String field, Property property, Comparison comparison, String value, String optionalFormat) {
this.field = Objects.requireNonNull(field);
this.property = Objects.requireNonNull(property);
this.comparison = Objects.requireNonNull(comparison);
this.value = Objects.requireNonNull(value);
this.optionalFormat = optionalFormat;
}
/**
* @return The field the constraint applies to
*/
public String getField() {
return field;
}
/**
* @return How to compare the specified value against the field property (lt, lte, gt and gte)
*/
public Comparison getComparison() {
return comparison;
}
/**
* @return The property of the field the constraint applies to (min or max value)
*/
public Property getProperty() {
return property;
}
/**
* @return The value to compare against
*/
public String getValue() {
return value;
}
/**
* @return An optional format that specifies how the value string is converted into the native value of the field.
* Not all field types support this; currently only the date field supports this option.
*/
public String getOptionalFormat() {
return optionalFormat;
}
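// Editor's illustration (hypothetical field name and values): a constraint on the
// minimum value of a date field, parsed with a custom format via the new
// five-argument constructor; Comparison.GTE matches the "gte" request key.
//
//   IndexConstraint constraint = new IndexConstraint(
//           "creation_date", Property.MIN, Comparison.GTE, "2015-11-01", "yyyy-MM-dd");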
public enum Property {
MIN((byte) 0),

View File

@@ -119,6 +119,10 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastAction
while (iterator.hasNext()) {
Map.Entry<String, Map<String, FieldStats>> entry = iterator.next();
FieldStats indexConstraintFieldStats = entry.getValue().get(indexConstraint.getField());
if (indexConstraintFieldStats == null) {
continue;
}
if (indexConstraintFieldStats.match(indexConstraint)) {
// If the field stats didn't occur in the list of fields in the original request we need to remove the
// field stats, because it was never requested and was only needed to validate the index constraint

View File

@@ -346,7 +346,7 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
indicesOptions = IndicesOptions.readIndicesOptions(in);
requestCache = in.readOptionalBoolean();
template = in.readOptionalStreamable(new Template());
template = in.readOptionalStreamable(Template::new);
}
@Override

View File

@@ -155,6 +155,8 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
super.readFrom(in);
if (in.readBoolean()) {
internalShardId = ShardId.readShardId(in);
} else {
internalShardId = null;
}
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
@@ -164,7 +166,12 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalStreamable(internalShardId);
if (internalShardId != null) {
out.writeBoolean(true);
internalShardId.writeTo(out);
} else {
out.writeBoolean(false);
}
out.writeByte(consistencyLevel.id());
timeout.writeTo(out);
out.writeString(index);
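The boolean prefix written above is the hand-rolled form of an optional write and must stay symmetric with the boolean-guarded read earlier in the class. As a sketch, the pattern for any Streamable field is:

-------------------------------------------------
// Write side: presence flag first, payload only if present.
if (field != null) {
    out.writeBoolean(true);
    field.writeTo(out);
} else {
    out.writeBoolean(false);
}
// Read side: consume the flag, then the payload; the explicit null assignment
// (as added above) keeps a reused request object from retaining a stale value.
field = in.readBoolean() ? readField(in) : null; // readField: e.g. ShardId.readShardId(in)
-------------------------------------------------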

View File

@@ -36,7 +36,6 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.NoOpShardStateActionListener;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -81,6 +80,8 @@ import java.util.function.Supplier;
*/
public abstract class TransportReplicationAction<Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, Response extends ActionWriteResponse> extends TransportAction<Request, Response> {
public static final String SHARD_FAILURE_TIMEOUT = "action.support.replication.shard.failure_timeout";
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final IndicesService indicesService;
@@ -88,6 +89,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
protected final MappingUpdatedAction mappingUpdatedAction;
private final TimeValue shardFailedTimeout;
final String transportReplicaAction;
final String executor;
@@ -117,6 +119,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.transportOptions = transportOptions();
this.defaultWriteConsistencyLevel = WriteConsistencyLevel.fromString(settings.get("action.write_consistency", "quorum"));
// TODO: set a default timeout
shardFailedTimeout = settings.getAsTime(SHARD_FAILURE_TIMEOUT, null);
}
@Override
@@ -351,7 +355,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final AtomicBoolean finished = new AtomicBoolean(false);
private volatile Releasable indexShardReference;
PrimaryPhase(Request request, ActionListener<Response> listener) {
this.internalRequest = new InternalRequest(request);
this.listener = listener;
@@ -578,7 +581,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
PrimaryOperationRequest por = new PrimaryOperationRequest(primary.id(), internalRequest.concreteIndex(), internalRequest.request());
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(observer.observedState(), por);
logger.trace("operation completed on primary [{}]", primary);
replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener, indexShardReference);
replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener, indexShardReference, shardFailedTimeout);
} catch (Throwable e) {
// shard has not been allocated yet, retry it here
if (retryPrimaryException(e)) {
@@ -687,7 +690,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
/**
* inner class responsible for sending the requests to all replica shards and managing the responses
*/
final class ReplicationPhase extends AbstractRunnable implements ShardStateAction.Listener {
final class ReplicationPhase extends AbstractRunnable {
private final ReplicaRequest replicaRequest;
private final Response finalResponse;
@@ -702,6 +705,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final int totalShards;
private final ClusterStateObserver observer;
private final Releasable indexShardReference;
private final TimeValue shardFailedTimeout;
/**
* the constructor doesn't take any action, just calculates state. Call {@link #run()} to start
@@ -709,7 +713,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
*/
public ReplicationPhase(ShardIterator originalShardIt, ReplicaRequest replicaRequest, Response finalResponse,
ClusterStateObserver observer, ShardRouting originalPrimaryShard,
InternalRequest internalRequest, ActionListener<Response> listener, Releasable indexShardReference) {
InternalRequest internalRequest, ActionListener<Response> listener, Releasable indexShardReference,
TimeValue shardFailedTimeout) {
this.replicaRequest = replicaRequest;
this.listener = listener;
this.finalResponse = finalResponse;
@@ -717,6 +722,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.observer = observer;
indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex());
this.indexShardReference = indexShardReference;
this.shardFailedTimeout = shardFailedTimeout;
ShardRouting shard;
// we double check on the state, if it got changed we need to make sure we take the latest one cause
@@ -822,16 +828,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
forceFinishAsFailed(t);
}
@Override
public void onShardFailedNoMaster() {
}
@Override
public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
}
/**
* start sending current requests to replicas
*/
@@ -893,14 +889,14 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
public void handleException(TransportException exp) {
onReplicaFailure(nodeId, exp);
logger.trace("[{}] transport failure during replica request [{}] ", exp, node, replicaRequest);
if (ignoreReplicaException(exp) == false) {
if (ignoreReplicaException(exp)) {
onReplicaFailure(nodeId, exp);
} else {
logger.warn("{} failed to perform {} on node {}", exp, shardIt.shardId(), actionName, node);
shardStateAction.shardFailed(shard, indexMetaData.getIndexUUID(), "failed to perform " + actionName + " on replica on node " + node, exp, ReplicationPhase.this);
shardStateAction.shardFailed(shard, indexMetaData.getIndexUUID(), "failed to perform " + actionName + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
}
}
});
} else {
try {
@@ -989,6 +985,33 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
public class ReplicationFailedShardStateListener implements ShardStateAction.Listener {
private final String nodeId;
private Throwable failure;
public ReplicationFailedShardStateListener(String nodeId, Throwable failure) {
this.nodeId = nodeId;
this.failure = failure;
}
@Override
public void onSuccess() {
onReplicaFailure(nodeId, failure);
}
@Override
public void onShardFailedNoMaster() {
onReplicaFailure(nodeId, failure);
}
@Override
public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
if (e instanceof ReceiveTimeoutTransportException) {
logger.trace("timeout sending shard failure to master [{}]", e, master);
}
onReplicaFailure(nodeId, failure);
}
}
}
/**

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.cluster;
import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
@@ -86,7 +87,6 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.DefaultSearchContext;
@@ -206,6 +206,7 @@ public class ClusterModule extends AbstractModule {
registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY);
registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN);
registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE);
}
private void registerBuiltinIndexSettings() {

View File

@@ -103,6 +103,8 @@ public class NodeIndexDeletedAction extends AbstractComponent {
INDEX_STORE_DELETED_ACTION_NAME, new NodeIndexStoreDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
} catch (LockObtainFailedException exc) {
logger.warn("[{}] failed to lock all shards for index - timed out after 30 seconds", index);
} catch (InterruptedException e) {
logger.warn("[{}] failed to lock all shards for index - interrupted", index);
}
}
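The new catch clause logs and swallows the interrupt. A common variant (not what this commit does) also restores the thread's interrupt flag so callers further up the stack can still observe it:

-------------------------------------------------
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // re-set the flag cleared when the exception was thrown
    logger.warn("[{}] failed to lock all shards for index - interrupted", index);
}
-------------------------------------------------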

View File

@@ -37,6 +37,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
@@ -45,6 +46,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
@@ -78,24 +80,37 @@ public class ShardStateAction extends AbstractComponent {
}
public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
shardFailed(shardRouting, indexUUID, message, failure, null, listener);
}
public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, TimeValue timeout, Listener listener) {
DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
if (masterNode == null) {
logger.warn("can't send shard failed for {}, no master known.", shardRouting);
listener.onShardFailedNoMaster();
return;
}
innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, listener);
innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, timeout, listener);
}
public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, @Nullable final Throwable failure, Listener listener) {
logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", failure, shardRouting.shardId(), shardRouting, indexUUID, message);
innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, listener);
innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, null, listener);
}
private void innerShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, final Throwable failure, Listener listener) {
private void innerShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, final Throwable failure, TimeValue timeout, Listener listener) {
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
TransportRequestOptions options = TransportRequestOptions.EMPTY;
if (timeout != null) {
options = TransportRequestOptions.builder().withTimeout(timeout).build();
}
transportService.sendRequest(masterNode,
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}
@Override
public void handleException(TransportException exp) {
logger.warn("failed to send failed shard to {}", exp, masterNode);
@@ -167,7 +182,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
if (oldState != newState && newState.getRoutingNodes().hasUnassigned()) {
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
logger.trace("unassigned shards after shard failures. scheduling a reroute.");
routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
}
@@ -288,6 +303,7 @@ public class ShardStateAction extends AbstractComponent {
}
public interface Listener {
default void onSuccess() {}
default void onShardFailedNoMaster() {}
default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {}
}

View File

@@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.action.admin.cluster.health;
package org.elasticsearch.cluster.health;
/**

View File

@@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.action.admin.cluster.health;
package org.elasticsearch.cluster.health;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -37,12 +37,9 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import static org.elasticsearch.action.admin.cluster.health.ClusterShardHealth.readClusterShardHealth;
import static org.elasticsearch.cluster.health.ClusterShardHealth.readClusterShardHealth;
/**
*
*/
public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streamable, ToXContent {
public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streamable, ToXContent {
private String index;

View File

@@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.action.admin.cluster.health;
package org.elasticsearch.cluster.health;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -27,10 +27,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;
/**
*
*/
public class ClusterShardHealth implements Streamable {
public final class ClusterShardHealth implements Streamable {
private int shardId;

View File

@@ -0,0 +1,236 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.health;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.RoutingTableValidation;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.*;
import static org.elasticsearch.cluster.health.ClusterIndexHealth.readClusterIndexHealth;
public final class ClusterStateHealth implements Iterable<ClusterIndexHealth>, Streamable {
private int numberOfNodes = 0;
private int numberOfDataNodes = 0;
private int activeShards = 0;
private int relocatingShards = 0;
private int activePrimaryShards = 0;
private int initializingShards = 0;
private int unassignedShards = 0;
private double activeShardsPercent = 100;
private ClusterHealthStatus status = ClusterHealthStatus.RED;
private List<String> validationFailures;
private Map<String, ClusterIndexHealth> indices = new HashMap<>();
public static ClusterStateHealth readClusterHealth(StreamInput in) throws IOException {
ClusterStateHealth clusterStateHealth = new ClusterStateHealth();
clusterStateHealth.readFrom(in);
return clusterStateHealth;
}
ClusterStateHealth() {
// only intended for serialization
}
/**
* Creates a new <code>ClusterStateHealth</code> instance based on cluster meta data and its routing table as a convenience.
*
* @param clusterMetaData Current cluster meta data. Must not be null.
* @param routingTables Current routing table. Must not be null.
*/
public ClusterStateHealth(MetaData clusterMetaData, RoutingTable routingTables) {
this(ClusterState.builder(ClusterName.DEFAULT).metaData(clusterMetaData).routingTable(routingTables).build());
}
/**
* Creates a new <code>ClusterStateHealth</code> instance considering the current cluster state and all indices in the cluster.
*
* @param clusterState The current cluster state. Must not be null.
*/
public ClusterStateHealth(ClusterState clusterState) {
this(clusterState, clusterState.metaData().concreteAllIndices());
}
/**
* Creates a new <code>ClusterStateHealth</code> instance considering the current cluster state and the provided index names.
*
* @param clusterState The current cluster state. Must not be null.
* @param concreteIndices An array of index names to consider. Must not be null but may be empty.
*/
public ClusterStateHealth(ClusterState clusterState, String[] concreteIndices) {
RoutingTableValidation validation = clusterState.routingTable().validate(clusterState.metaData());
validationFailures = validation.failures();
numberOfNodes = clusterState.nodes().size();
numberOfDataNodes = clusterState.nodes().dataNodes().size();
for (String index : concreteIndices) {
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);
IndexMetaData indexMetaData = clusterState.metaData().index(index);
if (indexRoutingTable == null) {
continue;
}
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
indices.put(indexHealth.getIndex(), indexHealth);
}
status = ClusterHealthStatus.GREEN;
for (ClusterIndexHealth indexHealth : indices.values()) {
activePrimaryShards += indexHealth.getActivePrimaryShards();
activeShards += indexHealth.getActiveShards();
relocatingShards += indexHealth.getRelocatingShards();
initializingShards += indexHealth.getInitializingShards();
unassignedShards += indexHealth.getUnassignedShards();
if (indexHealth.getStatus() == ClusterHealthStatus.RED) {
status = ClusterHealthStatus.RED;
} else if (indexHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) {
status = ClusterHealthStatus.YELLOW;
}
}
if (!validationFailures.isEmpty()) {
status = ClusterHealthStatus.RED;
} else if (clusterState.blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) {
status = ClusterHealthStatus.RED;
}
// shortcut on green
if (status.equals(ClusterHealthStatus.GREEN)) {
this.activeShardsPercent = 100;
} else {
List<ShardRouting> shardRoutings = clusterState.getRoutingTable().allShards();
int activeShardCount = 0;
int totalShardCount = 0;
for (ShardRouting shardRouting : shardRoutings) {
if (shardRouting.active()) activeShardCount++;
totalShardCount++;
}
this.activeShardsPercent = (((double) activeShardCount) / totalShardCount) * 100;
}
}
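// Worked example of the arithmetic above (hypothetical numbers): with 8 of 10
// shards active, activeShardsPercent = (8.0 / 10) * 100 = 80.0; a green cluster
// short-circuits to 100 without walking the routing table.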
public List<String> getValidationFailures() {
return Collections.unmodifiableList(validationFailures);
}
public int getActiveShards() {
return activeShards;
}
public int getRelocatingShards() {
return relocatingShards;
}
public int getActivePrimaryShards() {
return activePrimaryShards;
}
public int getInitializingShards() {
return initializingShards;
}
public int getUnassignedShards() {
return unassignedShards;
}
public int getNumberOfNodes() {
return this.numberOfNodes;
}
public int getNumberOfDataNodes() {
return this.numberOfDataNodes;
}
public ClusterHealthStatus getStatus() {
return status;
}
public Map<String, ClusterIndexHealth> getIndices() {
return Collections.unmodifiableMap(indices);
}
public double getActiveShardsPercent() {
return activeShardsPercent;
}
@Override
public Iterator<ClusterIndexHealth> iterator() {
return indices.values().iterator();
}
@Override
public void readFrom(StreamInput in) throws IOException {
activePrimaryShards = in.readVInt();
activeShards = in.readVInt();
relocatingShards = in.readVInt();
initializingShards = in.readVInt();
unassignedShards = in.readVInt();
numberOfNodes = in.readVInt();
numberOfDataNodes = in.readVInt();
status = ClusterHealthStatus.fromValue(in.readByte());
int size = in.readVInt();
for (int i = 0; i < size; i++) {
ClusterIndexHealth indexHealth = readClusterIndexHealth(in);
indices.put(indexHealth.getIndex(), indexHealth);
}
size = in.readVInt();
if (size == 0) {
validationFailures = Collections.emptyList();
} else {
validationFailures = new ArrayList<>(size); // the field starts out null; allocate before adding
for (int i = 0; i < size; i++) {
validationFailures.add(in.readString());
}
}
activeShardsPercent = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(activePrimaryShards);
out.writeVInt(activeShards);
out.writeVInt(relocatingShards);
out.writeVInt(initializingShards);
out.writeVInt(unassignedShards);
out.writeVInt(numberOfNodes);
out.writeVInt(numberOfDataNodes);
out.writeByte(status.value());
out.writeVInt(indices.size());
for (ClusterIndexHealth indexHealth : this) {
indexHealth.writeTo(out);
}
out.writeVInt(validationFailures.size());
for (String failure : validationFailures) {
out.writeString(failure);
}
out.writeDouble(activeShardsPercent);
}
}
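A hedged usage sketch for the new class, using only the constructors and getters defined above (the logger calls are illustrative):

-------------------------------------------------
// Derive a point-in-time health summary from a cluster state.
ClusterStateHealth health = new ClusterStateHealth(clusterState);
logger.info("cluster status [{}], active shards [{}] ([{}]% of all shards)",
        health.getStatus(), health.getActiveShards(), health.getActiveShardsPercent());
for (ClusterIndexHealth indexHealth : health) { // iterates the per-index health values
    logger.debug("index [{}] is [{}]", indexHealth.getIndex(), indexHealth.getStatus());
}
-------------------------------------------------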

View File

@@ -398,7 +398,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
.addAsNew(updatedState.metaData().index(request.index()));
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build());
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"index [" + request.index() + "] created");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
}
removalReason = "cleaning up after validating index on master";

View File

@@ -39,6 +39,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays;
import java.util.Collection;
import java.util.Locale;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -136,7 +137,8 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
MetaData newMetaData = metaDataBuilder.build();
ClusterBlocks blocks = clusterBlocksBuilder.build();
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build());
ClusterState.builder(currentState).routingTable(routingTableBuilder.build()).metaData(newMetaData).build(),
"deleted indices [" + indices + "]");
return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build();
}

View File

@@ -47,6 +47,7 @@ import org.elasticsearch.rest.RestStatus;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
/**
* Service responsible for submitting open/close index requests
@@ -124,7 +125,9 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.remove(index);
}
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build());
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices closed [" + indicesAsString + "]");
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@@ -181,7 +184,9 @@ public class MetaDataIndexStateService extends AbstractComponent {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().index(index));
}
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build());
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
"indices opened [" + indicesAsString + "]");
//no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

View File

@@ -323,7 +323,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder.build()).blocks(blocks).build();
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState);
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
return updatedState;

View File

@@ -68,7 +68,7 @@ public class RestoreSource implements Streamable, ToXContent {
}
public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException {
return in.readOptionalStreamable(new RestoreSource());
return in.readOptionalStreamable(RestoreSource::new);
}
@Override

View File

@@ -183,13 +183,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return this.customs;
}
public <T extends ClusterState.Custom> T custom(String type) {
return (T) customs.get(type);
}
public boolean hasUnassigned() {
return !unassignedShards.isEmpty();
}
public <T extends ClusterState.Custom> T custom(String type) { return (T) customs.get(type); }
public UnassignedShards unassigned() {
return this.unassignedShards;
@@ -217,12 +211,22 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return nodesPerAttributesCounts;
}
/**
* Returns <code>true</code> iff this {@link RoutingNodes} instance has any unassigned primaries even if the
* primaries are marked as temporarily ignored.
*/
public boolean hasUnassignedPrimaries() {
return unassignedShards.numPrimaries() > 0;
return unassignedShards.getNumPrimaries() + unassignedShards.getNumIgnoredPrimaries() > 0;
}
/**
* Returns <code>true</code> iff this {@link RoutingNodes} instance has any unassigned shards even if the
* shards are marked as temporarily ignored.
* @see UnassignedShards#isEmpty()
* @see UnassignedShards#isIgnoredEmpty()
*/
public boolean hasUnassignedShards() {
return !unassignedShards.isEmpty();
return unassignedShards.isEmpty() == false || unassignedShards.isIgnoredEmpty() == false;
}
public boolean hasInactivePrimaries() {
@@ -524,25 +528,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {
private final List<ShardRouting> ignored;
private int primaries = 0;
private long transactionId = 0;
private final UnassignedShards source;
private final long sourceTransactionId;
public UnassignedShards(UnassignedShards other) {
this.nodes = other.nodes;
source = other;
sourceTransactionId = other.transactionId;
unassigned = new ArrayList<>(other.unassigned);
ignored = new ArrayList<>(other.ignored);
primaries = other.primaries;
}
private int ignoredPrimaries = 0;
public UnassignedShards(RoutingNodes nodes) {
this.nodes = nodes;
unassigned = new ArrayList<>();
ignored = new ArrayList<>();
source = null;
sourceTransactionId = -1;
}
public void add(ShardRouting shardRouting) {
@@ -550,21 +541,34 @@ public class RoutingNodes implements Iterable<RoutingNode> {
primaries++;
}
unassigned.add(shardRouting);
transactionId++;
}
public void sort(Comparator<ShardRouting> comparator) {
CollectionUtil.timSort(unassigned, comparator);
}
public int size() {
return unassigned.size();
}
/**
* Returns the number of non-ignored unassigned shards
*/
public int size() { return unassigned.size(); }
public int numPrimaries() {
/**
* Returns the number of unassigned shards temporarily marked as ignored
*/
public int ignoredSize() { return ignored.size(); }
/**
* Returns the number of non-ignored unassigned primaries
*/
public int getNumPrimaries() {
return primaries;
}
/**
* Returns the number of unassigned primaries temporarily marked as ignored
*/
public int getNumIgnoredPrimaries() { return ignoredPrimaries; }
@Override
public UnassignedIterator iterator() {
return new UnassignedIterator();
@@ -580,12 +584,18 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
/**
* Adds a shard to the ignore unassigned list. Should be used with caution, typically,
* Marks a shard as temporarily ignored and adds it to the ignore unassigned list.
* Should be used with caution, typically,
* the correct usage is to removeAndIgnore from the iterator.
* @see #ignored()
* @see UnassignedIterator#removeAndIgnore()
* @see #isIgnoredEmpty()
*/
public void ignoreShard(ShardRouting shard) {
if (shard.primary()) {
ignoredPrimaries++;
}
ignored.add(shard);
transactionId++;
}
public class UnassignedIterator implements Iterator<ShardRouting> {
@@ -618,6 +628,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Removes and ignores the unassigned shard (will be ignored for this run, but
* will be added back to unassigned once the metadata is constructed again).
* Typically this is used when an allocation decision prevents a shard from being allocated such
* that subsequent consumers of this API won't try to allocate this shard again.
*/
public void removeAndIgnore() {
innerRemove();
@@ -639,45 +651,37 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (current.primary()) {
primaries--;
}
transactionId++;
}
}
/**
* Returns <code>true</code> iff this collection contains no non-ignored unassigned shards.
*/
public boolean isEmpty() {
return unassigned.isEmpty();
}
/**
* Returns <code>true</code> iff no unassigned shards are marked as temporarily ignored.
* @see UnassignedShards#ignoreShard(ShardRouting)
* @see UnassignedIterator#removeAndIgnore()
*/
public boolean isIgnoredEmpty() {
return ignored.isEmpty();
}
public void shuffle() {
Collections.shuffle(unassigned);
}
public void clear() {
transactionId++;
unassigned.clear();
ignored.clear();
primaries = 0;
}
public void transactionEnd(UnassignedShards shards) {
assert shards.source == this && shards.sourceTransactionId == transactionId :
"Expected ID: " + shards.sourceTransactionId + " actual: " + transactionId + " Expected Source: " + shards.source + " actual: " + this;
transactionId++;
this.unassigned.clear();
this.unassigned.addAll(shards.unassigned);
this.ignored.clear();
this.ignored.addAll(shards.ignored);
this.primaries = shards.primaries;
}
public UnassignedShards transactionBegin() {
return new UnassignedShards(this);
}
/**
* Drains all unassigned shards and returns them.
* This method will not drain ignored shards.
*/
public ShardRouting[] drain() {
ShardRouting[] mutableShardRoutings = unassigned.toArray(new ShardRouting[unassigned.size()]);
unassigned.clear();
primaries = 0;
transactionId++;
return mutableShardRoutings;
}
}
@@ -698,10 +702,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return true;
}
int unassignedPrimaryCount = 0;
int unassignedIgnoredPrimaryCount = 0;
int inactivePrimaryCount = 0;
int inactiveShardCount = 0;
int relocating = 0;
final Set<ShardId> seenShards = new HashSet<>();
Map<String, Integer> indicesAndShards = new HashMap<>();
for (RoutingNode node : routingNodes) {
for (ShardRouting shard : node) {
@@ -716,7 +720,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (shard.relocating()) {
relocating++;
}
seenShards.add(shard.shardId());
Integer i = indicesAndShards.get(shard.index());
if (i == null) {
i = shard.id();
@@ -751,11 +754,18 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (shard.primary()) {
unassignedPrimaryCount++;
}
seenShards.add(shard.shardId());
}
assert unassignedPrimaryCount == routingNodes.unassignedShards.numPrimaries() :
"Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().numPrimaries() + "]";
for (ShardRouting shard : routingNodes.unassigned().ignored()) {
if (shard.primary()) {
unassignedIgnoredPrimaryCount++;
}
}
assert unassignedPrimaryCount == routingNodes.unassignedShards.getNumPrimaries() :
"Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().getNumPrimaries() + "]";
assert unassignedIgnoredPrimaryCount == routingNodes.unassignedShards.getNumIgnoredPrimaries() :
"Unassigned ignored primaries is [" + unassignedIgnoredPrimaryCount + "] but RoutingNodes returned unassigned ignored primaries [" + routingNodes.unassigned().getNumIgnoredPrimaries() + "]";
assert inactivePrimaryCount == routingNodes.inactivePrimaryCount :
"Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + routingNodes.inactivePrimaryCount + "]";
assert inactiveShardCount == routingNodes.inactiveShardCount :

View File

@@ -55,9 +55,8 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
private final AllocationService allocationService;
private AtomicBoolean rerouting = new AtomicBoolean();
private volatile long registeredNextDelaySetting = Long.MAX_VALUE;
private volatile long minDelaySettingAtLastScheduling = Long.MAX_VALUE;
private volatile ScheduledFuture registeredNextDelayFuture;
private volatile long unassignedShardsAllocatedTimestamp = 0;
@Inject
public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) {
@@ -88,19 +87,6 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
return this.allocationService;
}
/**
* Update the last time the allocator tried to assign unassigned shards
*
* This is used so that both the GatewayAllocator and RoutingService use a
* consistent timestamp for comparing which shards have been delayed to
* avoid a race condition where GatewayAllocator thinks the shard should
* be delayed and the RoutingService thinks it has already passed the delay
* and that the GatewayAllocator has/will handle it.
*/
public void setUnassignedShardsAllocatedTimestamp(long timeInMillis) {
this.unassignedShardsAllocatedTimestamp = timeInMillis;
}
/**
* Initiates a reroute.
*/
@@ -111,45 +97,43 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.state().nodes().localNodeMaster()) {
// figure out when the next unassigned allocation needs to happen from now. If this is larger than or equal
// to the last time we checked and scheduled, we are guaranteed to have a reroute until then, so no need
// to schedule again
long nextDelaySetting = UnassignedInfo.findSmallestDelayedAllocationSetting(settings, event.state());
if (nextDelaySetting > 0 && nextDelaySetting < registeredNextDelaySetting) {
// Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
// If the minimum of the currently relevant delay settings is larger than something we scheduled in the past,
// we are guaranteed that the planned schedule will happen before any of the current shard delays are expired.
long minDelaySetting = UnassignedInfo.findSmallestDelayedAllocationSetting(settings, event.state());
if (minDelaySetting <= 0) {
logger.trace("no need to schedule reroute - no delayed unassigned shards, minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastScheduling);
minDelaySettingAtLastScheduling = Long.MAX_VALUE;
FutureUtils.cancel(registeredNextDelayFuture);
registeredNextDelaySetting = nextDelaySetting;
// We use System.currentTimeMillis here because we want the
// next delay from the "now" perspective, rather than the
// delay from the last time the GatewayAllocator tried to
// assign/delay the shard
TimeValue nextDelay = TimeValue.timeValueMillis(UnassignedInfo.findNextDelayedAllocationIn(System.currentTimeMillis(), settings, event.state()));
int unassignedDelayedShards = UnassignedInfo.getNumberOfDelayedUnassigned(unassignedShardsAllocatedTimestamp, settings, event.state());
if (unassignedDelayedShards > 0) {
logger.info("delaying allocation for [{}] unassigned shards, next check in [{}]",
unassignedDelayedShards, nextDelay);
registeredNextDelayFuture = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
registeredNextDelaySetting = Long.MAX_VALUE;
reroute("assign delayed unassigned shards");
}
} else if (minDelaySetting < minDelaySettingAtLastScheduling) {
FutureUtils.cancel(registeredNextDelayFuture);
minDelaySettingAtLastScheduling = minDelaySetting;
TimeValue nextDelay = TimeValue.timeValueNanos(UnassignedInfo.findNextDelayedAllocationIn(event.state()));
assert nextDelay.nanos() > 0 : "next delay must be non 0 as minDelaySetting is [" + minDelaySetting + "]";
logger.info("delaying allocation for [{}] unassigned shards, next check in [{}]",
UnassignedInfo.getNumberOfDelayedUnassigned(event.state()), nextDelay);
registeredNextDelayFuture = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
minDelaySettingAtLastScheduling = Long.MAX_VALUE;
reroute("assign delayed unassigned shards");
}
@Override
public void onFailure(Throwable t) {
logger.warn("failed to schedule/execute reroute post unassigned shard", t);
registeredNextDelaySetting = Long.MAX_VALUE;
}
});
}
@Override
public void onFailure(Throwable t) {
logger.warn("failed to schedule/execute reroute post unassigned shard", t);
minDelaySettingAtLastScheduling = Long.MAX_VALUE;
}
});
} else {
logger.trace("no need to schedule reroute due to delayed unassigned, next_delay_setting [{}], registered [{}]", nextDelaySetting, registeredNextDelaySetting);
logger.trace("no need to schedule reroute - current schedule reroute is enough. minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastScheduling);
}
}
}
// visible for testing
long getRegisteredNextDelaySetting() {
return this.registeredNextDelaySetting;
long getMinDelaySettingAtLastScheduling() {
return this.minDelaySettingAtLastScheduling;
}
// visible for testing
@@ -167,7 +151,7 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
@Override
public ClusterState execute(ClusterState currentState) {
rerouting.set(false);
RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, reason);
if (!routingResult.changed()) {
// no state changed
return currentState;

View File

@@ -39,7 +39,7 @@ public class RoutingValidationException extends RoutingException {
public RoutingValidationException(StreamInput in) throws IOException {
super(in);
validation = in.readOptionalStreamable(new RoutingTableValidation());
validation = in.readOptionalStreamable(RoutingTableValidation::new);
}
@Override

View File

@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/**
* Holds additional information as to why the shard is in unassigned state.
@@ -103,21 +104,24 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
private final Reason reason;
private final long timestamp;
private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
private volatile long lastComputedLeftDelayNanos = 0l; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
private final String message;
private final Throwable failure;
public UnassignedInfo(Reason reason, String message) {
this(reason, System.currentTimeMillis(), message, null);
this(reason, System.currentTimeMillis(), System.nanoTime(), message, null);
}
public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure) {
this(reason, System.currentTimeMillis(), message, failure);
this(reason, System.currentTimeMillis(), System.nanoTime(), message, failure);
}
private UnassignedInfo(Reason reason, long timestamp, String message, Throwable failure) {
private UnassignedInfo(Reason reason, long unassignedTimeMillis, long timestampNanos, String message, Throwable failure) {
this.reason = reason;
this.timestamp = timestamp;
this.unassignedTimeMillis = unassignedTimeMillis;
this.unassignedTimeNanos = timestampNanos;
this.message = message;
this.failure = failure;
assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
@@ -125,14 +129,18 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
UnassignedInfo(StreamInput in) throws IOException {
this.reason = Reason.values()[(int) in.readByte()];
this.timestamp = in.readLong();
this.unassignedTimeMillis = in.readLong();
// As System.nanoTime() cannot be compared across different JVMs, reset it to now.
// This means that in master failover situations, elapsed delay time is forgotten.
this.unassignedTimeNanos = System.nanoTime();
this.message = in.readOptionalString();
this.failure = in.readThrowable();
}
public void writeTo(StreamOutput out) throws IOException {
out.writeByte((byte) reason.ordinal());
out.writeLong(timestamp);
out.writeLong(unassignedTimeMillis);
// Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
out.writeOptionalString(message);
out.writeThrowable(failure);
}
@@ -149,13 +157,20 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
/**
* The timestamp in milliseconds since epoch. Note, we use timestamp here since
* we want to make sure it's preserved across node serializations. Extra care needs
* to be taken if it's used to calculate a diff (handle negative values) in case of
* time drift.
* The timestamp in milliseconds when the shard became unassigned, based on System.currentTimeMillis().
* Note, we use timestamp here since we want to make sure it's preserved across node serializations.
*/
public long getTimestampInMillis() {
return this.timestamp;
public long getUnassignedTimeInMillis() {
return this.unassignedTimeMillis;
}
/**
* The timestamp in nanoseconds when the shard became unassigned, based on System.nanoTime().
* Used to calculate the delay for delayed shard allocation.
* ONLY EXPOSED FOR TESTS!
*/
public long getUnassignedTimeInNanos() {
return this.unassignedTimeNanos;
}
/**
@@ -186,7 +201,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
/**
* The allocation delay value associated with the index (defaulting to node settings if not set).
* The allocation delay value in milliseconds associated with the index (defaulting to node settings if not set).
*/
public long getAllocationDelayTimeoutSetting(Settings settings, Settings indexSettings) {
if (reason != Reason.NODE_LEFT) {
@@ -197,31 +212,40 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
/**
* The time in milliseconds until this unassigned shard can be reassigned.
* The delay in nanoseconds until this unassigned shard can be reassigned. This value is cached and might be slightly out-of-date.
* See also the {@link #updateDelay(long, Settings, Settings)} method.
*/
public long getDelayAllocationExpirationIn(long unassignedShardsAllocatedTimestamp, Settings settings, Settings indexSettings) {
long delayTimeout = getAllocationDelayTimeoutSetting(settings, indexSettings);
if (delayTimeout == 0) {
return 0;
}
long delta = unassignedShardsAllocatedTimestamp - timestamp;
// account for time drift, treat it as no timeout
if (delta < 0) {
return 0;
}
return delayTimeout - delta;
public long getLastComputedLeftDelayNanos() {
return lastComputedLeftDelayNanos;
}
/**
* Updates delay left based on current time (in nanoseconds) and index/node settings.
* Should only be called from ReplicaShardAllocator.
* @return updated delay in nanoseconds
*/
public long updateDelay(long nanoTimeNow, Settings settings, Settings indexSettings) {
long delayTimeoutMillis = getAllocationDelayTimeoutSetting(settings, indexSettings);
final long newComputedLeftDelayNanos;
if (delayTimeoutMillis == 0l) {
newComputedLeftDelayNanos = 0l;
} else {
assert nanoTimeNow >= unassignedTimeNanos;
long delayTimeoutNanos = TimeUnit.NANOSECONDS.convert(delayTimeoutMillis, TimeUnit.MILLISECONDS);
newComputedLeftDelayNanos = Math.max(0l, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
}
lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
return newComputedLeftDelayNanos;
}
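// Worked example (editor's sketch, hypothetical timings): with a 60s delay
// timeout (e.g. index.unassigned.node_left.delayed_timeout: 60s) and a shard
// that became unassigned 45s ago:
//   delayTimeoutNanos = TimeUnit.NANOSECONDS.convert(60_000, TimeUnit.MILLISECONDS)
//   leftDelayNanos    = max(0, delayTimeoutNanos - 45s-in-nanos)  -> 15s in nanos
// Once the elapsed time exceeds the timeout the result clamps to 0, meaning the
// shard is eligible for allocation again.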
/**
* Returns the number of shards that are unassigned and currently being delayed.
*/
public static int getNumberOfDelayedUnassigned(long unassignedShardsAllocatedTimestamp, Settings settings, ClusterState state) {
public static int getNumberOfDelayedUnassigned(ClusterState state) {
int count = 0;
for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
if (shard.primary() == false) {
IndexMetaData indexMetaData = state.metaData().index(shard.getIndex());
long delay = shard.unassignedInfo().getDelayAllocationExpirationIn(unassignedShardsAllocatedTimestamp, settings, indexMetaData.getSettings());
long delay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
if (delay > 0) {
count++;
}
@@ -231,15 +255,16 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
/**
* Finds the smallest delay expiration setting of an unassigned shard. Returns 0 if there are none.
* Finds the smallest delay expiration setting in milliseconds of all unassigned shards that are still delayed. Returns 0 if there are none.
*/
public static long findSmallestDelayedAllocationSetting(Settings settings, ClusterState state) {
long nextDelaySetting = Long.MAX_VALUE;
for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
if (shard.primary() == false) {
IndexMetaData indexMetaData = state.metaData().index(shard.getIndex());
long leftDelayNanos = shard.unassignedInfo().getLastComputedLeftDelayNanos();
long delayTimeoutSetting = shard.unassignedInfo().getAllocationDelayTimeoutSetting(settings, indexMetaData.getSettings());
if (delayTimeoutSetting > 0 && delayTimeoutSetting < nextDelaySetting) {
if (leftDelayNanos > 0 && delayTimeoutSetting > 0 && delayTimeoutSetting < nextDelaySetting) {
nextDelaySetting = delayTimeoutSetting;
}
}
@@ -249,14 +274,13 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
/**
* Finds the next (closest) delay expiration of an unassigned shard. Returns 0 if there are none.
* Finds the next (closest) delay expiration of an unassigned shard in nanoseconds. Returns 0 if there are none.
*/
public static long findNextDelayedAllocationIn(long unassignedShardsAllocatedTimestamp, Settings settings, ClusterState state) {
public static long findNextDelayedAllocationIn(ClusterState state) {
long nextDelay = Long.MAX_VALUE;
for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
if (shard.primary() == false) {
IndexMetaData indexMetaData = state.metaData().index(shard.getIndex());
long nextShardDelay = shard.unassignedInfo().getDelayAllocationExpirationIn(unassignedShardsAllocatedTimestamp, settings, indexMetaData.getSettings());
long nextShardDelay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
if (nextShardDelay > 0 && nextShardDelay < nextDelay) {
nextDelay = nextShardDelay;
}
@@ -268,7 +292,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
public String shortSummary() {
StringBuilder sb = new StringBuilder();
sb.append("[reason=").append(reason).append("]");
sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(timestamp)).append("]");
sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]");
String details = getDetails();
if (details != null) {
sb.append(", details[").append(details).append("]");
@ -285,7 +309,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("unassigned_info");
builder.field("reason", reason);
builder.field("at", DATE_TIME_FORMATTER.printer().print(timestamp));
builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));
String details = getDetails();
if (details != null) {
builder.field("details", details);
@ -301,7 +325,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
UnassignedInfo that = (UnassignedInfo) o;
if (timestamp != that.timestamp) return false;
if (unassignedTimeMillis != that.unassignedTimeMillis) return false;
if (reason != that.reason) return false;
if (message != null ? !message.equals(that.message) : that.message != null) return false;
return !(failure != null ? !failure.equals(that.failure) : that.failure != null);
@ -311,7 +335,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
@Override
public int hashCode() {
int result = reason != null ? reason.hashCode() : 0;
result = 31 * result + Long.hashCode(timestamp);
result = 31 * result + Long.hashCode(unassignedTimeMillis);
result = 31 * result + (message != null ? message.hashCode() : 0);
result = 31 * result + (failure != null ? failure.hashCode() : 0);
return result;

View File

@ -22,6 +22,8 @@ package org.elasticsearch.cluster.routing.allocation;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@ -41,6 +43,10 @@ import org.elasticsearch.common.settings.Settings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
@ -85,7 +91,16 @@ public class AllocationService extends AbstractComponent {
if (withReroute) {
reroute(allocation);
}
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()));
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.metaData(), routingTable),
"shards started [" + startedShardsAsString + "] ..."
);
return result;
}
public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
@ -111,7 +126,32 @@ public class AllocationService extends AbstractComponent {
}
shardsAllocators.applyFailedShards(allocation);
reroute(allocation);
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()));
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
"shards failed [" + failedShardsAsString + "] ..."
);
return result;
}
/**
* Internal helper to cap the number of elements in a potentially long list for logging.
*
* @param elements The elements to log. May be any non-empty list. Must not be null.
* @param formatter A function that can convert list elements to a String. Must not be null.
* @param <T> The list element type.
* @return A comma-separated string of the first few elements.
*/
private <T> String firstListElementsToCommaDelimitedString(List<T> elements, Function<T, String> formatter) {
final int maxNumberOfElements = 10;
return elements
.stream()
.limit(maxNumberOfElements)
.map(formatter)
.collect(Collectors.joining(", "));
}
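For illustration, the same `limit` plus `joining` pattern on a hypothetical list of twelve numbered elements (a sketch, not part of the change itself):
---------------------------------------------------------------------------
List<String> elements = IntStream.rangeClosed(1, 12)
        .mapToObj(String::valueOf)
        .collect(Collectors.toList());
String capped = elements.stream()
        .limit(10)                          // cap the log output at ten elements
        .collect(Collectors.joining(", "));
// capped == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10" -- the last two elements are dropped
---------------------------------------------------------------------------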
public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
@ -134,7 +174,14 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), explanations);
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable, explanations);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
"reroute commands"
);
return result;
}
/**
@ -142,8 +189,8 @@ public class AllocationService extends AbstractComponent {
* <p>
* If the same instance of the routing table is returned, then no change has been made.
*/
public RoutingAllocation.Result reroute(ClusterState clusterState) {
return reroute(clusterState, false);
public RoutingAllocation.Result reroute(ClusterState clusterState, String reason) {
return reroute(clusterState, reason, false);
}
/**
@ -151,7 +198,7 @@ public class AllocationService extends AbstractComponent {
* <p>
* If the same instance of the routing table is returned, then no change has been made.
*/
public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) {
protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@ -160,7 +207,22 @@ public class AllocationService extends AbstractComponent {
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable());
}
return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()));
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
reason
);
return result;
}
private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
ClusterHealthStatus previousHealth = previousStateHealth.getStatus();
ClusterHealthStatus currentHealth = newStateHealth.getStatus();
if (!previousHealth.equals(currentHealth)) {
logger.info("Cluster health status changed from [{}] to [{}] (reason: [{}]).", previousHealth, currentHealth, reason);
}
}
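For illustration, starting the last unassigned primary of a hypothetical index `my-index` would move the cluster from RED to YELLOW and, given the format string above, emit a line of the shape: `Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[my-index][0]] ...]).`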
private boolean reroute(RoutingAllocation allocation) {
@ -176,7 +238,7 @@ public class AllocationService extends AbstractComponent {
changed |= electPrimariesAndUnassignedDanglingReplicas(allocation);
// now allocate all the unassigned to available nodes
if (allocation.routingNodes().hasUnassigned()) {
if (allocation.routingNodes().unassigned().size() > 0) {
changed |= shardsAllocators.allocateUnassigned(allocation);
}
@ -232,7 +294,7 @@ public class AllocationService extends AbstractComponent {
private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
boolean changed = false;
RoutingNodes routingNodes = allocation.routingNodes();
if (!routingNodes.hasUnassignedPrimaries()) {
if (routingNodes.unassigned().getNumPrimaries() == 0) {
// move out if we don't have unassigned primaries
return changed;
}

View File

@ -353,7 +353,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
logger.trace("Start assigning unassigned shards");
}
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
NodeSorter sorter = newNodeSorter();
@ -433,7 +433,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}
@ -508,7 +507,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
if (logger.isTraceEnabled()) {
logger.trace("Try moving shard [{}] from [{}]", shard, node);
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
final ModelNode sourceNode = nodes.get(node.nodeId());
@ -544,7 +543,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}

View File

@ -51,15 +51,12 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
public static final String NAME = "cluster_rebalance";
public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance";
public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = new Validator() {
@Override
public String validate(String setting, String value, ClusterState clusterState) {
try {
ClusterRebalanceType.parseString(value);
return null;
} catch (IllegalArgumentException e) {
return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]";
}
public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> {
try {
ClusterRebalanceType.parseString(value);
return null;
} catch (IllegalArgumentException e) {
return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]";
}
};
@ -153,7 +150,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
}
if (type == ClusterRebalanceType.INDICES_ALL_ACTIVE) {
// check if there are unassigned shards.
if ( allocation.routingNodes().hasUnassignedShards() ) {
if (allocation.routingNodes().hasUnassignedShards() ) {
return allocation.decision(Decision.NO, NAME, "cluster has unassigned shards");
}
// in case all indices are assigned, are there initializing shards which

View File

@ -34,7 +34,8 @@ public enum ShapeRelation implements Writeable<ShapeRelation>{
INTERSECTS("intersects"),
DISJOINT("disjoint"),
WITHIN("within");
WITHIN("within"),
CONTAINS("contains");
private final String relationName;

View File

@ -41,7 +41,7 @@ import java.io.IOException;
import java.util.*;
/**
* Basic class for building GeoJSON shapes like Polygons, Linestrings, etc
*/
public abstract class ShapeBuilder extends ToXContentToBytes {
@ -97,122 +97,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
return jtsGeometry;
}
/**
* Create a new point
*
* @param longitude longitude of the point
* @param latitude latitude of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(double longitude, double latitude) {
return newPoint(new Coordinate(longitude, latitude));
}
/**
* Create a new {@link PointBuilder} from a {@link Coordinate}
* @param coordinate coordinate defining the position of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(Coordinate coordinate) {
return new PointBuilder().coordinate(coordinate);
}
/**
* Create a new set of points
* @return new {@link MultiPointBuilder}
*/
public static MultiPointBuilder newMultiPoint() {
return new MultiPointBuilder();
}
/**
* Create a new lineString
* @return a new {@link LineStringBuilder}
*/
public static LineStringBuilder newLineString() {
return new LineStringBuilder();
}
/**
* Create a new Collection of lineStrings
* @return a new {@link MultiLineStringBuilder}
*/
public static MultiLineStringBuilder newMultiLinestring() {
return new MultiLineStringBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PointBuilder}
*/
public static PolygonBuilder newPolygon() {
return new PolygonBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PointBuilder}
*/
public static PolygonBuilder newPolygon(Orientation orientation) {
return new PolygonBuilder(orientation);
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon() {
return new MultiPolygonBuilder();
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon(Orientation orientation) {
return new MultiPolygonBuilder(orientation);
}
/**
* Create a new GeometryCollection
* @return a new {@link GeometryCollectionBuilder}
*/
public static GeometryCollectionBuilder newGeometryCollection() {
return new GeometryCollectionBuilder();
}
/**
* Create a new GeometryCollection
* @return a new {@link GeometryCollectionBuilder}
*/
public static GeometryCollectionBuilder newGeometryCollection(Orientation orientation) {
return new GeometryCollectionBuilder(orientation);
}
/**
* create a new Circle
* @return a new {@link CircleBuilder}
*/
public static CircleBuilder newCircleBuilder() {
return new CircleBuilder();
}
/**
* create a new rectangle
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope() { return new EnvelopeBuilder(); }
/**
* create a new rectangle
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope(Orientation orientation) { return new EnvelopeBuilder(orientation); }
/**
* Create a new Shape from this builder. Since calling this method could change the
defined shape (by inserting new coordinates or changing the position of points),
the builder loses its validity. So this method should only be called once on a builder
* @return new {@link Shape} defined by the builder
*/
public abstract Shape build();
@ -220,7 +108,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Recursive method which parses the arrays of coordinates used to define
* Shapes
*
* @param parser
* Parser that will be read from
* @return CoordinateNode representing the start of the coordinate tree
@ -232,8 +120,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
XContentParser.Token token = parser.nextToken();
// Base cases
if (token != XContentParser.Token.START_ARRAY &&
token != XContentParser.Token.END_ARRAY &&
token != XContentParser.Token.VALUE_NULL) {
double lon = parser.doubleValue();
token = parser.nextToken();
@ -317,7 +205,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Calculate the intersection of a line segment and a vertical dateline.
*
* @param p1
* start-point of the line segment
* @param p2
@ -347,7 +235,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
* Calculate all intersections of line segments and a vertical line. The
* Array of edges will be ordered asc by the y-coordinate of the
* intersections of edges.
*
* @param dateline
* x-coordinate of the dateline
* @param edges
@ -360,7 +248,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
for (int i = 0; i < edges.length; i++) {
Coordinate p1 = edges[i].coordinate;
Coordinate p2 = edges[i].next.coordinate;
assert !Double.isNaN(p2.x) && !Double.isNaN(p1.x);
edges[i].intersect = Edge.MAX_COORDINATE;
double position = intersection(p1, p2, dateline);
@ -386,7 +274,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Creates a new leaf CoordinateNode
*
* @param coordinate
* Coordinate for the Node
*/
@ -397,7 +285,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Creates a new parent CoordinateNode
*
* @param children
* Children of the Node
*/
@ -427,7 +315,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* This helper class implements a linked list for {@link Coordinate}. It contains
fields for a dateline intersection and component id
*/
protected static final class Edge {
Coordinate coordinate; // coordinate of the start point
@ -500,7 +388,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Concatenate a set of points to a polygon
*
* @param component
* component id of the polygon
* @param direction
@ -547,7 +435,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Create a connected list of a list of coordinates
*
* @param points
* array of point
* @param offset
@ -567,7 +455,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
final int next = (offset + ((top + 1) % length));
boolean orientation = points[offset + prev].x > points[offset + next].x;
// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
// thus if orientation is computed as cw, the logic will translate points across dateline
// and convert to a right handed system
@ -576,7 +464,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
double[] range = range(points, offset, length);
final double rng = range[1] - range[0];
// translate the points if the following is true
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
// (translation would result in a collapsed poly)
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
boolean incorrectOrientation = component == 0 && handedness != orientation;
@ -595,7 +483,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
}
/**
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
*/
protected static void translate(Coordinate[] points) {
for (Coordinate c : points) {
@ -607,7 +495,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
/**
* Set the intersection of this line segment to the given position
*
* @param position
* position of the intersection [0..1]
* @return the {@link Coordinate} of the intersection
@ -770,7 +658,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
throw new ElasticsearchParseException("shape type [{}] not included", shapeType);
}
}
protected static void validatePointNode(CoordinateNode node) {
if (node.isEmpty()) {
throw new ElasticsearchParseException("invalid number of points (0) provided when expecting a single coordinate ([lat, lng])");
@ -783,11 +671,11 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
protected static PointBuilder parsePoint(CoordinateNode node) {
validatePointNode(node);
return newPoint(node.coordinate);
return ShapeBuilders.newPoint(node.coordinate);
}
protected static CircleBuilder parseCircle(CoordinateNode coordinates, Distance radius) {
return newCircleBuilder().center(coordinates.coordinate).radius(radius);
return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius);
}
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) {
@ -804,7 +692,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y));
lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y));
}
return newEnvelope(orientation).topLeft(uL).bottomRight(lR);
return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR);
}
protected static void validateMultiPointNode(CoordinateNode coordinates) {
@ -842,7 +730,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
throw new ElasticsearchParseException("invalid number of points in LineString (found [{}] - must be >= 2)", coordinates.children.size());
}
LineStringBuilder line = newLineString();
LineStringBuilder line = ShapeBuilders.newLineString();
for (CoordinateNode node : coordinates.children) {
line.point(node.coordinate);
}
@ -850,7 +738,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
}
protected static MultiLineStringBuilder parseMultiLine(CoordinateNode coordinates) {
MultiLineStringBuilder multiline = newMultiLinestring();
MultiLineStringBuilder multiline = ShapeBuilders.newMultiLinestring();
for (CoordinateNode node : coordinates.children) {
multiline.linestring(parseLineString(node));
}
@ -903,13 +791,13 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates, final Orientation orientation,
final boolean coerce) {
MultiPolygonBuilder polygons = newMultiPolygon(orientation);
MultiPolygonBuilder polygons = ShapeBuilders.newMultiPolygon(orientation);
for (CoordinateNode node : coordinates.children) {
polygons.polygon(parsePolygon(node, orientation, coerce));
}
return polygons;
}
/**
* Parse the geometries array of a GeometryCollection
*
@ -922,16 +810,16 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
throw new ElasticsearchParseException("geometries must be an array of geojson objects");
}
XContentParser.Token token = parser.nextToken();
GeometryCollectionBuilder geometryCollection = newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper
GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper
.fieldType().orientation());
while (token != XContentParser.Token.END_ARRAY) {
ShapeBuilder shapeBuilder = GeoShapeType.parse(parser);
geometryCollection.shape(shapeBuilder);
token = parser.nextToken();
}
return geometryCollection;
}
}

View File

@ -0,0 +1,148 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo.builders;
import com.vividsolutions.jts.geom.Coordinate;
/**
* A collection of static methods for creating ShapeBuilders.
*/
public class ShapeBuilders {
/**
* Create a new point
*
* @param longitude longitude of the point
* @param latitude latitude of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(double longitude, double latitude) {
return ShapeBuilders.newPoint(new Coordinate(longitude, latitude));
}
/**
* Create a new {@link PointBuilder} from a {@link Coordinate}
* @param coordinate coordinate defining the position of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(Coordinate coordinate) {
return new PointBuilder().coordinate(coordinate);
}
/**
* Create a new set of points
* @return new {@link MultiPointBuilder}
*/
public static MultiPointBuilder newMultiPoint() {
return new MultiPointBuilder();
}
/**
* Create a new lineString
* @return a new {@link LineStringBuilder}
*/
public static LineStringBuilder newLineString() {
return new LineStringBuilder();
}
/**
* Create a new Collection of lineStrings
* @return a new {@link MultiLineStringBuilder}
*/
public static MultiLineStringBuilder newMultiLinestring() {
return new MultiLineStringBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PointBuilder}
*/
public static PolygonBuilder newPolygon() {
return new PolygonBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PointBuilder}
*/
public static PolygonBuilder newPolygon(ShapeBuilder.Orientation orientation) {
return new PolygonBuilder(orientation);
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon() {
return new MultiPolygonBuilder();
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon(ShapeBuilder.Orientation orientation) {
return new MultiPolygonBuilder(orientation);
}
/**
* Create a new GeometryCollection
* @return a new {@link GeometryCollectionBuilder}
*/
public static GeometryCollectionBuilder newGeometryCollection() {
return new GeometryCollectionBuilder();
}
/**
* Create a new GeometryCollection
*
* @return a new {@link GeometryCollectionBuilder}
*/
public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) {
return new GeometryCollectionBuilder(orientation);
}
/**
* create a new Circle
*
* @return a new {@link CircleBuilder}
*/
public static CircleBuilder newCircleBuilder() {
return new CircleBuilder();
}
/**
* create a new rectangle
*
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope() {
return new EnvelopeBuilder();
}
/**
* create a new rectangle
*
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) {
return new EnvelopeBuilder(orientation);
}
}
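A usage sketch for the relocated factories; `build()` returning a `Shape` is declared on `ShapeBuilder` above, and the coordinates here are made up:
---------------------------------------------------------------------------
PointBuilder berlin = ShapeBuilders.newPoint(13.40, 52.52); // longitude, latitude
Shape shape = berlin.build();                               // ShapeBuilder#build()
---------------------------------------------------------------------------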

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.Version;
@ -52,6 +53,7 @@ import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static org.elasticsearch.ElasticsearchException.readException;
import static org.elasticsearch.ElasticsearchException.readStackTrace;
@ -234,6 +236,20 @@ public abstract class StreamInput extends InputStream {
return i | ((b & 0x7FL) << 56);
}
public long readZLong() throws IOException {
long accumulator = 0L;
int i = 0;
long currentByte;
while (((currentByte = readByte()) & 0x80L) != 0) {
accumulator |= (currentByte & 0x7F) << i;
i += 7;
if (i > 63) {
throw new IOException("variable-length stream is too long");
}
}
return BitUtil.zigZagDecode(accumulator | (currentByte << i));
}
@Nullable
public Text readOptionalText() throws IOException {
int length = readInt();
@ -517,8 +533,9 @@ public abstract class StreamInput extends InputStream {
/**
* Serializes a potential null value.
*/
public <T extends Streamable> T readOptionalStreamable(T streamable) throws IOException {
public <T extends Streamable> T readOptionalStreamable(Supplier<T> supplier) throws IOException {
if (readBoolean()) {
T streamable = supplier.get();
streamable.readFrom(this);
return streamable;
} else {
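With the `Supplier` signature, callers pass a constructor reference and nothing is allocated when the stream recorded an absent value. A minimal sketch, where `SomeValue` is a hypothetical `Streamable`:
---------------------------------------------------------------------------
// Hypothetical Streamable used only for illustration
class SomeValue implements Streamable {
    String value;
    @Override
    public void readFrom(StreamInput in) throws IOException { value = in.readString(); }
    @Override
    public void writeTo(StreamOutput out) throws IOException { out.writeString(value); }
}

SomeValue read = in.readOptionalStreamable(SomeValue::new); // null if absent
---------------------------------------------------------------------------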

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.ElasticsearchException;
@ -172,9 +173,9 @@ public abstract class StreamOutput extends OutputStream {
}
/**
* Writes an long in a variable-length format. Writes between one and nine
* bytes. Smaller values take fewer bytes. Negative numbers are not
* supported.
* Writes a non-negative long in a variable-length format.
* Writes between one and nine bytes. Smaller values take fewer bytes.
* Negative numbers are not supported.
*/
public void writeVLong(long i) throws IOException {
assert i >= 0;
@ -185,6 +186,23 @@ public abstract class StreamOutput extends OutputStream {
writeByte((byte) i);
}
/**
* Writes a long in a variable-length format. Writes between one and ten bytes.
* Values are remapped by sliding the sign bit into the lsb and then encoded as an unsigned number
* e.g., 0 -> 0, -1 -> 1, 1 -> 2, ..., Long.MIN_VALUE -> -1, Long.MAX_VALUE -> -2
* Numbers with small absolute value will have a small encoding
* If the numbers are known to be non-negative, use {@link #writeVLong(long)}
*/
public void writeZLong(long i) throws IOException {
// zig-zag encoding cf. https://developers.google.com/protocol-buffers/docs/encoding?hl=en
long value = BitUtil.zigZagEncode(i);
while ((value & 0xFFFFFFFFFFFFFF80L) != 0L) {
writeByte((byte)((value & 0x7F) | 0x80));
value >>>= 7;
}
writeByte((byte) (value & 0x7F));
}
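A round-trip sketch of the encoding, assuming a `BytesStreamOutput` wired back into a `StreamInput`, as in the usual stream tests:
---------------------------------------------------------------------------
BytesStreamOutput out = new BytesStreamOutput();
out.writeZLong(-1L);            // zig-zag maps -1 to unsigned 1: one byte, 0x01
out.writeZLong(Long.MIN_VALUE); // maps to all ones: the full ten bytes

StreamInput in = StreamInput.wrap(out.bytes());
assert in.readZLong() == -1L;
assert in.readZLong() == Long.MIN_VALUE;
---------------------------------------------------------------------------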
public void writeOptionalString(@Nullable String str) throws IOException {
if (str == null) {
writeBoolean(false);

View File

@ -35,9 +35,6 @@ public abstract class AbstractRunnable implements Runnable {
public final void run() {
try {
doRun();
} catch (InterruptedException ex) {
Thread.interrupted();
onFailure(ex);
} catch (Throwable t) {
onFailure(t);
} finally {

View File

@ -930,8 +930,8 @@ public final class XContentBuilder implements BytesStream, Releasable {
return this;
}
public XContentBuilder rawField(String fieldName, InputStream content) throws IOException {
generator.writeRawField(fieldName, content, bos);
public XContentBuilder rawField(String fieldName, InputStream content, XContentType contentType) throws IOException {
generator.writeRawField(fieldName, content, bos, contentType);
return this;
}

View File

@ -116,7 +116,7 @@ public interface XContentGenerator extends Closeable {
void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException;
void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException;
void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException;
void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException;

View File

@ -423,7 +423,7 @@ public class XContentHelper {
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
if (contentType == builder.contentType()) {
builder.rawField(field, compressedStreamInput);
builder.rawField(field, compressedStreamInput, contentType);
} else {
try (XContentParser parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput)) {
parser.nextToken();

View File

@ -20,15 +20,11 @@
package org.elasticsearch.common.xcontent.cbor;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.support.filtering.FilteringJsonGenerator;
import java.io.*;
@ -63,27 +59,19 @@ public class CborXContent implements XContent {
throw new ElasticsearchParseException("cbor does not support stream parsing...");
}
private XContentGenerator newXContentGenerator(JsonGenerator jsonGenerator) {
return new CborXContentGenerator(new BaseJsonGenerator(jsonGenerator));
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return newXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8));
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
if (CollectionUtils.isEmpty(filters)) {
return createGenerator(os);
}
FilteringJsonGenerator cborGenerator = new FilteringJsonGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), filters);
return new CborXContentGenerator(cborGenerator);
return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), filters);
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return newXContentGenerator(cborFactory.createGenerator(writer));
return new CborXContentGenerator(cborFactory.createGenerator(writer));
}
@Override

View File

@ -19,10 +19,10 @@
package org.elasticsearch.common.xcontent.cbor;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.cbor.CBORParser;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.IOException;
@ -34,8 +34,8 @@ import java.io.OutputStream;
*/
public class CborXContentGenerator extends JsonXContentGenerator {
public CborXContentGenerator(BaseJsonGenerator generator) {
super(generator);
public CborXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
super(jsonGenerator, filters);
}
@Override
@ -49,7 +49,7 @@ public class CborXContentGenerator extends JsonXContentGenerator {
}
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
writeFieldName(fieldName);
try (CBORParser parser = CborXContent.cborFactory.createParser(content)) {
parser.nextToken();

View File

@ -1,80 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.json;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.base.GeneratorBase;
import com.fasterxml.jackson.core.util.JsonGeneratorDelegate;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public class BaseJsonGenerator extends JsonGeneratorDelegate {
protected final GeneratorBase base;
public BaseJsonGenerator(JsonGenerator generator, JsonGenerator base) {
super(generator, true);
if (base instanceof GeneratorBase) {
this.base = (GeneratorBase) base;
} else {
this.base = null;
}
}
public BaseJsonGenerator(JsonGenerator generator) {
this(generator, generator);
}
protected void writeStartRaw(String fieldName) throws IOException {
writeFieldName(fieldName);
writeRaw(':');
}
public void writeEndRaw() {
assert base != null : "JsonGenerator should be of instance GeneratorBase but was: " + delegate.getClass();
if (base != null) {
base.getOutputContext().writeValue();
}
}
protected void writeRawValue(byte[] content, OutputStream bos) throws IOException {
flush();
bos.write(content);
}
protected void writeRawValue(byte[] content, int offset, int length, OutputStream bos) throws IOException {
flush();
bos.write(content, offset, length);
}
protected void writeRawValue(InputStream content, OutputStream bos) throws IOException {
flush();
Streams.copy(content, bos);
}
protected void writeRawValue(BytesReference content, OutputStream bos) throws IOException {
flush();
content.writeTo(bos);
}
}

View File

@ -25,9 +25,7 @@ import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.support.filtering.FilteringJsonGenerator;
import java.io.*;
@ -65,27 +63,19 @@ public class JsonXContent implements XContent {
return '\n';
}
private XContentGenerator newXContentGenerator(JsonGenerator jsonGenerator) {
return new JsonXContentGenerator(new BaseJsonGenerator(jsonGenerator));
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return newXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
if (CollectionUtils.isEmpty(filters)) {
return createGenerator(os);
}
FilteringJsonGenerator jsonGenerator = new FilteringJsonGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), filters);
return new JsonXContentGenerator(jsonGenerator);
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), filters);
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return newXContentGenerator(jsonFactory.createGenerator(writer));
return new JsonXContentGenerator(jsonFactory.createGenerator(writer));
}
@Override

View File

@ -19,11 +19,19 @@
package org.elasticsearch.common.xcontent.json;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonStreamContext;
import com.fasterxml.jackson.core.base.GeneratorBase;
import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import com.fasterxml.jackson.core.io.SerializedString;
import com.fasterxml.jackson.core.util.DefaultIndenter;
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter;
import java.io.IOException;
import java.io.InputStream;
@ -34,13 +42,40 @@ import java.io.OutputStream;
*/
public class JsonXContentGenerator implements XContentGenerator {
protected final BaseJsonGenerator generator;
/** Generator used to write content **/
protected final JsonGenerator generator;
/**
* Reference to base generator because
* writing raw values needs a specific method call.
*/
private final GeneratorBase base;
/**
* Reference to filtering generator because
* writing an empty object '{}' when everything is filtered
* out needs a specific treatment
*/
private final FilteringGeneratorDelegate filter;
private boolean writeLineFeedAtEnd;
private static final SerializedString LF = new SerializedString("\n");
private static final DefaultPrettyPrinter.Indenter INDENTER = new DefaultIndenter(" ", LF.getValue());
public JsonXContentGenerator(BaseJsonGenerator generator) {
this.generator = generator;
public JsonXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
if (jsonGenerator instanceof GeneratorBase) {
this.base = (GeneratorBase) jsonGenerator;
} else {
this.base = null;
}
if (CollectionUtils.isEmpty(filters)) {
this.generator = jsonGenerator;
this.filter = null;
} else {
this.filter = new FilteringGeneratorDelegate(jsonGenerator, new FilterPathBasedFilter(filters), true, true);
this.generator = this.filter;
}
}
@Override
@ -68,13 +103,35 @@ public class JsonXContentGenerator implements XContentGenerator {
generator.writeEndArray();
}
protected boolean isFiltered() {
return filter != null;
}
protected boolean inRoot() {
if (isFiltered()) {
JsonStreamContext context = filter.getFilterContext();
return ((context != null) && (context.inRoot() && context.getCurrentName() == null));
}
return false;
}
@Override
public void writeStartObject() throws IOException {
if (isFiltered() && inRoot()) {
// Bypass generator to always write the root start object
filter.getDelegate().writeStartObject();
return;
}
generator.writeStartObject();
}
@Override
public void writeEndObject() throws IOException {
if (isFiltered() && inRoot()) {
// Bypass generator to always write the root end object
filter.getDelegate().writeEndObject();
return;
}
generator.writeEndObject();
}
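The delegation can be exercised with plain Jackson; a self-contained sketch that swaps in a simple `TokenFilter` for the `FilterPathBasedFilter` wired up above (`TokenFilter` and `FilteringGeneratorDelegate` live in `com.fasterxml.jackson.core.filter`):
---------------------------------------------------------------------------
JsonFactory factory = new JsonFactory();
StringWriter out = new StringWriter();
JsonGenerator gen = new FilteringGeneratorDelegate(
        factory.createGenerator(out),
        new TokenFilter() {
            @Override
            public TokenFilter includeProperty(String name) {
                return "kept".equals(name) ? TokenFilter.INCLUDE_ALL : null;
            }
        },
        true,   // includePath: also emit the parents of every match
        true);  // allowMultipleMatches

gen.writeStartObject();
gen.writeStringField("kept", "yes");
gen.writeStringField("dropped", "no");
gen.writeEndObject();
gen.close();
// out now holds {"kept":"yes"}
---------------------------------------------------------------------------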
@ -253,32 +310,62 @@ public class JsonXContentGenerator implements XContentGenerator {
generator.writeStartObject();
}
private void writeStartRaw(String fieldName) throws IOException {
writeFieldName(fieldName);
generator.writeRaw(':');
}
public void writeEndRaw() {
assert base != null : "JsonGenerator should be of instance GeneratorBase but was: " + generator.getClass();
if (base != null) {
base.getOutputContext().writeValue();
}
}
@Override
public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
generator.writeStartRaw(fieldName);
generator.writeRawValue(content, bos);
generator.writeEndRaw();
writeRawField(fieldName, new BytesArray(content), bos);
}
@Override
public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
generator.writeStartRaw(fieldName);
generator.writeRawValue(content, offset, length, bos);
generator.writeEndRaw();
writeRawField(fieldName, new BytesArray(content, offset, length), bos);
}
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
generator.writeStartRaw(fieldName);
generator.writeRawValue(content, bos);
generator.writeEndRaw();
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
if (isFiltered() || (contentType != contentType())) {
// When the current generator is filtered (ie filter != null)
// or the content is in a different format than the current generator,
// we need to copy the whole structure so that it will be correctly
// filtered or converted
try (XContentParser parser = XContentFactory.xContent(contentType).createParser(content)) {
parser.nextToken();
writeFieldName(fieldName);
copyCurrentStructure(parser);
}
} else {
writeStartRaw(fieldName);
flush();
Streams.copy(content, bos);
writeEndRaw();
}
}
@Override
public final void writeRawField(String fieldName, BytesReference content, OutputStream bos) throws IOException {
XContentType contentType = XContentFactory.xContentType(content);
if (contentType != null) {
writeObjectRaw(fieldName, content, bos);
if (isFiltered() || (contentType != contentType())) {
// When the current generator is filtered (ie filter != null)
// or the content is in a different format than the current generator,
// we need to copy the whole structure so that it will be correctly
// filtered or converted
copyRawField(fieldName, content, contentType.xContent());
} else {
// Otherwise, the generator is not filtered and has the same type: we can potentially optimize the write
writeObjectRaw(fieldName, content, bos);
}
} else {
writeFieldName(fieldName);
// we could potentially optimize this to not rely on exception logic...
@ -296,9 +383,29 @@ public class JsonXContentGenerator implements XContentGenerator {
}
protected void writeObjectRaw(String fieldName, BytesReference content, OutputStream bos) throws IOException {
generator.writeStartRaw(fieldName);
generator.writeRawValue(content, bos);
generator.writeEndRaw();
writeStartRaw(fieldName);
flush();
content.writeTo(bos);
writeEndRaw();
}
protected void copyRawField(String fieldName, BytesReference content, XContent xContent) throws IOException {
XContentParser parser = null;
try {
if (content.hasArray()) {
parser = xContent.createParser(content.array(), content.arrayOffset(), content.length());
} else {
parser = xContent.createParser(content.streamInput());
}
if (fieldName != null) {
writeFieldName(fieldName);
}
copyCurrentStructure(parser);
} finally {
if (parser != null) {
parser.close();
}
}
}
@Override

View File

@ -20,15 +20,11 @@
package org.elasticsearch.common.xcontent.smile;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.fasterxml.jackson.dataformat.smile.SmileGenerator;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.support.filtering.FilteringJsonGenerator;
import java.io.*;
@ -64,27 +60,19 @@ public class SmileXContent implements XContent {
return (byte) 0xFF;
}
private XContentGenerator newXContentGenerator(JsonGenerator jsonGenerator) {
return new SmileXContentGenerator(new BaseJsonGenerator(jsonGenerator));
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return newXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
if (CollectionUtils.isEmpty(filters)) {
return createGenerator(os);
}
FilteringJsonGenerator smileGenerator = new FilteringJsonGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), filters);
return new SmileXContentGenerator(smileGenerator);
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), filters);
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return newXContentGenerator(smileFactory.createGenerator(writer));
return new SmileXContentGenerator(smileFactory.createGenerator(writer));
}
@Override

View File

@ -19,10 +19,10 @@
package org.elasticsearch.common.xcontent.smile;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.smile.SmileParser;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.IOException;
@ -34,8 +34,8 @@ import java.io.OutputStream;
*/
public class SmileXContentGenerator extends JsonXContentGenerator {
public SmileXContentGenerator(BaseJsonGenerator generator) {
super(generator);
public SmileXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
super(jsonGenerator, filters);
}
@Override
@ -49,7 +49,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
}
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
writeFieldName(fieldName);
try (SmileParser parser = SmileXContent.smileFactory.createParser(content)) {
parser.nextToken();

View File

@ -1,90 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support;
import org.elasticsearch.common.xcontent.XContentGenerator;
import java.io.IOException;
/**
*
*/
public abstract class AbstractXContentGenerator implements XContentGenerator {
@Override
public void writeStringField(String fieldName, String value) throws IOException {
writeFieldName(fieldName);
writeString(value);
}
@Override
public void writeBooleanField(String fieldName, boolean value) throws IOException {
writeFieldName(fieldName);
writeBoolean(value);
}
@Override
public void writeNullField(String fieldName) throws IOException {
writeFieldName(fieldName);
writeNull();
}
@Override
public void writeNumberField(String fieldName, int value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, long value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, double value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeNumberField(String fieldName, float value) throws IOException {
writeFieldName(fieldName);
writeNumber(value);
}
@Override
public void writeBinaryField(String fieldName, byte[] data) throws IOException {
writeFieldName(fieldName);
writeBinary(data);
}
@Override
public void writeArrayFieldStart(String fieldName) throws IOException {
writeFieldName(fieldName);
writeStartArray();
}
@Override
public void writeObjectFieldStart(String fieldName) throws IOException {
writeFieldName(fieldName);
writeStartObject();
}
}

View File

@ -1,222 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support.filtering;
import com.fasterxml.jackson.core.JsonGenerator;
import org.elasticsearch.common.regex.Regex;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* A FilterContext contains the description of a field about to be written by a JsonGenerator.
*/
public class FilterContext {
/**
* The field/property name to be written
*/
private String property;
/**
* List of XContentFilter matched by the current filtering context
*/
private List<String[]> matchings;
/**
* Flag to indicate if the field/property must be written
*/
private Boolean write = null;
/**
* Flag to indicate if the field/property match a filter
*/
private boolean match = false;
/**
* Points to the parent context
*/
private FilterContext parent;
/**
* Type of the field/property
*/
private Type type = Type.VALUE;
protected enum Type {
VALUE,
OBJECT,
ARRAY,
ARRAY_OF_OBJECT
}
public FilterContext(String property, FilterContext parent) {
this.property = property;
this.parent = parent;
}
public void reset(String property) {
this.property = property;
this.write = null;
if (matchings != null) {
matchings.clear();
}
this.match = false;
this.type = Type.VALUE;
}
public void reset(String property, FilterContext parent) {
reset(property);
this.parent = parent;
if (parent.isMatch()) {
match = true;
}
}
public FilterContext parent() {
return parent;
}
public List<String[]> matchings() {
return matchings;
}
public void addMatching(String[] matching) {
if (matchings == null) {
matchings = new ArrayList<>();
}
matchings.add(matching);
}
public boolean isRoot() {
return parent == null;
}
public boolean isArray() {
return Type.ARRAY.equals(type);
}
public void initArray() {
this.type = Type.ARRAY;
}
public boolean isObject() {
return Type.OBJECT.equals(type);
}
public void initObject() {
this.type = Type.OBJECT;
}
public boolean isArrayOfObject() {
return Type.ARRAY_OF_OBJECT.equals(type);
}
public void initArrayOfObject() {
this.type = Type.ARRAY_OF_OBJECT;
}
public boolean isMatch() {
return match;
}
/**
* This method contains the logic to check if a field/property must be included
* or not.
*/
public boolean include() {
if (write == null) {
if (parent != null) {
// the parent context matches the end of a filter list:
// by default we include all the sub properties so we
// don't need to check if the sub properties also match
if (parent.isMatch()) {
write = true;
match = true;
return write;
}
if (parent.matchings() != null) {
// Iterates over the filters matched by the parent context
// and checks if the current context also match
for (String[] matcher : parent.matchings()) {
if (matcher.length > 0) {
String field = matcher[0];
if ("**".equals(field)) {
addMatching(matcher);
}
if ((field != null) && (Regex.simpleMatch(field, property))) {
int remaining = matcher.length - 1;
// the current context matches the end of a filter list:
// it must be written and it is flagged as a direct match
if (remaining == 0) {
write = true;
match = true;
return write;
} else {
String[] submatching = new String[remaining];
System.arraycopy(matcher, 1, submatching, 0, remaining);
addMatching(submatching);
}
}
}
}
}
} else {
// Root object is always written
write = true;
}
if (write == null) {
write = false;
}
}
return write;
}
/**
* Ensure that the full path to the current field is written by the JsonGenerator
*/
public void writePath(JsonGenerator generator) throws IOException {
if (parent != null) {
parent.writePath(generator);
}
if ((write == null) || (!write)) {
write = true;
if (property == null) {
generator.writeStartObject();
} else {
generator.writeFieldName(property);
if (isArray()) {
generator.writeStartArray();
} else if (isObject() || isArrayOfObject()) {
generator.writeStartObject();
}
}
}
}
}

View File

@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support.filtering;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
public class FilterPath {
static final FilterPath EMPTY = new FilterPath();
private final String filter;
private final String segment;
private final FilterPath next;
private final boolean simpleWildcard;
private final boolean doubleWildcard;
protected FilterPath(String filter, String segment, FilterPath next) {
this.filter = filter;
this.segment = segment;
this.next = next;
this.simpleWildcard = (segment != null) && (segment.length() == 1) && (segment.charAt(0) == '*');
this.doubleWildcard = (segment != null) && (segment.length() == 2) && (segment.charAt(0) == '*') && (segment.charAt(1) == '*');
}
private FilterPath() {
this("<empty>", "", null);
}
public FilterPath matchProperty(String name) {
if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) {
return next;
}
return null;
}
public boolean matches() {
return next == null;
}
boolean isDoubleWildcard() {
return doubleWildcard;
}
boolean isSimpleWildcard() {
return simpleWildcard;
}
String getSegment() {
return segment;
}
FilterPath getNext() {
return next;
}
public static FilterPath[] compile(String... filters) {
if (CollectionUtils.isEmpty(filters)) {
return null;
}
List<FilterPath> paths = new ArrayList<>();
for (String filter : filters) {
if (filter != null) {
filter = filter.trim();
if (filter.length() > 0) {
paths.add(parse(filter, filter));
}
}
}
return paths.toArray(new FilterPath[paths.size()]);
}
private static FilterPath parse(final String filter, final String segment) {
int end = segment.length();
for (int i = 0; i < end; ) {
char c = segment.charAt(i);
if (c == '.') {
String current = segment.substring(0, i).replaceAll("\\\\.", ".");
return new FilterPath(filter, current, parse(filter, segment.substring(i + 1)));
}
++i;
if ((c == '\\') && (i < end) && (segment.charAt(i) == '.')) {
++i;
}
}
return new FilterPath(filter, segment.replaceAll("\\\\.", "."), EMPTY);
}
@Override
public String toString() {
return "FilterPath [filter=" + filter + ", segment=" + segment + "]";
}
}
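
parse() above splits a filter on unescaped dots only, so "a\.b.c" yields the segments ["a.b", "c"]. A standalone sketch reproducing just that splitting rule:

---------------------------------------------------------------------------
// Standalone sketch of the parse() splitting rule: unescaped dots separate
// segments, while "\." stays inside a segment and is unescaped afterwards.
import java.util.ArrayList;
import java.util.List;

public class FilterPathParseSketch {
    static List<String> parse(String segment) {
        List<String> segments = new ArrayList<>();
        int end = segment.length();
        for (int i = 0; i < end; ) {
            char c = segment.charAt(i);
            if (c == '.') {
                segments.add(segment.substring(0, i).replaceAll("\\\\.", "."));
                segments.addAll(parse(segment.substring(i + 1)));
                return segments;
            }
            ++i;
            if ((c == '\\') && (i < end) && (segment.charAt(i) == '.')) {
                ++i; // skip an escaped dot so it is not treated as a separator
            }
        }
        segments.add(segment.replaceAll("\\\\.", "."));
        return segments;
    }

    public static void main(String[] args) {
        System.out.println(parse("a\\.b.c"));    // [a.b, c]
        System.out.println(parse("user.*.zip")); // [user, *, zip]
    }
}
---------------------------------------------------------------------------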

View File

@ -0,0 +1,107 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support.filtering;
import com.fasterxml.jackson.core.filter.TokenFilter;
import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
public class FilterPathBasedFilter extends TokenFilter {
/**
* Marker value that should be used to indicate that a property name
* or value matches one of the filter paths.
*/
private static final TokenFilter MATCHING = new TokenFilter(){};
/**
* Marker value that should be used to indicate that none of the
* property names/values matches one of the filter paths.
*/
private static final TokenFilter NO_MATCHING = new TokenFilter(){};
private final FilterPath[] filters;
public FilterPathBasedFilter(FilterPath[] filters) {
if (CollectionUtils.isEmpty(filters)) {
throw new IllegalArgumentException("filters cannot be null or empty");
}
this.filters = filters;
}
public FilterPathBasedFilter(String[] filters) {
this(FilterPath.compile(filters));
}
/**
* Evaluates if a property name matches one of the given filter paths.
*/
private TokenFilter evaluate(String name, FilterPath[] filters) {
if (filters != null) {
List<FilterPath> nextFilters = null;
for (FilterPath filter : filters) {
FilterPath next = filter.matchProperty(name);
if (next != null) {
if (next.matches()) {
return MATCHING;
} else {
if (nextFilters == null) {
nextFilters = new ArrayList<>();
}
if (filter.isDoubleWildcard()) {
nextFilters.add(filter);
}
nextFilters.add(next);
}
}
}
if ((nextFilters != null) && (nextFilters.isEmpty() == false)) {
return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]));
}
}
return NO_MATCHING;
}
@Override
public TokenFilter includeProperty(String name) {
TokenFilter include = evaluate(name, filters);
if (include == MATCHING) {
return TokenFilter.INCLUDE_ALL;
}
if (include == NO_MATCHING) {
return null;
}
return include;
}
@Override
protected boolean _includeScalar() {
for (FilterPath filter : filters) {
if (filter.matches()) {
return true;
}
}
return false;
}
}
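
Since FilterPathBasedFilter is a Jackson TokenFilter, it can be attached to any generator through Jackson 2.x's FilteringGeneratorDelegate. A hedged sketch of that wiring (document values are made up; includePath and allowMultipleMatches are the standard Jackson flags):

---------------------------------------------------------------------------
// Hedged sketch: plug the FilterPathBasedFilter above into a plain Jackson
// generator so only fields matching "user.name" are emitted.
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import com.fasterxml.jackson.core.filter.TokenFilter;
import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter;
import java.io.StringWriter;

public class FilterPathBasedFilterDemo {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        JsonGenerator gen = new JsonFactory().createGenerator(out);
        TokenFilter filter = new FilterPathBasedFilter(new String[]{"user.name"});
        // includePath=true also emits the parent objects/field names leading to a match
        JsonGenerator filtered = new FilteringGeneratorDelegate(gen, filter, true, true);
        filtered.writeStartObject();
        filtered.writeFieldName("user");
        filtered.writeStartObject();
        filtered.writeStringField("name", "kimchy");
        filtered.writeStringField("password", "secret");
        filtered.writeEndObject();
        filtered.writeEndObject();
        filtered.close();
        System.out.println(out); // {"user":{"name":"kimchy"}}
    }
}
---------------------------------------------------------------------------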

View File

@ -1,424 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support.filtering;
import com.fasterxml.jackson.core.Base64Variant;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.SerializableString;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Queue;
/**
* A FilteringJsonGenerator uses Ant-path-like filters to include/exclude fields when writing XContent streams.
*
* When writing an XContent stream, this class instantiates (or reuses) a FilterContext instance for each
* field (or property) that must be generated. The filter context is used to check whether the field/property must be
* written according to the current list of filters.
*/
public class FilteringJsonGenerator extends BaseJsonGenerator {
/**
* List of previous contexts
* (MAX_CONTEXTS contexts are kept around in order to be reused)
*/
private Queue<FilterContext> contexts = new ArrayDeque<>();
private static final int MAX_CONTEXTS = 10;
/**
* Current filter context
*/
private FilterContext context;
public FilteringJsonGenerator(JsonGenerator generator, String[] filters) {
super(generator);
List<String[]> builder = new ArrayList<>();
if (filters != null) {
for (String filter : filters) {
String[] matcher = Strings.delimitedListToStringArray(filter, ".");
if (matcher != null) {
builder.add(matcher);
}
}
}
// Creates a root context that matches all filtering rules
this.context = get(null, null, Collections.unmodifiableList(builder));
}
/**
* Get a new context instance (and reset it if needed)
*/
private FilterContext get(String property, FilterContext parent) {
FilterContext ctx = contexts.poll();
if (ctx == null) {
ctx = new FilterContext(property, parent);
} else {
ctx.reset(property, parent);
}
return ctx;
}
/**
* Get a new context instance (and reset it if needed)
*/
private FilterContext get(String property, FilterContext context, List<String[]> matchings) {
FilterContext ctx = get(property, context);
if (matchings != null) {
for (String[] matching : matchings) {
ctx.addMatching(matching);
}
}
return ctx;
}
/**
* Adds a context instance to the pool in order to reuse it if needed
*/
private void put(FilterContext ctx) {
if (contexts.size() <= MAX_CONTEXTS) {
contexts.offer(ctx);
}
}
@Override
public void writeStartArray() throws IOException {
context.initArray();
if (context.include()) {
super.writeStartArray();
}
}
@Override
public void writeStartArray(int size) throws IOException {
context.initArray();
if (context.include()) {
super.writeStartArray(size);
}
}
@Override
public void writeEndArray() throws IOException {
// Case of array of objects
if (context.isArrayOfObject()) {
// Release current context and go one level up
FilterContext parent = context.parent();
put(context);
context = parent;
}
if (context.include()) {
super.writeEndArray();
}
}
@Override
public void writeStartObject() throws IOException {
// Case of array of objects
if (context.isArray()) {
// Get a context for the anonymous object
context = get(null, context, context.matchings());
context.initArrayOfObject();
}
if (!context.isArrayOfObject()) {
context.initObject();
}
if (context.include()) {
super.writeStartObject();
}
context = get(null, context);
}
@Override
public void writeEndObject() throws IOException {
if (!context.isRoot()) {
// Release current context and go one level up
FilterContext parent = context.parent();
put(context);
context = parent;
}
if (context.include()) {
super.writeEndObject();
}
}
@Override
public void writeFieldName(String name) throws IOException {
context.reset(name);
if (context.include()) {
// Ensure that the full path to the field is written
context.writePath(delegate);
super.writeFieldName(name);
}
}
@Override
public void writeFieldName(SerializableString name) throws IOException {
context.reset(name.getValue());
if (context.include()) {
// Ensure that the full path to the field is written
context.writePath(delegate);
super.writeFieldName(name);
}
}
@Override
public void writeString(String text) throws IOException {
if (context.include()) {
super.writeString(text);
}
}
@Override
public void writeString(char[] text, int offset, int len) throws IOException {
if (context.include()) {
super.writeString(text, offset, len);
}
}
@Override
public void writeString(SerializableString text) throws IOException {
if (context.include()) {
super.writeString(text);
}
}
@Override
public void writeRawUTF8String(byte[] text, int offset, int length) throws IOException {
if (context.include()) {
super.writeRawUTF8String(text, offset, length);
}
}
@Override
public void writeUTF8String(byte[] text, int offset, int length) throws IOException {
if (context.include()) {
super.writeUTF8String(text, offset, length);
}
}
@Override
public void writeRaw(String text) throws IOException {
if (context.include()) {
super.writeRaw(text);
}
}
@Override
public void writeRaw(String text, int offset, int len) throws IOException {
if (context.include()) {
super.writeRaw(text, offset, len);
}
}
@Override
public void writeRaw(SerializableString raw) throws IOException {
if (context.include()) {
super.writeRaw(raw);
}
}
@Override
public void writeRaw(char[] text, int offset, int len) throws IOException {
if (context.include()) {
super.writeRaw(text, offset, len);
}
}
@Override
public void writeRaw(char c) throws IOException {
if (context.include()) {
super.writeRaw(c);
}
}
@Override
public void writeRawValue(String text) throws IOException {
if (context.include()) {
super.writeRawValue(text);
}
}
@Override
public void writeRawValue(String text, int offset, int len) throws IOException {
if (context.include()) {
super.writeRawValue(text, offset, len);
}
}
@Override
public void writeRawValue(char[] text, int offset, int len) throws IOException {
if (context.include()) {
super.writeRawValue(text, offset, len);
}
}
@Override
public void writeBinary(Base64Variant b64variant, byte[] data, int offset, int len) throws IOException {
if (context.include()) {
super.writeBinary(b64variant, data, offset, len);
}
}
@Override
public int writeBinary(Base64Variant b64variant, InputStream data, int dataLength) throws IOException {
if (context.include()) {
return super.writeBinary(b64variant, data, dataLength);
}
return 0;
}
@Override
public void writeNumber(short v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(int v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(long v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(BigInteger v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(double v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(float v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(BigDecimal v) throws IOException {
if (context.include()) {
super.writeNumber(v);
}
}
@Override
public void writeNumber(String encodedValue) throws IOException, UnsupportedOperationException {
if (context.include()) {
super.writeNumber(encodedValue);
}
}
@Override
public void writeBoolean(boolean state) throws IOException {
if (context.include()) {
super.writeBoolean(state);
}
}
@Override
public void writeNull() throws IOException {
if (context.include()) {
super.writeNull();
}
}
@Override
public void copyCurrentEvent(JsonParser jp) throws IOException {
if (context.include()) {
super.copyCurrentEvent(jp);
}
}
@Override
public void copyCurrentStructure(JsonParser jp) throws IOException {
if (context.include()) {
super.copyCurrentStructure(jp);
}
}
@Override
protected void writeRawValue(byte[] content, OutputStream bos) throws IOException {
if (context.include()) {
super.writeRawValue(content, bos);
}
}
@Override
protected void writeRawValue(byte[] content, int offset, int length, OutputStream bos) throws IOException {
if (context.include()) {
super.writeRawValue(content, offset, length, bos);
}
}
@Override
protected void writeRawValue(InputStream content, OutputStream bos) throws IOException {
if (context.include()) {
super.writeRawValue(content, bos);
}
}
@Override
protected void writeRawValue(BytesReference content, OutputStream bos) throws IOException {
if (context.include()) {
super.writeRawValue(content, bos);
}
}
@Override
public void close() throws IOException {
contexts.clear();
super.close();
}
}
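
For reference, a hypothetical before/after of the behavior this removed generator implemented, and which the new FilterPathBasedFilter preserves, given made-up filters and document:

---------------------------------------------------------------------------
filters: ["name", "address.city"]
input:   {"name":"Paris","address":{"city":"Paris","zip":"75000"},"country":"France"}
output:  {"name":"Paris","address":{"city":"Paris"}}
---------------------------------------------------------------------------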

View File

@ -20,15 +20,11 @@
package org.elasticsearch.common.xcontent.yaml;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.support.filtering.FilteringJsonGenerator;
import java.io.*;
@ -62,27 +58,19 @@ public class YamlXContent implements XContent {
throw new ElasticsearchParseException("yaml does not support stream parsing...");
}
private XContentGenerator newXContentGenerator(JsonGenerator jsonGenerator) {
return new YamlXContentGenerator(new BaseJsonGenerator(jsonGenerator));
}
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return newXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
if (CollectionUtils.isEmpty(filters)) {
return createGenerator(os);
}
FilteringJsonGenerator yamlGenerator = new FilteringJsonGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), filters);
return new YamlXContentGenerator(yamlGenerator);
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), filters);
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return newXContentGenerator(yamlFactory.createGenerator(writer));
return new YamlXContentGenerator(yamlFactory.createGenerator(writer));
}
@Override
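
A sketch of the call site this overload enables. Only createGenerator(OutputStream, String[]) appears in this diff; the yamlXContent static accessor and the rest of the setup are assumptions:

---------------------------------------------------------------------------
// Hedged sketch of the new call site: ask for a YAML generator that only
// emits fields matching the given filter paths.
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;
import java.io.ByteArrayOutputStream;

public class FilteredYamlSketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        XContentGenerator generator =
                YamlXContent.yamlXContent.createGenerator(os, new String[]{"user.name"});
        generator.close(); // only fields matching "user.name" would be emitted
    }
}
---------------------------------------------------------------------------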

View File

@ -19,10 +19,10 @@
package org.elasticsearch.common.xcontent.yaml;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.yaml.YAMLParser;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.BaseJsonGenerator;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
import java.io.IOException;
@ -34,8 +34,8 @@ import java.io.OutputStream;
*/
public class YamlXContentGenerator extends JsonXContentGenerator {
public YamlXContentGenerator(BaseJsonGenerator generator) {
super(generator);
public YamlXContentGenerator(JsonGenerator jsonGenerator, String... filters) {
super(jsonGenerator, filters);
}
@Override
@ -49,7 +49,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
}
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
public void writeRawField(String fieldName, InputStream content, OutputStream bos, XContentType contentType) throws IOException {
writeFieldName(fieldName);
try (YAMLParser parser = YamlXContent.yamlFactory.createParser(content)) {
parser.nextToken();

View File

@ -39,6 +39,8 @@ import java.util.concurrent.TimeUnit;
public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {
public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";
private static class InitialStateListener implements InitialStateDiscoveryListener {
@ -130,7 +132,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
}
public static String generateNodeId(Settings settings) {
String seed = settings.get("discovery.id.seed");
String seed = settings.get(DiscoveryService.SETTING_DISCOVERY_SEED);
if (seed != null) {
return Strings.randomBase64UUID(new Random(Long.parseLong(seed)));
}
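
generateNodeId derives the node id from a Random seeded with discovery.id.seed, so a fixed seed always produces the same id. A standalone illustration of that property; pseudoId stands in for Strings.randomBase64UUID:

---------------------------------------------------------------------------
// Standalone illustration: the same seed always yields the same id, because
// the id bytes come from a Random seeded with that value.
import java.util.Base64;
import java.util.Random;

public class SeededIdSketch {
    // stand-in for Strings.randomBase64UUID(Random)
    static String pseudoId(long seed) {
        byte[] bytes = new byte[16];
        new Random(seed).nextBytes(bytes);
        return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }

    public static void main(String[] args) {
        System.out.println(pseudoId(42L).equals(pseudoId(42L))); // true: deterministic
    }
}
---------------------------------------------------------------------------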

View File

@ -261,7 +261,8 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
RoutingAllocation.Result routingResult = master.routingService.getAllocationService().reroute(ClusterState.builder(updatedState).build());
RoutingAllocation.Result routingResult = master.routingService.getAllocationService().reroute(
ClusterState.builder(updatedState).build(), "elected as master");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

View File

@ -249,7 +249,7 @@ public class NodeJoinController extends AbstractComponent {
currentState = ClusterState.builder(currentState).nodes(builder).blocks(clusterBlocks).build();
// reroute now to remove any dead nodes (master may have stepped down when they left and didn't update the routing table)
RoutingAllocation.Result result = routingService.getAllocationService().reroute(currentState);
RoutingAllocation.Result result = routingService.getAllocationService().reroute(currentState, "nodes joined");
if (result.changed()) {
currentState = ClusterState.builder(currentState).routingResult(result).build();
}

View File

@ -60,10 +60,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@ -511,7 +508,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return rejoin(currentState, "not enough master nodes");
}
// eagerly run reroute to remove dead nodes from routing table
RoutingAllocation.Result routingResult = routingService.getAllocationService().reroute(ClusterState.builder(currentState).build());
RoutingAllocation.Result routingResult = routingService.getAllocationService().reroute(
ClusterState.builder(currentState).build(),
"[" + node + "] left");
return ClusterState.builder(currentState).routingResult(routingResult).build();
}
@ -554,7 +553,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return rejoin(currentState, "not enough master nodes");
}
// eagerly run reroute to remove dead nodes from routing table
RoutingAllocation.Result routingResult = routingService.getAllocationService().reroute(ClusterState.builder(currentState).build());
RoutingAllocation.Result routingResult = routingService.getAllocationService().reroute(
ClusterState.builder(currentState).build(),
"[" + node + "] failed");
return ClusterState.builder(currentState).routingResult(routingResult).build();
}

View File

@ -113,10 +113,6 @@ public class GatewayAllocator extends AbstractComponent {
}
public boolean allocateUnassigned(final RoutingAllocation allocation) {
// Take a snapshot of the current time and tell the RoutingService
// about it, so it will use a consistent timestamp for delays
long lastAllocateUnassignedRun = System.currentTimeMillis();
this.routingService.setUnassignedShardsAllocatedTimestamp(lastAllocateUnassignedRun);
boolean changed = false;
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
@ -124,7 +120,7 @@ public class GatewayAllocator extends AbstractComponent {
changed |= primaryShardAllocator.allocateUnassigned(allocation);
changed |= replicaShardAllocator.processExistingRecoveries(allocation);
changed |= replicaShardAllocator.allocateUnassigned(allocation, lastAllocateUnassignedRun);
changed |= replicaShardAllocator.allocateUnassigned(allocation);
return changed;
}

View File

@ -251,7 +251,9 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
routingTableBuilder.version(0);
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build());
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"state recovered");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

View File

@ -168,7 +168,8 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks).routingTable(routingTable).build();
// now, reroute
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTable).build());
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTable).build(), "dangling indices allocated");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

View File

@ -111,10 +111,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
public boolean allocateUnassigned(RoutingAllocation allocation) {
return allocateUnassigned(allocation, System.currentTimeMillis());
}
public boolean allocateUnassigned(RoutingAllocation allocation, long allocateUnassignedTimestapm) {
long nanoTimeNow = System.nanoTime();
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
@ -173,27 +170,43 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), shard.version(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation
// of the replica shard needs to be delayed, and if so, add it to the ignore unassigned list
// note: we only care about replica in delayed allocation, since if we have an unassigned primary it
// will anyhow wait to find an existing copy of the shard to be allocated
// note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex());
long delay = shard.unassignedInfo().getDelayAllocationExpirationIn(allocateUnassignedTimestapm, settings, indexMetaData.getSettings());
if (delay > 0) {
logger.debug("[{}][{}]: delaying allocation of [{}] for [{}]", shard.index(), shard.id(), shard, TimeValue.timeValueMillis(delay));
/**
* mark it as changed, since we want to kick a publishing to schedule future allocation,
* see {@link org.elasticsearch.cluster.routing.RoutingService#clusterChanged(ClusterChangedEvent)}).
*/
changed = true;
unassignedIterator.removeAndIgnore();
}
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
changed |= ignoreUnassignedIfDelayed(nanoTimeNow, allocation, unassignedIterator, shard);
}
}
return changed;
}
/**
* Checks if the allocation of the replica is to be delayed. Computes the remaining delay and, if the shard is delayed,
* adds it to the ignore-unassigned list.
* Note: we only care about replicas in delayed allocation, since an unassigned primary will
* anyhow wait until it finds an existing copy of the shard to allocate.
* Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService.
*
* PUBLIC FOR TESTS!
*
* @param timeNowNanos Timestamp in nanoseconds representing "now"
* @param allocation the routing allocation
* @param unassignedIterator iterator over unassigned shards
* @param shard the shard which might be delayed
* @return true iff allocation is delayed for this shard
*/
public boolean ignoreUnassignedIfDelayed(long timeNowNanos, RoutingAllocation allocation, RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard) {
IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex());
// calculate delay and store it in UnassignedInfo to be used by RoutingService
long delay = shard.unassignedInfo().updateDelay(timeNowNanos, settings, indexMetaData.getSettings());
if (delay > 0) {
logger.debug("[{}][{}]: delaying allocation of [{}] for [{}]", shard.index(), shard.id(), shard, TimeValue.timeValueNanos(delay));
/**
* mark it as changed, since we want to kick a publishing to schedule future allocation,
* see {@link org.elasticsearch.cluster.routing.RoutingService#clusterChanged(ClusterChangedEvent)}).
*/
unassignedIterator.removeAndIgnore();
return true;
}
return false;
}
/**
* Can the shard be allocated on at least one node based on the allocation deciders.
*/
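
The replacement above moves the delay bookkeeping onto a nanosecond clock captured once at the top of allocateUnassigned. Below is a standalone sketch of the remaining-delay arithmetic; remainingDelayNanos stands in for UnassignedInfo.updateDelay (an assumption about its semantics, not its actual code):

---------------------------------------------------------------------------
// Standalone sketch: a shard unassigned at 'unassignedTimeNanos' with a
// configured delay is ignored until the delay expires.
import java.util.concurrent.TimeUnit;

public class DelayCheckSketch {
    static long remainingDelayNanos(long nowNanos, long unassignedTimeNanos, long configuredDelayMillis) {
        long elapsed = nowNanos - unassignedTimeNanos;
        return Math.max(0L, TimeUnit.MILLISECONDS.toNanos(configuredDelayMillis) - elapsed);
    }

    public static void main(String[] args) {
        long start = System.nanoTime();
        long now = start + TimeUnit.SECONDS.toNanos(10);
        long left = remainingDelayNanos(now, start, 60_000); // 60s configured, 10s elapsed
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(left)); // 50
    }
}
---------------------------------------------------------------------------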

View File

@ -62,7 +62,7 @@ public final class CommitStats implements Streamable, ToXContent {
}
public static CommitStats readOptionalCommitStatsFrom(StreamInput in) throws IOException {
return in.readOptionalStreamable(new CommitStats());
return in.readOptionalStreamable(CommitStats::new);
}
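
The one-line change above works because readOptionalStreamable now takes a supplier, so nothing is constructed when the stream holds no value. A standalone sketch of the pattern; readOptional is illustrative, not the StreamInput API:

---------------------------------------------------------------------------
// Standalone sketch of the supplier-based optional-read pattern.
import java.util.function.Supplier;

public class OptionalReadSketch {
    static <T> T readOptional(boolean present, Supplier<T> supplier) {
        return present ? supplier.get() : null; // nothing is constructed on the absent path
    }

    public static void main(String[] args) {
        System.out.println(readOptional(false, StringBuilder::new)); // null
        System.out.println(readOptional(true, StringBuilder::new).append("built"));
    }
}
---------------------------------------------------------------------------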

View File

@ -19,19 +19,24 @@
package org.elasticsearch.index.fielddata.ordinals;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.*;
import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
/**
* Utility class to build global ordinals.
@ -69,4 +74,38 @@ public enum GlobalOrdinalsBuilder {
);
}
public static IndexOrdinalsFieldData buildEmpty(IndexSettings indexSettings, final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData) throws IOException {
assert indexReader.leaves().size() > 1;
final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
for (int i = 0; i < indexReader.leaves().size(); ++i) {
atomicFD[i] = new AbstractAtomicOrdinalsFieldData() {
@Override
public RandomAccessOrds getOrdinalsValues() {
return DocValues.emptySortedSet();
}
@Override
public long ramBytesUsed() {
return 0;
}
@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
@Override
public void close() {
}
};
subs[i] = atomicFD[i].getOrdinalsValues();
}
final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT);
return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldNames(),
indexFieldData.getFieldDataType(), atomicFD, ordinalMap, 0
);
}
}

View File

@ -64,7 +64,10 @@ public abstract class AbstractIndexFieldData<FD extends AtomicFieldData> extends
@Override
public FD load(LeafReaderContext context) {
if (context.reader().getFieldInfos().fieldInfo(fieldNames.indexName()) == null) {
// If the field doesn't exist, then don't bother with loading and adding an empty instance to the field data cache
// Some leaf readers may be wrapped and report a different set of fields while using the same cache key.
// A field that can't be found here may therefore still exist in the index,
// so we don't cache the miss and just return an empty field data instance.
// The next time the field is found, it will be loaded and cached as usual.
return empty(context.reader().maxDoc());
}

Some files were not shown because too many files have changed in this diff