Merge branch 'master' into feature/aggs-refactoring

# Conflicts:
#	core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java
#	core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
#	test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java

commit 2c33f78192
@@ -58,6 +58,9 @@ public abstract class AntTask extends DefaultTask {
                ant.project.removeBuildListener(listener)
            }

            // otherwise groovy replaces System.out, and you have no chance to debug
            // ant.saveStreams = false

            final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO
            final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name())
            BuildLogger antLogger = makeLogger(stream, outputLevel)
@@ -198,6 +198,10 @@ class BuildPlugin implements Plugin<Project> {
     * to iterate the transitive dependencies and add excludes.
     */
    static void configureConfigurations(Project project) {
        // we are not shipping these jars, we act like dumb consumers of these things
        if (project.path.startsWith(':test:fixtures')) {
            return
        }
        // fail on any conflicting dependency versions
        project.configurations.all({ Configuration configuration ->
            if (configuration.name.startsWith('_transitive_')) {
@@ -205,6 +209,10 @@ class BuildPlugin implements Plugin<Project> {
                // we just have them to find *what* transitive deps exist
                return
            }
            if (configuration.name.endsWith('Fixture')) {
                // just a self contained test-fixture configuration, likely transitive and hellacious
                return
            }
            configuration.resolutionStrategy.failOnVersionConflict()
        })
@@ -16,51 +16,39 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.precommit
package org.elasticsearch.gradle.precommit;

import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.elasticsearch.gradle.AntTask
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.apache.tools.ant.BuildEvent;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildListener;
import org.apache.tools.ant.BuildLogger;
import org.apache.tools.ant.DefaultLogger;
import org.apache.tools.ant.Project;
import org.elasticsearch.gradle.AntTask;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileCollection;

import java.nio.file.FileVisitResult
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.SimpleFileVisitor
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Basic static checking to keep tabs on third party JARs
 */
public class ThirdPartyAuditTask extends AntTask {

    // true to be lenient about MISSING CLASSES
    private boolean missingClasses;

    // patterns for classes to exclude, because we understand their issues
    private String[] excludes = new String[0];

    ThirdPartyAuditTask() {
        dependsOn(project.configurations.testCompile)
        description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'"
    }

    /**
     * Set to true to be lenient with missing classes. By default this check will fail if it finds
     * MISSING CLASSES. This means the set of jars is incomplete. However, in some cases
     * this can be due to intentional exclusions that are well-tested and understood.
     */
    public void setMissingClasses(boolean value) {
        missingClasses = value;
    }

    /**
     * Returns true if leniency about missing classes is enabled.
     */
    public boolean isMissingClasses() {
        return missingClasses;
        // we depend on this because its the only reliable configuration
        // this probably makes the build slower: gradle you suck here when it comes to configurations, you pay the price.
        dependsOn(project.configurations.testCompile);
        description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'";
    }

    /**
@@ -70,7 +58,7 @@ public class ThirdPartyAuditTask extends AntTask {
    public void setExcludes(String[] classes) {
        for (String s : classes) {
            if (s.indexOf('*') != -1) {
                throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!")
                throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!");
            }
        }
        excludes = classes;
@@ -83,29 +71,78 @@ public class ThirdPartyAuditTask extends AntTask {
        return excludes;
    }

    // yes, we parse Uwe Schindler's errors to find missing classes, and to keep a continuous audit. Just don't let him know!
    static final Pattern MISSING_CLASS_PATTERN =
        Pattern.compile(/WARNING: The referenced class '(.*)' cannot be loaded\. Please fix the classpath\!/);

    static final Pattern VIOLATION_PATTERN =
        Pattern.compile(/\s\sin ([a-zA-Z0-9\$\.]+) \(.*\)/);

    // we log everything and capture errors and handle them with our whitelist
    // this is important, as we detect stale whitelist entries, workaround forbidden apis bugs,
    // and it also allows whitelisting missing classes!
    static class EvilLogger extends DefaultLogger {
        final Set<String> missingClasses = new TreeSet<>();
        final Map<String,List<String>> violations = new TreeMap<>();
        String previousLine = null;

        @Override
        public void messageLogged(BuildEvent event) {
            if (event.getTask().getClass() == de.thetaphi.forbiddenapis.ant.AntTask.class) {
                if (event.getPriority() == Project.MSG_WARN) {
                    Matcher m = MISSING_CLASS_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        missingClasses.add(m.group(1).replace('.', '/') + ".class");
                    }
                } else if (event.getPriority() == Project.MSG_ERR) {
                    Matcher m = VIOLATION_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        String violation = previousLine + '\n' + event.getMessage();
                        String clazz = m.group(1).replace('.', '/') + ".class";
                        List<String> current = violations.get(clazz);
                        if (current == null) {
                            current = new ArrayList<>();
                            violations.put(clazz, current);
                        }
                        current.add(violation);
                    }
                    previousLine = event.getMessage();
                }
            }
            super.messageLogged(event);
        }
    }

    @Override
    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
        return new DefaultLogger(
            errorPrintStream: stream,
            outputPrintStream: stream,
            // ignore passed in outputLevel for now, until we are filtering warning messages
            messageOutputLevel: Project.MSG_ERR)
        DefaultLogger log = new EvilLogger();
        log.errorPrintStream = stream;
        log.outputPrintStream = stream;
        log.messageOutputLevel = outputLevel;
        return log;
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask)
        Configuration configuration = project.configurations.findByName('runtime');
        if (configuration == null) {
            // some projects apparently do not have 'runtime'? what a nice inconsistency,
            // basically only serves to waste time in build logic!
            configuration = project.configurations.findByName('testCompile');
        }
        assert configuration != null;
        ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask);

        // we only want third party dependencies.
        FileCollection jars = project.configurations.testCompile.fileCollection({ dependency ->
        FileCollection jars = configuration.fileCollection({ dependency ->
            dependency.group.startsWith("org.elasticsearch") == false
        })
        });

        // we don't want provided dependencies, which we have already scanned. e.g. don't
        // scan ES core's dependencies for every single plugin
        Configuration provided = project.configurations.findByName('provided')
        Configuration provided = project.configurations.findByName('provided');
        if (provided != null) {
            jars -= provided
            jars -= provided;
        }

        // no dependencies matched, we are done
@@ -113,72 +150,101 @@ public class ThirdPartyAuditTask extends AntTask {
            return;
        }

        // print which jars we are going to scan, always
        // this is not the time to try to be succinct! Forbidden will print plenty on its own!
        Set<String> names = new HashSet<>()
        Set<String> names = new TreeSet<>();
        for (File jar : jars) {
            names.add(jar.getName())
        }
        logger.error("[thirdPartyAudit] Scanning: " + names)

        // warn that classes are missing
        // TODO: move these to excludes list!
        if (missingClasses) {
            logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!")
            names.add(jar.getName());
        }

        // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
        // and then remove our temp dir afterwards. don't complain: try it yourself.
        // we don't use gradle temp dir handling, just google it, or try it yourself.

        File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit')
        File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit');

        // clean up any previous mess (if we failed), then unzip everything to one directory
        ant.delete(dir: tmpDir.getAbsolutePath())
        tmpDir.mkdirs()
        ant.delete(dir: tmpDir.getAbsolutePath());
        tmpDir.mkdirs();
        for (File jar : jars) {
            ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath())
            ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath());
        }

        // convert exclusion class names to binary file names
        String[] excludedFiles = new String[excludes.length];
        for (int i = 0; i < excludes.length; i++) {
            excludedFiles[i] = excludes[i].replace('.', '/') + ".class"
            // check if the excluded file exists, if not, sure sign things are outdated
            if (! new File(tmpDir, excludedFiles[i]).exists()) {
                throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency")
            }
            excludedFiles[i] = excludes[i].replace('.', '/') + ".class";
        }
        Set<String> excludedSet = new TreeSet<>(Arrays.asList(excludedFiles));

        // jarHellReprise
        checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles)));
        Set<String> sheistySet = getSheistyClasses(tmpDir.toPath());

        ant.thirdPartyAudit(internalRuntimeForbidden: true,
        try {
            ant.thirdPartyAudit(internalRuntimeForbidden: false,
                failOnUnsupportedJava: false,
                failOnMissingClasses: !missingClasses,
                classpath: project.configurations.testCompile.asPath) {
                fileset(dir: tmpDir, excludes: excludedFiles.join(','))
                failOnMissingClasses: false,
                signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
                classpath: configuration.asPath) {
                fileset(dir: tmpDir)
            }
        } catch (BuildException ignore) {}

        EvilLogger evilLogger = null;
        for (BuildListener listener : ant.project.getBuildListeners()) {
            if (listener instanceof EvilLogger) {
                evilLogger = (EvilLogger) listener;
                break;
            }
        }
        assert evilLogger != null;

        // keep our whitelist up to date
        Set<String> bogusExclusions = new TreeSet<>(excludedSet);
        bogusExclusions.removeAll(sheistySet);
        bogusExclusions.removeAll(evilLogger.missingClasses);
        bogusExclusions.removeAll(evilLogger.violations.keySet());
        if (!bogusExclusions.isEmpty()) {
            throw new IllegalStateException("Invalid exclusions, nothing is wrong with these classes: " + bogusExclusions);
        }

        // don't duplicate classes with the JDK
        sheistySet.removeAll(excludedSet);
        if (!sheistySet.isEmpty()) {
            throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
        }

        // don't allow a broken classpath
        evilLogger.missingClasses.removeAll(excludedSet);
        if (!evilLogger.missingClasses.isEmpty()) {
            throw new IllegalStateException("CLASSES ARE MISSING! " + evilLogger.missingClasses);
        }

        // don't use internal classes
        evilLogger.violations.keySet().removeAll(excludedSet);
        if (!evilLogger.violations.isEmpty()) {
            throw new IllegalStateException("VIOLATIONS WERE FOUND! " + evilLogger.violations);
        }

        // clean up our mess (if we succeed)
        ant.delete(dir: tmpDir.getAbsolutePath())
        ant.delete(dir: tmpDir.getAbsolutePath());
    }

    /**
     * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk!
     */
    private void checkSheistyClasses(Path root, Set<String> excluded) {
    private Set<String> getSheistyClasses(Path root) {
        // system.parent = extensions loader.
        // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
        // but groovy/gradle needs to work at all first!
        ClassLoader ext = ClassLoader.getSystemClassLoader().getParent()
        assert ext != null
        ClassLoader ext = ClassLoader.getSystemClassLoader().getParent();
        assert ext != null;

        Set<String> sheistySet = new TreeSet<>();
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                String entry = root.relativize(file).toString()
                String entry = root.relativize(file).toString().replace('\\', '/');
                if (entry.endsWith(".class")) {
                    if (ext.getResource(entry) != null) {
                        sheistySet.add(entry);
@@ -187,19 +253,6 @@ public class ThirdPartyAuditTask extends AntTask {
                return FileVisitResult.CONTINUE;
            }
        });

        // check if we are ok
        if (sheistySet.isEmpty()) {
            return;
        }

        // leniency against exclusions list
        sheistySet.removeAll(excluded);

        if (sheistySet.isEmpty()) {
            logger.warn("[thirdPartyAudit] WARNING: JAR HELL WITH JDK! Expect insanely hard-to-debug problems!")
        } else {
            throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
        }
        return sheistySet;
    }
}
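The two regexes above scrape forbidden-apis log output rather than any structured API. A minimal standalone sketch (plain Java; the warning line is a hypothetical sample shaped like the ones the task parses) of what MISSING_CLASS_PATTERN captures and how the task normalizes it into a .class path for the whitelist comparison:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MissingClassPatternDemo {
    static final Pattern MISSING_CLASS_PATTERN =
        Pattern.compile("WARNING: The referenced class '(.*)' cannot be loaded\\. Please fix the classpath!");

    public static void main(String[] args) {
        // hypothetical forbidden-apis warning line
        String line = "WARNING: The referenced class 'com.google.protobuf.MessageLite' cannot be loaded. Please fix the classpath!";
        Matcher m = MISSING_CLASS_PATTERN.matcher(line);
        if (m.matches()) {
            // same transformation the task applies before comparing against excludes
            System.out.println(m.group(1).replace('.', '/') + ".class");
            // prints: com/google/protobuf/MessageLite.class
        }
    }
}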
@@ -0,0 +1,98 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

# Checks that we run against bytecode of third-party dependencies
#
# Be judicious about what is denied here: MANY classes will be subject
# to these rules, so please try to keep the false positive rate low!
#
# Each third party .class failing checks will need to be explicitly
# listed in the module's build.gradle file:
#
#   thirdPartyAudit.excludes = [
#     // uses internal java api: sun.misc.Unsafe
#     'org.foo.Bar',
#     // missing class!
#     'com.missing.dependency.WTF',
#     // ...
#   ]
#
# Wildcards are not allowed, excludes must be exact. The build also fails with
# the message "Invalid exclusions, nothing is wrong with these classes" if
# extraneous classes are in the excludes list, this ensures the list is
# up-to-date, and that each module accurately documents the evil things
# that its dependencies do.
#
# For more information, look at ThirdPartyAuditTask.groovy in buildSrc/

#
# Ruleset to fail on java internal apis, using this logic:
# http://docs.oracle.com/javase/8/docs/api/java/lang/SecurityManager.html#checkPackageAccess-java.lang.String-
#
# // The list may change at any time, regenerated with:
# for (String pkg : new TreeSet<>(Arrays.asList(
#         Security.getProperty("package.access").split(",")))) {
#     System.out.println(pkg + "**");
# }
#
@defaultMessage non-public internal runtime class
com.oracle.webservices.internal.**
com.oracle.xmlns.internal.**
com.sun.activation.registries.**
com.sun.browser.**
com.sun.corba.se.**
com.sun.glass.**
com.sun.imageio.**
com.sun.istack.internal.**
com.sun.javafx.**
com.sun.jmx.**
com.sun.media.**
com.sun.media.sound.**
com.sun.naming.internal.**
com.sun.openpisces.**
com.sun.org.apache.bcel.internal.**
com.sun.org.apache.regexp.internal.**
com.sun.org.apache.xalan.internal.extensions.**
com.sun.org.apache.xalan.internal.lib.**
com.sun.org.apache.xalan.internal.res.**
com.sun.org.apache.xalan.internal.templates.**
com.sun.org.apache.xalan.internal.utils.**
com.sun.org.apache.xalan.internal.xslt.**
com.sun.org.apache.xalan.internal.xsltc.cmdline.**
com.sun.org.apache.xalan.internal.xsltc.compiler.**
com.sun.org.apache.xalan.internal.xsltc.trax.**
com.sun.org.apache.xalan.internal.xsltc.util.**
com.sun.org.apache.xerces.internal.**
com.sun.org.apache.xml.internal.res.**
com.sun.org.apache.xml.internal.security.**
com.sun.org.apache.xml.internal.serializer.utils.**
com.sun.org.apache.xml.internal.utils.**
com.sun.org.apache.xpath.internal.**
com.sun.org.glassfish.**
com.sun.pisces.**
com.sun.prism.**
com.sun.proxy.**
com.sun.scenario.**
com.sun.t2k.**
com.sun.webkit.**
com.sun.xml.internal.**
jdk.internal.**
jdk.management.resource.internal.**
jdk.nashorn.internal.**
jdk.nashorn.tools.**
oracle.jrockit.jfr.**
org.jcp.xml.dsig.internal.**
sun.**
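The header comment above includes the snippet used to regenerate this package list. A self-contained, runnable form of it (the output depends on the JDK's package.access security property, so it varies with the JDK it runs on):

import java.security.Security;
import java.util.Arrays;
import java.util.TreeSet;

public class RegeneratePackageAccess {
    public static void main(String[] args) {
        // package.access is the comma-separated list the SecurityManager checks
        for (String pkg : new TreeSet<>(Arrays.asList(
                Security.getProperty("package.access").split(",")))) {
            System.out.println(pkg + "**");
        }
    }
}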
@@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-1719088
lucene = 5.5.0-snapshot-1721183

# optional dependencies
spatial4j = 0.5
@@ -111,12 +111,121 @@ forbiddenPatterns {
    exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}

// classes are missing, e.g. org.jboss.marshalling.Marshaller
thirdPartyAudit.missingClasses = true
// uses internal sun ssl classes!
thirdPartyAudit.excludes = [
    // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
    'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
    // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
    'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',

    // classes are missing!

    // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
    'com.fasterxml.jackson.databind.ObjectMapper',

    // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
    'com.google.protobuf.CodedInputStream',

    // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
    'com.google.protobuf.CodedOutputStream',

    // from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
    'com.google.protobuf.ExtensionRegistry',
    'com.google.protobuf.MessageLite$Builder',
    'com.google.protobuf.MessageLite',
    'com.google.protobuf.Parser',

    // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
    'javax.jms.Message',
    'javax.jms.MessageListener',
    'javax.jms.ObjectMessage',
    'javax.jms.TopicConnection',
    'javax.jms.TopicConnectionFactory',
    'javax.jms.TopicPublisher',
    'javax.jms.TopicSession',
    'javax.jms.TopicSubscriber',

    // from org.apache.log4j.net.SMTPAppender (log4j)
    'javax.mail.Authenticator',
    'javax.mail.Message$RecipientType',
    'javax.mail.Message',
    'javax.mail.Multipart',
    'javax.mail.PasswordAuthentication',
    'javax.mail.Session',
    'javax.mail.Transport',
    'javax.mail.internet.InternetAddress',
    'javax.mail.internet.InternetHeaders',
    'javax.mail.internet.MimeBodyPart',
    'javax.mail.internet.MimeMessage',
    'javax.mail.internet.MimeMultipart',
    'javax.mail.internet.MimeUtility',

    // from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
    'javax.servlet.ServletConfig',
    'javax.servlet.ServletException',
    'javax.servlet.ServletOutputStream',
    'javax.servlet.http.HttpServlet',
    'javax.servlet.http.HttpServletRequest',
    'javax.servlet.http.HttpServletResponse',

    // from org.jboss.netty.logging.CommonsLoggerFactory (netty)
    'org.apache.commons.logging.Log',
    'org.apache.commons.logging.LogFactory',

    // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
    'org.apache.regexp.CharacterIterator',
    'org.apache.regexp.RE',
    'org.apache.regexp.REProgram',

    // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
    'org.apache.tomcat.jni.Buffer',
    'org.apache.tomcat.jni.Library',
    'org.apache.tomcat.jni.Pool',
    'org.apache.tomcat.jni.SSL',
    'org.apache.tomcat.jni.SSLContext',

    // from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
    'org.bouncycastle.asn1.x500.X500Name',
    'org.bouncycastle.cert.X509v3CertificateBuilder',
    'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
    'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
    'org.bouncycastle.jce.provider.BouncyCastleProvider',
    'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',

    // from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
    'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
    'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
    'org.eclipse.jetty.npn.NextProtoNego',

    // from org.jboss.netty.logging.JBossLoggerFactory (netty)
    'org.jboss.logging.Logger',

    // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
    'org.jboss.marshalling.ByteInput',

    // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
    'org.jboss.marshalling.ByteOutput',

    // from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
    'org.jboss.marshalling.Marshaller',

    // from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
    'org.jboss.marshalling.MarshallerFactory',
    'org.jboss.marshalling.MarshallingConfiguration',
    'org.jboss.marshalling.Unmarshaller',

    // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
    'org.noggit.JSONParser',

    // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
    'org.osgi.framework.BundleActivator',
    'org.osgi.framework.BundleContext',

    // from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
    'org.osgi.framework.ServiceReference',
    'org.osgi.service.log.LogService',
    'org.osgi.util.tracker.ServiceTracker',
    'org.osgi.util.tracker.ServiceTrackerCustomizer',

    'org.slf4j.impl.StaticMDCBinder',
    'org.slf4j.impl.StaticMarkerBinder',
]

// dependency license are currently checked in distribution
@@ -54,7 +54,7 @@ import java.util.Objects;
 * While aggregating the total term frequency is trivial since it
 * can be summed up, not every {@link org.apache.lucene.search.similarities.Similarity}
 * makes use of this statistic. The document frequency which is used in the
 * {@link org.apache.lucene.search.similarities.DefaultSimilarity}
 * {@link org.apache.lucene.search.similarities.ClassicSimilarity}
 * can only be estimated as a lower bound since it is a document-based statistic. For
 * the document frequency the maximum frequency across all fields per term is used,
 * which is the minimum number of documents the term occurs in.
@@ -226,7 +226,7 @@ public class MapperQueryParser extends QueryParser {
            }
        }
        if (query == null) {
            query = super.getFieldQuery(currentFieldType.names().indexName(), queryText, quoted);
            query = super.getFieldQuery(currentFieldType.name(), queryText, quoted);
        }
        return query;
    }
@@ -466,7 +466,7 @@ public class MapperQueryParser extends QueryParser {
            query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context);
        }
        if (query == null) {
            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.names().indexName(), termStr);
            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr);
        }
        return query;
    }
@@ -592,7 +592,7 @@ public class MapperQueryParser extends QueryParser {
        if (!settings.forceAnalyzer()) {
            setAnalyzer(context.getSearchAnalyzer(currentFieldType));
        }
        indexedNameField = currentFieldType.names().indexName();
        indexedNameField = currentFieldType.name();
        return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
    }
    return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
@@ -279,6 +279,8 @@ public class Version {
    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_2_0_ID = 2020099;
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final int V_3_0_0_ID = 3000099;
    public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final Version CURRENT = V_3_0_0;
@@ -295,6 +297,8 @@ public class Version {
        switch (id) {
            case V_3_0_0_ID:
                return V_3_0_0;
            case V_2_3_0_ID:
                return V_2_3_0;
            case V_2_2_0_ID:
                return V_2_2_0;
            case V_2_1_2_ID:
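The version constants above appear to pack two decimal digits per component, plus a trailing build suffix (99 for snapshot/unreleased builds), into one int, which is why V_2_2_0_ID is 2020099. A small sketch of that decoding; the method name is illustrative, not the actual Version API:

public class VersionIdDemo {
    // assumption: id = major * 1000000 + minor * 10000 + revision * 100 + build
    static String decode(int id) {
        int major = (id / 1000000) % 100;
        int minor = (id / 10000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        return major + "." + minor + "." + revision + " (build suffix " + build + ")";
    }

    public static void main(String[] args) {
        System.out.println(decode(2020099)); // 2.2.0 (build suffix 99) -> V_2_2_0_ID
        System.out.println(decode(3000099)); // 3.0.0 (build suffix 99) -> V_3_0_0_ID
    }
}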
@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
@@ -255,6 +257,7 @@ public class ActionModule extends AbstractModule {
        registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);

        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
@@ -38,7 +38,7 @@ public abstract class ActionRequest<T extends ActionRequest> extends TransportRe
        super(request);
        // this does not set the listenerThreaded API, if needed, its up to the caller to set it
        // since most times, we actually want it to not be threaded...
        //this.listenerThreaded = request.listenerThreaded();
        // this.listenerThreaded = request.listenerThreaded();
    }

    public abstract ActionRequestValidationException validate();
@@ -0,0 +1,117 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.ExceptionsHelper.detailedMessage;

/**
 * Information about task operation failures
 *
 * The class is final due to serialization limitations
 */
public final class TaskOperationFailure implements Writeable<TaskOperationFailure>, ToXContent {

    private final String nodeId;

    private final long taskId;

    private final Throwable reason;

    private final RestStatus status;

    public TaskOperationFailure(StreamInput in) throws IOException {
        nodeId = in.readString();
        taskId = in.readLong();
        reason = in.readThrowable();
        status = RestStatus.readFrom(in);
    }

    public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
        this.nodeId = nodeId;
        this.taskId = taskId;
        this.reason = t;
        status = ExceptionsHelper.status(t);
    }

    public String getNodeId() {
        return this.nodeId;
    }

    public long getTaskId() {
        return this.taskId;
    }

    public String getReason() {
        return detailedMessage(reason);
    }

    public RestStatus getStatus() {
        return status;
    }

    public Throwable getCause() {
        return reason;
    }

    @Override
    public TaskOperationFailure readFrom(StreamInput in) throws IOException {
        return new TaskOperationFailure(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeId);
        out.writeLong(taskId);
        out.writeThrowable(reason);
        RestStatus.writeTo(out, status);
    }

    @Override
    public String toString() {
        return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("task_id", getTaskId());
        builder.field("node_id", getNodeId());
        builder.field("status", status.name());
        if (reason != null) {
            builder.field("reason");
            builder.startObject();
            ElasticsearchException.toXContent(builder, params, reason);
            builder.endObject();
        }
        return builder;

    }

}
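TaskOperationFailure follows the Writeable pattern: the StreamInput constructor reads fields in exactly the order writeTo emits them. A self-contained sketch of that round-trip discipline, using plain java.io streams in place of Elasticsearch's StreamInput/StreamOutput and a pared-down hypothetical class:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

final class MiniFailure {
    final String nodeId;
    final long taskId;

    // read constructor: consumes fields in exactly the order writeTo writes them
    MiniFailure(DataInput in) throws IOException {
        nodeId = in.readUTF();   // mirrors out.writeString(nodeId)
        taskId = in.readLong();  // mirrors out.writeLong(taskId)
    }

    MiniFailure(String nodeId, long taskId) {
        this.nodeId = nodeId;
        this.taskId = taskId;
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(nodeId);
        out.writeLong(taskId);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new MiniFailure("node-1", 42L).writeTo(new DataOutputStream(bytes));
        MiniFailure copy = new MiniFailure(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.nodeId + "/" + copy.taskId); // prints node-1/42
    }
}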
@@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -75,7 +76,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
    }

    @Override
    protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
    protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener<ClusterHealthResponse> listener) throws Exception {
        logger.warn("attempt to execute a cluster health operation without a task");
        throw new UnsupportedOperationException("task parameter is required for this operation");
    }

    @Override
    protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
        if (request.waitForEvents() != null) {
            final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
            clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
@@ -95,7 +102,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
                @Override
                public void onNoLongerMaster(String source) {
                    logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents());
                    doExecute(request, listener);
                    doExecute(task, request, listener);
                }

                @Override
@@ -1,5 +1,3 @@
package org.elasticsearch.plugin.hadoop.hdfs;

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -19,12 +17,30 @@ package org.elasticsearch.plugin.hadoop.hdfs;
 * under the License.
 */

import org.elasticsearch.test.ESTestCase;
package org.elasticsearch.action.admin.cluster.node.tasks.list;

public class UtilsTests extends ESTestCase {
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

    public void testDetectLibFolder() {
        String location = HdfsPlugin.class.getProtectionDomain().getCodeSource().getLocation().toString();
        assertEquals(location, Utils.detectLibFolder());
/**
 * Action for retrieving a list of currently running tasks
 */
public class ListTasksAction extends Action<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> {

    public static final ListTasksAction INSTANCE = new ListTasksAction();
    public static final String NAME = "cluster:monitor/tasks/lists";

    private ListTasksAction() {
        super(NAME);
    }

    @Override
    public ListTasksResponse newResponse() {
        return new ListTasksResponse();
    }

    @Override
    public ListTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ListTasksRequestBuilder(client, this);
    }
}
@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 * A request to get node tasks
 */
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {

    private boolean detailed = false;

    /**
     * Get information from nodes based on the nodes ids specified. If none are passed, information
     * for all nodes will be returned.
     */
    public ListTasksRequest(String... nodesIds) {
        super(nodesIds);
    }

    /**
     * Should the detailed task information be returned.
     */
    public boolean detailed() {
        return this.detailed;
    }

    /**
     * Should the detailed task information be returned.
     */
    public ListTasksRequest detailed(boolean detailed) {
        this.detailed = detailed;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        detailed = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(detailed);
    }
}
@@ -1,5 +1,3 @@
package org.elasticsearch.plugin.hadoop.hdfs;

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@@ -19,14 +17,25 @@ package org.elasticsearch.plugin.hadoop.hdfs;
 * under the License.
 */

import java.net.URL;
import java.util.Collections;
import java.util.List;
package org.elasticsearch.action.admin.cluster.node.tasks.list;

public class HdfsTestPlugin extends HdfsPlugin {
import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

    @Override
    protected List<URL> getHadoopClassLoaderPath(String baseLib) {
        return Collections.emptyList();
/**
 * Builder for the request to retrieve the list of tasks running on the specified nodes
 */
public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> {

    public ListTasksRequestBuilder(ElasticsearchClient client, ListTasksAction action) {
        super(client, action, new ListTasksRequest());
    }

    /**
     * Should detailed task information be returned.
     */
    public ListTasksRequestBuilder setDetailed(boolean detailed) {
        request.detailed(detailed);
        return this;
    }
}
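A hedged usage sketch for the new action: newRequestBuilder() and setDetailed() come from the classes above, while the blocking get() is assumed to be the usual ActionRequestBuilder helper; "client" stands for any ElasticsearchClient implementation:

import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.ElasticsearchClient;

class ListTasksUsageSketch {
    // sketch, not a definitive API walkthrough: builds the request via the
    // factory shown above and executes it synchronously
    static ListTasksResponse listAllTasks(ElasticsearchClient client) {
        return ListTasksAction.INSTANCE.newRequestBuilder(client)
                .setDetailed(true)
                .get();
    }
}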
@@ -0,0 +1,159 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Returns the list of tasks currently running on the nodes
 */
public class ListTasksResponse extends BaseTasksResponse implements ToXContent {

    private List<TaskInfo> tasks;

    private Map<DiscoveryNode, List<TaskInfo>> nodes;

    public ListTasksResponse() {
    }

    public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures) {
        super(taskFailures, nodeFailures);
        this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        tasks = Collections.unmodifiableList(in.readList(TaskInfo::new));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeList(tasks);
    }

    /**
     * Returns the list of tasks by node
     */
    public Map<DiscoveryNode, List<TaskInfo>> getPerNodeTasks() {
        if (nodes != null) {
            return nodes;
        }
        Map<DiscoveryNode, List<TaskInfo>> nodeTasks = new HashMap<>();

        Set<DiscoveryNode> nodes = new HashSet<>();
        for (TaskInfo shard : tasks) {
            nodes.add(shard.getNode());
        }

        for (DiscoveryNode node : nodes) {
            List<TaskInfo> tasks = new ArrayList<>();
            for (TaskInfo taskInfo : this.tasks) {
                if (taskInfo.getNode().equals(node)) {
                    tasks.add(taskInfo);
                }
            }
            nodeTasks.put(node, tasks);
        }
        this.nodes = nodeTasks;
        return nodeTasks;
    }

    public List<TaskInfo> getTasks() {
        return tasks;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
            builder.startArray("task_failures");
            for (TaskOperationFailure ex : getTaskFailures()){
                builder.value(ex);
            }
            builder.endArray();
        }

        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
            builder.startArray("node_failures");
            for (FailedNodeException ex : getNodeFailures()){
                builder.value(ex);
            }
            builder.endArray();
        }

        builder.startObject("nodes");
        for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
            DiscoveryNode node = entry.getKey();
            builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("name", node.name());
            builder.field("transport_address", node.address().toString());
            builder.field("host", node.getHostName());
            builder.field("ip", node.getAddress());

            if (!node.attributes().isEmpty()) {
                builder.startObject("attributes");
                for (ObjectObjectCursor<String, String> attr : node.attributes()) {
                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
                }
                builder.endObject();
            }
            builder.startArray("tasks");
            for(TaskInfo task : entry.getValue()) {
                task.toXContent(builder, params);
            }
            builder.endArray();
            builder.endObject();
        }
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        try {
            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
            builder.startObject();
            toXContent(builder, EMPTY_PARAMS);
            builder.endObject();
            return builder.string();
        } catch (IOException e) {
            return "{ \"error\" : \"" + e.getMessage() + "\"}";
        }
    }
}
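getPerNodeTasks() above groups the flat task list by owning node using two passes. A one-pass equivalent with Collectors.groupingBy, using plain strings as stand-ins for DiscoveryNode and TaskInfo:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupByNodeDemo {
    public static void main(String[] args) {
        // "nodeId:taskId" pairs standing in for TaskInfo objects
        List<String> tasks = Arrays.asList("n1:1", "n2:2", "n1:3");
        // single-pass equivalent of the two-pass loop in getPerNodeTasks()
        Map<String, List<String>> perNode = tasks.stream()
                .collect(Collectors.groupingBy(t -> t.split(":")[0]));
        System.out.println(perNode); // e.g. {n1=[n1:1, n1:3], n2=[n2:2]}
    }
}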
@@ -0,0 +1,140 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

/**
 * Information about a currently running task.
 * <p>
 * Tasks are used for communication with transport actions. As a result, they can contain callback
 * references as well as mutable state. That makes it impractical to send tasks over transport channels
 * and use in APIs. Instead, immutable and streamable TaskInfo objects are used to represent
 * snapshot information about currently running tasks.
 */
public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

    private final DiscoveryNode node;

    private final long id;

    private final String type;

    private final String action;

    private final String description;

    private final String parentNode;

    private final long parentId;

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description) {
        this(node, id, type, action, description, null, -1L);
    }

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, String parentNode, long parentId) {
        this.node = node;
        this.id = id;
        this.type = type;
        this.action = action;
        this.description = description;
        this.parentNode = parentNode;
        this.parentId = parentId;
    }

    public TaskInfo(StreamInput in) throws IOException {
        node = DiscoveryNode.readNode(in);
        id = in.readLong();
        type = in.readString();
        action = in.readString();
        description = in.readOptionalString();
        parentNode = in.readOptionalString();
        parentId = in.readLong();
    }

    public DiscoveryNode getNode() {
        return node;
    }

    public long getId() {
        return id;
    }

    public String getType() {
        return type;
    }

    public String getAction() {
        return action;
    }

    public String getDescription() {
        return description;
    }

    public String getParentNode() {
        return parentNode;
    }

    public long getParentId() {
        return parentId;
    }

    @Override
    public TaskInfo readFrom(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        node.writeTo(out);
        out.writeLong(id);
        out.writeString(type);
        out.writeString(action);
        out.writeOptionalString(description);
        out.writeOptionalString(parentNode);
        out.writeLong(parentId);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("node", node.getId());
        builder.field("id", id);
        builder.field("type", type);
        builder.field("action", action);
        if (description != null) {
            builder.field("description", description);
        }
        if (parentNode != null) {
            builder.field("parent_node", parentNode);
            builder.field("parent_id", parentId);
        }
        builder.endObject();
        return builder;
    }
}
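For reference, the toXContent method above serializes one task roughly as the following JSON, sketched here as Java comments; all values are hypothetical, and the description and parent fields appear only when set:

// {
//   "node"        : "B6fequ9hTTiyEW9vgKZa9Q",      // hypothetical DiscoveryNode id
//   "id"          : 123,
//   "type"        : "transport",                   // hypothetical task type
//   "action"      : "cluster:monitor/tasks/lists",
//   "description" : "...",                         // only if description != null
//   "parent_node" : "p4r3ntN0d3Id",                // only if parentNode != null
//   "parent_id"   : 7                              //   (emitted together with parent_node)
// }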
@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;

/**
 *
 */
public class TransportListTasksAction extends TransportTasksAction<ListTasksRequest, ListTasksResponse, TaskInfo> {

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ListTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
    }

    @Override
    protected ListTasksResponse newResponse(ListTasksRequest request, List<TaskInfo> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions) {
        return new ListTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
    }

    @Override
    protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    @Override
    protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
        return task.taskInfo(clusterService.localNode(), request.detailed());
    }

    @Override
    protected boolean accumulateExceptions() {
        return true;
    }
}
@@ -126,13 +126,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            if (indexService == null) {
                throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
            }
            MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
            MappedFieldType fieldType = indexService.mapperService().fullName(request.field());
            if (fieldType != null) {
                if (fieldType.isNumeric()) {
                    throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
                }
                analyzer = fieldType.indexAnalyzer();
                field = fieldType.names().indexName();
                field = fieldType.name();
            }
        }
        if (field == null) {
@@ -34,6 +34,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -75,12 +76,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
    }

    @Override
    protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
    protected void doExecute(Task task, CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        if (closeIndexEnabled == false) {
            throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
        }
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override
@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -62,9 +63,9 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
    }

    @Override
    protected void doExecute(DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
    protected void doExecute(Task task, DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override
@@ -171,7 +171,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
        for (String field : request.fields()) {
            if (Regex.isMatchAllPattern(field)) {
                for (FieldMapper fieldMapper : allFieldMappers) {
                    addFieldMapper(fieldMapper.fieldType().names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
                    addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                }
            } else if (Regex.isSimpleMatchPattern(field)) {
                // go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
@@ -179,15 +179,15 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
                Collection<FieldMapper> remainingFieldMappers = newLinkedList(allFieldMappers);
                for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
                    final FieldMapper fieldMapper = it.next();
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().names().fullName())) {
                        addFieldMapper(fieldMapper.fieldType().names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                        it.remove();
                    }
                }
                for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
                    final FieldMapper fieldMapper = it.next();
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().names().indexName())) {
                        addFieldMapper(fieldMapper.fieldType().names().indexName(), fieldMapper, fieldMappings, request.includeDefaults());
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                        it.remove();
                    }
                }
@@ -214,7 +214,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
                builder.startObject();
                fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
                builder.endObject();
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().names().fullName(), builder.bytes()));
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), builder.bytes()));
            } catch (IOException e) {
                throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
            }
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -65,9 +66,9 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
    }

    @Override
    protected void doExecute(OpenIndexRequest request, ActionListener<OpenIndexResponse> listener) {
    protected void doExecute(Task task, OpenIndexRequest request, ActionListener<OpenIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override
@@ -32,6 +32,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;

import java.io.Closeable;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -167,9 +168,8 @@ public class BulkProcessor implements Closeable {
    }

    public static Builder builder(Client client, Listener listener) {
        if (client == null) {
            throw new NullPointerException("The client you specified while building a BulkProcessor is null");
        }
        Objects.requireNonNull(client, "client");
        Objects.requireNonNull(listener, "listener");

        return new Builder(client, listener);
    }
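The builder(...) hunk above replaces a hand-rolled null check with Objects.requireNonNull, which also covers the previously unchecked listener argument. A minimal stand-alone sketch of the idiom (class and method names here are illustrative, not part of the patch):

import java.util.Objects;

public class RequireNonNullDemo {
    static String greet(String name) {
        // fails fast with a NullPointerException whose message names the missing parameter
        Objects.requireNonNull(name, "name");
        return "hello " + name;
    }

    public static void main(String[] args) {
        System.out.println(greet("bulk"));      // prints "hello bulk"
        try {
            greet(null);
        } catch (NullPointerException e) {
            System.out.println(e.getMessage()); // prints "name"
        }
    }
}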
@@ -20,7 +20,6 @@
package org.elasticsearch.action.index;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;

@@ -43,11 +42,9 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;

import java.io.IOException;

@@ -605,41 +602,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
                mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER,
                getVersion(metaData, concreteIndex));
        }
        // extract values if needed
        if (mappingMd != null) {
            MappingMetaData.ParseContext parseContext = mappingMd.createParseContext(id, routing, timestamp);

            if (parseContext.shouldParse()) {
                XContentParser parser = null;
                try {
                    parser = XContentHelper.createParser(source);
                    mappingMd.parse(parser, parseContext);
                    if (parseContext.shouldParseId()) {
                        id = parseContext.id();
                    }
                    if (parseContext.shouldParseRouting()) {
                        if (routing != null && !routing.equals(parseContext.routing())) {
                            throw new MapperParsingException("The provided routing value [" + routing + "] doesn't match the routing key stored in the document: [" + parseContext.routing() + "]");
                        }
                        routing = parseContext.routing();
                    }
                    if (parseContext.shouldParseTimestamp()) {
                        timestamp = parseContext.timestamp();
                        if (timestamp != null) {
                            timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
                        }
                    }
                } catch (MapperParsingException e) {
                    throw e;
                } catch (Exception e) {
                    throw new ElasticsearchParseException("failed to parse doc to extract routing/timestamp/id", e);
                } finally {
                    if (parser != null) {
                        parser.close();
                    }
                }
            }

            // might as well check for routing here
            if (mappingMd.routing().required() && routing == null) {
                throw new RoutingMissingException(concreteIndex, type, id);
@@ -54,7 +54,9 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;
import java.util.Map;

@@ -77,7 +79,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
    public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                     ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, SearchAction.NAME, threadPool, actionFilters, indexNameExpressionResolver);
        super(settings, SearchAction.NAME, threadPool, actionFilters, indexNameExpressionResolver, clusterService.getTaskManager());
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
@@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;

/**
 * A filter allowing to filter transport actions
@@ -39,7 +40,7 @@ public interface ActionFilter {
     * Enables filtering the execution of an action on the request side, either by sending a response through the
     * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
     */
    void apply(String action, ActionRequest request, ActionListener listener, ActionFilterChain chain);
    void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain);

    /**
     * Enables filtering the execution of an action on the response side, either by sending a response through the
@@ -59,9 +60,9 @@ public interface ActionFilter {
        }

        @Override
        public final void apply(String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
        public final void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
            if (apply(action, request, listener)) {
                chain.proceed(action, request, listener);
                chain.proceed(task, action, request, listener);
            }
        }

@@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.tasks.Task;

/**
 * A filter chain allowing to continue and process the transport action request
@@ -32,7 +33,7 @@ public interface ActionFilterChain {
     * Continue processing the request. Should only be called if a response has not been sent through
     * the given {@link ActionListener listener}
     */
    void proceed(final String action, final ActionRequest request, final ActionListener listener);
    void proceed(Task task, final String action, final ActionRequest request, final ActionListener listener);

    /**
     * Continue processing the response. Should only be called if a response has not been sent through
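The two interface changes above thread a Task through the request-side filter chain. A self-contained sketch of that call flow, using simplified stand-in types rather than the real ActionFilter/ActionFilterChain classes:

interface Listener { void onResponse(String response); }

interface FilterChain { void proceed(long taskId, String action, String request, Listener listener); }

interface Filter { void apply(long taskId, String action, String request, Listener listener, FilterChain chain); }

public class FilterDemo {
    public static void main(String[] args) {
        // a filter observes the task id and passes it along unchanged
        Filter logging = (taskId, action, request, listener, chain) -> {
            System.out.println("task " + taskId + " -> " + action);
            chain.proceed(taskId, action, request, listener);
        };
        // the terminal chain element answers the request
        FilterChain terminal = (taskId, action, request, listener) ->
                listener.onResponse("handled " + request);
        logging.apply(42L, "indices:admin/close", "req", System.out::println, terminal);
    }
}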
@@ -26,6 +26,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;

/**
 * Encapsulates the logic of whether a new index should be automatically created when
@@ -35,6 +36,7 @@ public final class AutoCreateIndex {

    private final boolean needToCheck;
    private final boolean globallyDisabled;
    private final boolean dynamicMappingDisabled;
    private final String[] matches;
    private final String[] matches2;
    private final IndexNameExpressionResolver resolver;

@@ -42,6 +44,7 @@ public final class AutoCreateIndex {
    @Inject
    public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) {
        this.resolver = resolver;
        dynamicMappingDisabled = !settings.getAsBoolean(MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_DEFAULT);
        String value = settings.get("action.auto_create_index");
        if (value == null || Booleans.isExplicitTrue(value)) {
            needToCheck = true;
@@ -82,7 +85,7 @@ public final class AutoCreateIndex {
        if (exists) {
            return false;
        }
        if (globallyDisabled) {
        if (globallyDisabled || dynamicMappingDisabled) {
            return false;
        }
        // matches not set, default value of "true"
@@ -0,0 +1,71 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.ChildTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

/**
 * Base class for requests that can have associated child tasks
 */
public class ChildTaskRequest extends TransportRequest {

    private String parentTaskNode;

    private long parentTaskId;

    protected ChildTaskRequest() {

    }

    protected ChildTaskRequest(TransportRequest parentTaskRequest) {
        super(parentTaskRequest);
    }

    public void setParentTask(String parentTaskNode, long parentTaskId) {
        this.parentTaskNode = parentTaskNode;
        this.parentTaskId = parentTaskId;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        parentTaskNode = in.readOptionalString();
        parentTaskId = in.readLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalString(parentTaskNode);
        out.writeLong(parentTaskId);
    }

    @Override
    public Task createTask(long id, String type, String action) {
        return new ChildTask(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
    }
}
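ChildTaskRequest puts the parent coordinates on the wire as an optional string followed by a long; in the stream abstraction an "optional" value is a presence flag followed by the value itself. A stand-alone java.io sketch of that layout, assuming that encoding (plain DataInput/DataOutput stand in for StreamInput/StreamOutput):

import java.io.*;

public class ParentTaskWireDemo {
    static void write(DataOutputStream out, String parentNode, long parentTaskId) throws IOException {
        out.writeBoolean(parentNode != null);             // optional string: presence flag...
        if (parentNode != null) out.writeUTF(parentNode); // ...then the value, if present
        out.writeLong(parentTaskId);
    }

    static void read(DataInputStream in) throws IOException {
        String parentNode = in.readBoolean() ? in.readUTF() : null;
        long parentTaskId = in.readLong();
        System.out.println(parentNode + " / " + parentTaskId);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), "node-1", 7L);
        read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))); // node-1 / 7
    }
}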
@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequestHandler;

@@ -36,14 +37,19 @@ import java.util.function.Supplier;
public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {

    protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver);
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
        transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());
    }

    class TransportHandler implements TransportRequestHandler<Request> {

        @Override
        public final void messageReceived(final Request request, final TransportChannel channel) throws Exception {
        public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
            messageReceived(request, channel);
        }

        @Override
        public final void messageReceived(Request request, TransportChannel channel) throws Exception {
            execute(request, new ActionListener<Response>() {
                @Override
                public void onResponse(Response response) {
@@ -29,6 +29,8 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.atomic.AtomicInteger;

@@ -45,15 +47,17 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
    private final ActionFilter[] filters;
    protected final ParseFieldMatcher parseFieldMatcher;
    protected final IndexNameExpressionResolver indexNameExpressionResolver;
    protected final TaskManager taskManager;

    protected TransportAction(Settings settings, String actionName, ThreadPool threadPool, ActionFilters actionFilters,
                              IndexNameExpressionResolver indexNameExpressionResolver) {
                              IndexNameExpressionResolver indexNameExpressionResolver, TaskManager taskManager) {
        super(settings);
        this.threadPool = threadPool;
        this.actionName = actionName;
        this.filters = actionFilters.filters();
        this.parseFieldMatcher = new ParseFieldMatcher(settings);
        this.indexNameExpressionResolver = indexNameExpressionResolver;
        this.taskManager = taskManager;
    }

    public final ActionFuture<Response> execute(Request request) {
@@ -63,6 +67,28 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
    }

    public final void execute(Request request, ActionListener<Response> listener) {
        Task task = taskManager.register("transport", actionName, request);
        if (task == null) {
            execute(null, request, listener);
        } else {
            execute(task, request, new ActionListener<Response>() {
                @Override
                public void onResponse(Response response) {
                    taskManager.unregister(task);
                    listener.onResponse(response);
                }

                @Override
                public void onFailure(Throwable e) {
                    taskManager.unregister(task);
                    listener.onFailure(e);
                }
            });
        }
    }

    private final void execute(Task task, Request request, ActionListener<Response> listener) {

        ActionRequestValidationException validationException = request.validate();
        if (validationException != null) {
            listener.onFailure(validationException);
@@ -71,17 +97,21 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex

        if (filters.length == 0) {
            try {
                doExecute(request, listener);
                doExecute(task, request, listener);
            } catch (Throwable t) {
                logger.trace("Error during transport action execution.", t);
                listener.onFailure(t);
            }
        } else {
            RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger);
            requestFilterChain.proceed(actionName, request, listener);
            requestFilterChain.proceed(task, actionName, request, listener);
        }
    }

    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
        doExecute(request, listener);
    }

    protected abstract void doExecute(Request request, ActionListener<Response> listener);

    private static class RequestFilterChain<Request extends ActionRequest, Response extends ActionResponse> implements ActionFilterChain {

@@ -96,13 +126,13 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
        }

        @Override @SuppressWarnings("unchecked")
        public void proceed(String actionName, ActionRequest request, ActionListener listener) {
        public void proceed(Task task, String actionName, ActionRequest request, ActionListener listener) {
            int i = index.getAndIncrement();
            try {
                if (i < this.action.filters.length) {
                    this.action.filters[i].apply(actionName, request, listener, this);
                    this.action.filters[i].apply(task, actionName, request, listener, this);
                } else if (i == this.action.filters.length) {
                    this.action.doExecute((Request) request, new FilteredActionListener<Response>(actionName, listener, new ResponseFilterChain(this.action.filters, logger)));
                    this.action.doExecute(task, (Request) request, new FilteredActionListener<Response>(actionName, listener, new ResponseFilterChain(this.action.filters, logger)));
                } else {
                    listener.onFailure(new IllegalStateException("proceed was called too many times"));
                }
@@ -131,7 +161,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
        }

        @Override
        public void proceed(String action, ActionRequest request, ActionListener listener) {
        public void proceed(Task task, String action, ActionRequest request, ActionListener listener) {
            assert false : "response filter chain should never be called on the request side";
        }

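The new execute(Request, ActionListener) above registers a task, then wraps the caller's listener so the task is unregistered on both completion paths before the result is forwarded. A minimal sketch of that wrapping pattern with stand-in types (the real TaskManager API is richer than this):

import java.util.concurrent.atomic.AtomicLong;

public class TaskWrapDemo {
    interface ActionListener<T> { void onResponse(T response); void onFailure(Throwable e); }

    static final AtomicLong idGenerator = new AtomicLong();

    static <T> ActionListener<T> unregisterOnCompletion(long taskId, ActionListener<T> delegate) {
        return new ActionListener<T>() {
            @Override public void onResponse(T response) {
                System.out.println("unregister task " + taskId); // mirrors taskManager.unregister(task)
                delegate.onResponse(response);
            }
            @Override public void onFailure(Throwable e) {
                System.out.println("unregister task " + taskId); // unregister happens on failure too
                delegate.onFailure(e);
            }
        };
    }

    public static void main(String[] args) {
        long taskId = idGenerator.incrementAndGet();            // mirrors taskManager.register(...)
        ActionListener<String> caller = new ActionListener<String>() {
            @Override public void onResponse(String r) { System.out.println("got " + r); }
            @Override public void onFailure(Throwable e) { e.printStackTrace(); }
        };
        unregisterOnCompletion(taskId, caller).onResponse("ok");
    }
}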
@@ -39,6 +39,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportException;

@@ -84,6 +85,13 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques

    protected abstract void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception;

    /**
     * Override this operation if access to the task parameter is needed
     */
    protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
        masterOperation(request, state, listener);
    }

    protected boolean localExecute(Request request) {
        return false;
    }

@@ -91,8 +99,14 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
    protected abstract ClusterBlockException checkBlock(Request request, ClusterState state);

    @Override
    protected void doExecute(final Request request, ActionListener<Response> listener) {
        new AsyncSingleAction(request, listener).start();
    protected final void doExecute(final Request request, ActionListener<Response> listener) {
        logger.warn("attempt to execute a master node operation without task");
        throw new UnsupportedOperationException("task parameter is required for this operation");
    }

    @Override
    protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
        new AsyncSingleAction(task, request, listener).start();
    }

    class AsyncSingleAction {

@@ -100,6 +114,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
        private final ActionListener<Response> listener;
        private final Request request;
        private volatile ClusterStateObserver observer;
        private final Task task;

        private final ClusterStateObserver.ChangePredicate retryableOrNoBlockPredicate = new ClusterStateObserver.ValidationPredicate() {
            @Override
@@ -109,7 +124,8 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
            }
        };

        AsyncSingleAction(Request request, ActionListener<Response> listener) {
        AsyncSingleAction(Task task, Request request, ActionListener<Response> listener) {
            this.task = task;
            this.request = request;
            // TODO do we really need to wrap it in a listener? the handlers should be cheap
            if ((listener instanceof ThreadedActionListener) == false) {
@@ -157,7 +173,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
            threadPool.executor(executor).execute(new ActionRunnable(delegate) {
                @Override
                protected void doRun() throws Exception {
                    masterOperation(request, clusterService.state(), delegate);
                    masterOperation(task, request, clusterService.state(), delegate);
                }
            });
        }

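TransportMasterNodeAction (and the nodes/tasks actions below) seal off the legacy entry point: the old doExecute becomes final and throws, so every execution path is forced through the Task-aware overload. A stand-alone sketch of the pattern with stand-in types:

public class SealedOverloadDemo {
    static class Base {
        // legacy path: kept so the signature still exists, but unusable
        protected final void doExecute(String request) {
            throw new UnsupportedOperationException("task parameter is required for this operation");
        }

        // the supported, task-aware path
        protected void doExecute(long taskId, String request) {
            System.out.println("task " + taskId + ": " + request);
        }
    }

    public static void main(String[] args) {
        Base b = new Base();
        b.doExecute(7L, "close-index");
        try {
            b.doExecute("close-index");
        } catch (UnsupportedOperationException e) {
            System.out.println(e.getMessage()); // fails fast instead of running untracked
        }
    }
}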
@@ -19,16 +19,16 @@

package org.elasticsearch.action.support.nodes;

import org.elasticsearch.action.support.ChildTaskRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

/**
 *
 */
public abstract class BaseNodeRequest extends TransportRequest {
public abstract class BaseNodeRequest extends ChildTaskRequest {

    private String nodeId;

@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ChildTaskRequest;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;

@@ -32,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;

@@ -71,8 +73,14 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
    }

    @Override
    protected void doExecute(NodesRequest request, ActionListener<NodesResponse> listener) {
        new AsyncAction(request, listener).start();
    protected final void doExecute(NodesRequest request, ActionListener<NodesResponse> listener) {
        logger.warn("attempt to execute a transport nodes operation without a task");
        throw new UnsupportedOperationException("task parameter is required for this operation");
    }

    @Override
    protected void doExecute(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
        new AsyncAction(task, request, listener).start();
    }

    protected boolean transportCompress() {

@@ -106,8 +114,10 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
        private final ActionListener<NodesResponse> listener;
        private final AtomicReferenceArray<Object> responses;
        private final AtomicInteger counter = new AtomicInteger();
        private final Task task;

        private AsyncAction(NodesRequest request, ActionListener<NodesResponse> listener) {
        private AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
            this.task = task;
            this.request = request;
            this.listener = listener;
            ClusterState clusterState = clusterService.state();

@@ -150,7 +160,11 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
                    // those (and they randomize the client node usage, so tricky to find when)
                    onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
                } else {
                    NodeRequest nodeRequest = newNodeRequest(nodeId, request);
                    ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
                    if (task != null) {
                        nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                    }

                    transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeResponse>() {
                        @Override
                        public NodeResponse newInstance() {
@@ -114,7 +114,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                                         MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
                                         Supplier<ReplicaRequest> replicaRequest, String executor) {
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver);
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
        this.transportService = transportService;
        this.clusterService = clusterService;
        this.indicesService = indicesService;

@@ -882,7 +882,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                    onReplicaFailure(nodeId, exp);
                } else {
                    logger.warn("{} failed to perform {} on node {}", exp, shardId, transportReplicaAction, node);
                    shardStateAction.shardFailed(shard, indexUUID, "failed to perform " + transportReplicaAction + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
                    shardStateAction.shardFailed(clusterService.state(), shard, indexUUID, "failed to perform " + transportReplicaAction + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
                }
            }
        }

@@ -1018,7 +1018,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                    // ignore
                }
            }
            if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) {
            if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null) {
                indexShard.sync(location);
            }
            indexShard.maybeFlush();
@@ -66,7 +66,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
    protected TransportSingleShardAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
                                         TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                         Supplier<Request> request, String executor) {
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver);
        super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
        this.clusterService = clusterService;
        this.transportService = transportService;

@@ -177,7 +177,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

                @Override
                public void handleException(TransportException exp) {
                    perform(exp);
                    listener.onFailure(exp);
                }
            });
        } else {
@@ -0,0 +1,195 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support.tasks;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.ChildTask;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

/**
 * A base class for task requests
 */
public class BaseTasksRequest<T extends BaseTasksRequest> extends ActionRequest<T> {

    public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY;

    public static final String[] ALL_NODES = Strings.EMPTY_ARRAY;

    public static final long ALL_TASKS = -1L;

    private String[] nodesIds = ALL_NODES;

    private TimeValue timeout;

    private String[] actions = ALL_ACTIONS;

    private String parentNode;

    private long parentTaskId = ALL_TASKS;

    public BaseTasksRequest() {
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    /**
     * Get information about tasks from nodes based on the nodes ids specified.
     * If none are passed, information for all nodes will be returned.
     */
    public BaseTasksRequest(ActionRequest request, String... nodesIds) {
        super(request);
        this.nodesIds = nodesIds;
    }

    /**
     * Get information about tasks from nodes based on the nodes ids specified.
     * If none are passed, information for all nodes will be returned.
     */
    public BaseTasksRequest(String... nodesIds) {
        this.nodesIds = nodesIds;
    }

    /**
     * Sets the list of action masks for the actions that should be returned
     */
    @SuppressWarnings("unchecked")
    public final T actions(String... actions) {
        this.actions = actions;
        return (T) this;
    }

    /**
     * Return the list of action masks for the actions that should be returned
     */
    public String[] actions() {
        return actions;
    }

    public final String[] nodesIds() {
        return nodesIds;
    }

    @SuppressWarnings("unchecked")
    public final T nodesIds(String... nodesIds) {
        this.nodesIds = nodesIds;
        return (T) this;
    }

    /**
     * Returns the parent node id that tasks should be filtered by
     */
    public String parentNode() {
        return parentNode;
    }

    @SuppressWarnings("unchecked")
    public T parentNode(String parentNode) {
        this.parentNode = parentNode;
        return (T) this;
    }

    /**
     * Returns the parent task id that tasks should be filtered by
     */
    public long parentTaskId() {
        return parentTaskId;
    }

    @SuppressWarnings("unchecked")
    public T parentTaskId(long parentTaskId) {
        this.parentTaskId = parentTaskId;
        return (T) this;
    }

    public TimeValue timeout() {
        return this.timeout;
    }

    @SuppressWarnings("unchecked")
    public final T timeout(TimeValue timeout) {
        this.timeout = timeout;
        return (T) this;
    }

    @SuppressWarnings("unchecked")
    public final T timeout(String timeout) {
        this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout");
        return (T) this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        nodesIds = in.readStringArray();
        actions = in.readStringArray();
        parentNode = in.readOptionalString();
        parentTaskId = in.readLong();
        if (in.readBoolean()) {
            timeout = TimeValue.readTimeValue(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeStringArrayNullable(nodesIds);
        out.writeStringArrayNullable(actions);
        out.writeOptionalString(parentNode);
        out.writeLong(parentTaskId);
        out.writeOptionalStreamable(timeout);
    }

    public boolean match(Task task) {
        if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
            return false;
        }
        if (parentNode() != null || parentTaskId() != BaseTasksRequest.ALL_TASKS) {
            if (task instanceof ChildTask) {
                if (parentNode() != null) {
                    if (parentNode().equals(((ChildTask) task).getParentNode()) == false) {
                        return false;
                    }
                }
                if (parentTaskId() != BaseTasksRequest.ALL_TASKS) {
                    if (parentTaskId() != ((ChildTask) task).getParentId()) {
                        return false;
                    }
                }
            } else {
                // This is not a child task and we need to match parent node or id
                return false;
            }
        }
        return true;
    }
}
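match(...) above filters tasks by action name with Regex.simpleMatch, where an empty mask (ALL_ACTIONS) matches everything and '*' acts as a wildcard. A self-contained approximation of that check; the regex translation below is a stand-in for the real Regex helper:

import java.util.regex.Pattern;

public class ActionMaskDemo {
    static boolean simpleMatch(String[] patterns, String action) {
        if (patterns == null || patterns.length == 0) {
            return true; // empty mask: match all actions
        }
        for (String p : patterns) {
            // quote the pattern literally, then re-open it around each '*' wildcard
            String regex = Pattern.quote(p).replace("*", "\\E.*\\Q");
            if (Pattern.matches(regex, action)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        String[] mask = {"cluster:monitor/*"};
        System.out.println(simpleMatch(mask, "cluster:monitor/tasks/lists")); // true
        System.out.println(simpleMatch(mask, "indices:data/write/bulk"));     // false
        System.out.println(simpleMatch(new String[0], "anything"));           // true (ALL_ACTIONS)
    }
}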
@@ -0,0 +1,92 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support.tasks;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Base class for responses of task-related operations
 */
public class BaseTasksResponse extends ActionResponse {
    private List<TaskOperationFailure> taskFailures;
    private List<FailedNodeException> nodeFailures;

    public BaseTasksResponse() {
    }

    public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures) {
        this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures));
        this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures));
    }

    /**
     * The list of task failure exceptions.
     */
    public List<TaskOperationFailure> getTaskFailures() {
        return taskFailures;
    }

    /**
     * The list of node failure exceptions.
     */
    public List<FailedNodeException> getNodeFailures() {
        return nodeFailures;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        List<TaskOperationFailure> taskFailures = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            taskFailures.add(new TaskOperationFailure(in));
        }
        size = in.readVInt();
        this.taskFailures = Collections.unmodifiableList(taskFailures);
        List<FailedNodeException> nodeFailures = new ArrayList<>();
        for (int i = 0; i < size; i++) {
            nodeFailures.add(new FailedNodeException(in));
        }
        this.nodeFailures = Collections.unmodifiableList(nodeFailures);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(taskFailures.size());
        for (TaskOperationFailure exp : taskFailures) {
            exp.writeTo(out);
        }
        out.writeVInt(nodeFailures.size());
        for (FailedNodeException exp : nodeFailures) {
            exp.writeTo(out);
        }
    }
}
@@ -0,0 +1,54 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.support.tasks;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;

/**
 * Builder for task-based requests
 */
public class TasksRequestBuilder<Request extends BaseTasksRequest<Request>, Response extends BaseTasksResponse, RequestBuilder extends TasksRequestBuilder<Request, Response, RequestBuilder>>
        extends ActionRequestBuilder<Request, Response, RequestBuilder> {

    protected TasksRequestBuilder(ElasticsearchClient client, Action<Request, Response, RequestBuilder> action, Request request) {
        super(client, action, request);
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setNodesIds(String... nodesIds) {
        request.nodesIds(nodesIds);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setActions(String... actions) {
        request.actions(actions);
        return (RequestBuilder) this;
    }

    @SuppressWarnings("unchecked")
    public final RequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        return (RequestBuilder) this;
    }
}
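TasksRequestBuilder relies on recursive generics (a "self type" parameter) so that setters declared on the base builder can return the concrete subclass and keep a fluent chain type-safe. A minimal stand-alone sketch of the trick; the names are illustrative:

public class SelfTypedBuilderDemo {
    static class BaseBuilder<B extends BaseBuilder<B>> {
        String[] actions = new String[0];

        @SuppressWarnings("unchecked")
        final B setActions(String... actions) {
            this.actions = actions;
            return (B) this; // safe as long as every subclass binds B to itself
        }
    }

    static class ListBuilder extends BaseBuilder<ListBuilder> {
        boolean detailed;

        ListBuilder setDetailed(boolean detailed) {
            this.detailed = detailed;
            return this;
        }
    }

    public static void main(String[] args) {
        // setActions returns ListBuilder, so subtype setters can follow it in the chain
        ListBuilder b = new ListBuilder().setActions("cluster:*").setDetailed(true);
        System.out.println(b.actions.length + " action mask(s), detailed=" + b.detailed);
    }
}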
@@ -0,0 +1,380 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support.tasks;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ChildTaskRequest;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Supplier;

/**
 * The base class for transport actions that are interacting with currently running tasks.
 */
public abstract class TransportTasksAction<
        TasksRequest extends BaseTasksRequest<TasksRequest>,
        TasksResponse extends BaseTasksResponse,
        TaskResponse extends Writeable<TaskResponse>
    > extends HandledTransportAction<TasksRequest, TasksResponse> {

    protected final ClusterName clusterName;
    protected final ClusterService clusterService;
    protected final TransportService transportService;
    protected final Supplier<TasksRequest> requestSupplier;
    protected final Supplier<TasksResponse> responseSupplier;

    protected final String transportNodeAction;

    protected TransportTasksAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool,
                                   ClusterService clusterService, TransportService transportService, ActionFilters actionFilters,
                                   IndexNameExpressionResolver indexNameExpressionResolver, Supplier<TasksRequest> requestSupplier,
                                   Supplier<TasksResponse> responseSupplier,
                                   String nodeExecutor) {
        super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, requestSupplier);
        this.clusterName = clusterName;
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.transportNodeAction = actionName + "[n]";
        this.requestSupplier = requestSupplier;
        this.responseSupplier = responseSupplier;

        transportService.registerRequestHandler(transportNodeAction, NodeTaskRequest::new, nodeExecutor, new NodeTransportHandler());
    }

    @Override
    protected final void doExecute(TasksRequest request, ActionListener<TasksResponse> listener) {
        logger.warn("attempt to execute a transport tasks operation without a task");
        throw new UnsupportedOperationException("task parameter is required for this operation");
    }

    @Override
    protected void doExecute(Task task, TasksRequest request, ActionListener<TasksResponse> listener) {
        new AsyncAction(task, request, listener).start();
    }

    private NodeTasksResponse nodeOperation(NodeTaskRequest nodeTaskRequest) {
        TasksRequest request = nodeTaskRequest.tasksRequest;
        List<TaskResponse> results = new ArrayList<>();
        List<TaskOperationFailure> exceptions = new ArrayList<>();
        for (Task task : taskManager.getTasks().values()) {
            // First check action and node filters
            if (request.match(task)) {
                try {
                    results.add(taskOperation(request, task));
                } catch (Exception ex) {
                    exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
                }
            }
        }
        return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
    }

    protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
        return nodesIds;
    }

    protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
        return clusterState.nodes().resolveNodesIds(request.nodesIds());
    }

    protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);

    @SuppressWarnings("unchecked")
    protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) {
        List<TaskResponse> tasks = new ArrayList<>();
        List<FailedNodeException> failedNodeExceptions = new ArrayList<>();
        List<TaskOperationFailure> taskOperationFailures = new ArrayList<>();
        for (int i = 0; i < responses.length(); i++) {
            Object response = responses.get(i);
            if (response instanceof FailedNodeException) {
                failedNodeExceptions.add((FailedNodeException) response);
            } else {
                NodeTasksResponse tasksResponse = (NodeTasksResponse) response;
                if (tasksResponse.results != null) {
                    tasks.addAll(tasksResponse.results);
                }
                if (tasksResponse.exceptions != null) {
                    taskOperationFailures.addAll(tasksResponse.exceptions);
                }
            }
        }
        return newResponse(request, tasks, taskOperationFailures, failedNodeExceptions);
    }

    protected abstract TaskResponse readTaskResponse(StreamInput in) throws IOException;

    protected abstract TaskResponse taskOperation(TasksRequest request, Task task);

    protected boolean transportCompress() {
        return false;
    }

    protected abstract boolean accumulateExceptions();

    private class AsyncAction {

        private final TasksRequest request;
        private final String[] nodesIds;
        private final DiscoveryNode[] nodes;
        private final ActionListener<TasksResponse> listener;
        private final AtomicReferenceArray<Object> responses;
        private final AtomicInteger counter = new AtomicInteger();
        private final Task task;

        private AsyncAction(Task task, TasksRequest request, ActionListener<TasksResponse> listener) {
            this.task = task;
            this.request = request;
            this.listener = listener;
            ClusterState clusterState = clusterService.state();
            String[] nodesIds = resolveNodes(request, clusterState);
            this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
            ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
            this.nodes = new DiscoveryNode[nodesIds.length];
            for (int i = 0; i < nodesIds.length; i++) {
                this.nodes[i] = nodes.get(nodesIds[i]);
            }
            this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
        }

        private void start() {
            if (nodesIds.length == 0) {
                // nothing to do
                try {
                    listener.onResponse(newResponse(request, responses));
                } catch (Throwable t) {
                    logger.debug("failed to generate empty response", t);
                    listener.onFailure(t);
                }
            } else {
                TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
                if (request.timeout() != null) {
                    builder.withTimeout(request.timeout());
                }
                builder.withCompress(transportCompress());
                for (int i = 0; i < nodesIds.length; i++) {
                    final String nodeId = nodesIds[i];
                    final int idx = i;
                    final DiscoveryNode node = nodes[i];
                    try {
                        if (node == null) {
                            onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
                        } else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
                            // the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
                            // we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
                            // those (and they randomize the client node usage, so tricky to find when)
                            onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
                        } else {
                            NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
                            nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                            transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
                                @Override
                                public NodeTasksResponse newInstance() {
                                    return new NodeTasksResponse();
                                }

                                @Override
                                public void handleResponse(NodeTasksResponse response) {
                                    onOperation(idx, response);
                                }

                                @Override
                                public void handleException(TransportException exp) {
                                    onFailure(idx, node.id(), exp);
                                }

                                @Override
                                public String executor() {
                                    return ThreadPool.Names.SAME;
                                }
                            });
                        }
                    } catch (Throwable t) {
                        onFailure(idx, nodeId, t);
                    }
                }
            }
        }

        private void onOperation(int idx, NodeTasksResponse nodeResponse) {
            responses.set(idx, nodeResponse);
            if (counter.incrementAndGet() == responses.length()) {
                finishHim();
            }
        }

        private void onFailure(int idx, String nodeId, Throwable t) {
            if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
                logger.debug("failed to execute on node [{}]", t, nodeId);
            }
            if (accumulateExceptions()) {
                responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
            }
            if (counter.incrementAndGet() == responses.length()) {
                finishHim();
            }
        }

        private void finishHim() {
            TasksResponse finalResponse;
            try {
                finalResponse = newResponse(request, responses);
            } catch (Throwable t) {
                logger.debug("failed to combine responses from nodes", t);
                listener.onFailure(t);
                return;
            }
            listener.onResponse(finalResponse);
        }
    }

    class NodeTransportHandler implements TransportRequestHandler<NodeTaskRequest> {

        @Override
        public void messageReceived(final NodeTaskRequest request, final TransportChannel channel) throws Exception {
            channel.sendResponse(nodeOperation(request));
        }
    }

    private class NodeTaskRequest extends ChildTaskRequest {
        private TasksRequest tasksRequest;

        protected NodeTaskRequest() {
            super();
        }

        protected NodeTaskRequest(TasksRequest tasksRequest) {
            super(tasksRequest);
            this.tasksRequest = tasksRequest;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            tasksRequest = requestSupplier.get();
            tasksRequest.readFrom(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            tasksRequest.writeTo(out);
        }
    }

    private class NodeTasksResponse extends TransportResponse {
        protected String nodeId;
        protected List<TaskOperationFailure> exceptions;
        protected List<TaskResponse> results;

        public NodeTasksResponse() {
}
|
||||
|
||||
public NodeTasksResponse(String nodeId,
|
||||
List<TaskResponse> results,
|
||||
List<TaskOperationFailure> exceptions) {
|
||||
this.nodeId = nodeId;
|
||||
this.results = results;
|
||||
this.exceptions = exceptions;
|
||||
}
|
||||
|
||||
public String getNodeId() {
|
||||
return nodeId;
|
||||
}
|
||||
|
||||
public List<TaskOperationFailure> getExceptions() {
|
||||
return exceptions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
nodeId = in.readString();
|
||||
int resultsSize = in.readVInt();
|
||||
results = new ArrayList<>(resultsSize);
|
||||
for (; resultsSize > 0; resultsSize--) {
|
||||
final TaskResponse result = in.readBoolean() ? readTaskResponse(in) : null;
|
||||
results.add(result);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
int taskFailures = in.readVInt();
|
||||
exceptions = new ArrayList<>(taskFailures);
|
||||
for (int i = 0; i < taskFailures; i++) {
|
||||
exceptions.add(new TaskOperationFailure(in));
|
||||
}
|
||||
} else {
|
||||
exceptions = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(nodeId);
|
||||
out.writeVInt(results.size());
|
||||
for (TaskResponse result : results) {
|
||||
if (result != null) {
|
||||
out.writeBoolean(true);
|
||||
result.writeTo(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
out.writeBoolean(exceptions != null);
|
||||
if (exceptions != null) {
|
||||
int taskFailures = exceptions.size();
|
||||
out.writeVInt(taskFailures);
|
||||
for (TaskOperationFailure exception : exceptions) {
|
||||
exception.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
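
// Editor's sketch: a minimal concrete subclass wiring the abstract hooks above together. All
// "Demo" names are invented; the enclosing base class declaration falls outside this excerpt,
// so its name (TransportTasksAction) is an assumption here.
//
//     class TransportDemoTasksAction extends TransportTasksAction<DemoTasksRequest, DemoTasksResponse, DemoTaskResponse> {
//         @Override
//         protected DemoTasksResponse newResponse(DemoTasksRequest request, List<DemoTaskResponse> tasks,
//                                                 List<TaskOperationFailure> taskOperationFailures,
//                                                 List<FailedNodeException> failedNodeExceptions) {
//             return new DemoTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
//         }
//
//         @Override
//         protected DemoTaskResponse readTaskResponse(StreamInput in) throws IOException {
//             DemoTaskResponse response = new DemoTaskResponse();
//             response.readFrom(in);                      // mirror of TaskResponse.writeTo
//             return response;
//         }
//
//         @Override
//         protected DemoTaskResponse taskOperation(DemoTasksRequest request, Task task) {
//             return new DemoTaskResponse(task.getId());  // the actual per-task work goes here
//         }
//
//         @Override
//         protected boolean accumulateExceptions() {
//             return true;  // node failures then surface as FailedNodeException entries in newResponse
//         }
//     }
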
@ -24,7 +24,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;

@ -67,7 +67,7 @@ public class TermVectorsFilter {

        this.dfs = dfs;
        this.scoreTerms = new HashMap<>();
        this.similarity = new DefaultSimilarity();
        this.similarity = new ClassicSimilarity();
    }

    public void setSettings(TermVectorsRequest.FilterSettings settings) {

@ -23,6 +23,8 @@ import org.elasticsearch.common.SuppressForbidden;
import java.net.SocketPermission;
import java.net.URL;
import java.io.FilePermission;
import java.io.IOException;
import java.security.CodeSource;
import java.security.Permission;
import java.security.PermissionCollection;

@ -81,10 +83,39 @@ final class ESPolicy extends Policy {
            }
        }

        // Special handling for broken Hadoop code: "let me execute or my classes will not load"
        // yeah right, REMOVE THIS when hadoop is fixed
        if (permission instanceof FilePermission && "<<ALL FILES>>".equals(permission.getName())) {
            for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
                if ("org.apache.hadoop.util.Shell".equals(element.getClassName()) &&
                        "runCommand".equals(element.getMethodName())) {
                    // we found the horrible method: the hack begins!
                    // force the hadoop code to back down, by throwing an exception that it catches.
                    rethrow(new IOException("no hadoop, you cannot do this."));
                }
            }
        }

        // otherwise defer to template + dynamic file permissions
        return template.implies(domain, permission) || dynamic.implies(permission) || system.implies(domain, permission);
    }

    /**
     * Classy puzzler to rethrow any checked exception as an unchecked one.
     */
    private static class Rethrower<T extends Throwable> {
        private void rethrow(Throwable t) throws T {
            throw (T) t;
        }
    }

    /**
     * Rethrows <code>t</code> (identical object).
     */
    private void rethrow(Throwable t) {
        new Rethrower<Error>().rethrow(t);
    }
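
    // Editor's sketch: the Rethrower trick in isolation. javac checks "throws T" at the call
    // site, where T is inferred as Error (unchecked); type erasure turns "throw (T) t" into a
    // plain "throw t", so any checked Throwable escapes without being declared. Standalone demo
    // (hypothetical class, not part of this change):
    //
    //     class SneakyThrowDemo {
    //         @SuppressWarnings("unchecked")
    //         static <T extends Throwable> void sneakyThrow(Throwable t) throws T {
    //             throw (T) t; // the unchecked cast is erased; no runtime check happens
    //         }
    //
    //         public static void main(String[] args) {
    //             // compiles without a throws clause or try/catch, yet throws a checked IOException
    //             SneakyThrowDemo.<RuntimeException>sneakyThrow(new java.io.IOException("checked!"));
    //         }
    //     }
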
    @Override
    public PermissionCollection getPermissions(CodeSource codesource) {
        // code should not rely on this method, or at least use it correctly:

@ -33,6 +33,9 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;

@ -249,6 +252,29 @@ public interface ClusterAdminClient extends ElasticsearchClient {
     */
    NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds);

    /**
     * List tasks
     *
     * @param request The nodes tasks request
     * @return The result future
     * @see org.elasticsearch.client.Requests#listTasksRequest(String...)
     */
    ActionFuture<ListTasksResponse> listTasks(ListTasksRequest request);

    /**
     * List active tasks
     *
     * @param request  The nodes tasks request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#listTasksRequest(String...)
     */
    void listTasks(ListTasksRequest request, ActionListener<ListTasksResponse> listener);

    /**
     * List active tasks
     */
    ListTasksRequestBuilder prepareListTasks(String... nodesIds);
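
    // Editor's sketch: how a caller might use the new surface; assumes an existing Client named
    // "client" and the standard admin().cluster() accessor (illustrative only):
    //
    //     ListTasksResponse response = client.admin().cluster()
    //             .listTasks(Requests.listTasksRequest())   // all nodes; pass node ids to restrict
    //             .actionGet();
    //     // or asynchronously / via the builder:
    //     client.admin().cluster().prepareListTasks("node1").execute(listener);
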
    /**
     * Returns list of shards the given search would be executed on.
     */

@ -22,6 +22,7 @@ package org.elasticsearch.client;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;

@ -404,6 +405,27 @@ public class Requests {
        return new ClusterStatsRequest();
    }

    /**
     * Creates a nodes tasks request against all the nodes.
     *
     * @return The nodes tasks request
     * @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
     */
    public static ListTasksRequest listTasksRequest() {
        return new ListTasksRequest();
    }

    /**
     * Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
     *
     * @param nodesIds The nodes ids to get the tasks for
     * @return The nodes tasks request
     * @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
     */
    public static ListTasksRequest listTasksRequest(String... nodesIds) {
        return new ListTasksRequest(nodesIds);
    }
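
    // Editor's sketch: the two factory forms side by side (illustrative only):
    //
    //     ListTasksRequest all = Requests.listTasksRequest();                  // every node
    //     ListTasksRequest some = Requests.listTasksRequest("nodeA", "nodeB"); // only these nodes
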
    /**
     * Registers snapshot repository
     *

@ -41,6 +41,10 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder;

@ -968,6 +972,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new NodesHotThreadsRequestBuilder(this, NodesHotThreadsAction.INSTANCE).setNodesIds(nodesIds);
    }

    @Override
    public ActionFuture<ListTasksResponse> listTasks(final ListTasksRequest request) {
        return execute(ListTasksAction.INSTANCE, request);
    }

    @Override
    public void listTasks(final ListTasksRequest request, final ActionListener<ListTasksResponse> listener) {
        execute(ListTasksAction.INSTANCE, request, listener);
    }

    @Override
    public ListTasksRequestBuilder prepareListTasks(String... nodesIds) {
        return new ListTasksRequestBuilder(this, ListTasksAction.INSTANCE).setNodesIds(nodesIds);
    }

    @Override
    public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) {
        return execute(ClusterSearchShardsAction.INSTANCE, request);

@ -65,6 +65,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.indexing.IndexingSlowLog;
import org.elasticsearch.index.search.stats.SearchSlowLog;

@ -73,7 +74,6 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.ttl.IndicesTTLService;

@ -128,7 +128,6 @@ public class ClusterModule extends AbstractModule {
        registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
    }


    private void registerBuiltinIndexSettings() {
        registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
        registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);

@ -140,7 +139,6 @@ public class ClusterModule extends AbstractModule {
        registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
        registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
        registerIndexDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
        registerIndexDynamicSetting(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, Validator.EMPTY);
        registerIndexDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
        registerIndexDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, Validator.EMPTY);
        registerIndexDynamicSetting(IndexMetaData.SETTING_READ_ONLY, Validator.EMPTY);

@ -152,7 +150,6 @@ public class ClusterModule extends AbstractModule {
        registerIndexDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE, Validator.EMPTY);
        registerIndexDynamicSetting(IndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
        registerIndexDynamicSetting(PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS, Validator.EMPTY);
        registerIndexDynamicSetting(EngineConfig.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN);
        registerIndexDynamicSetting(EngineConfig.INDEX_GC_DELETES_SETTING, Validator.TIME);
        registerIndexDynamicSetting(IndexShard.INDEX_FLUSH_ON_CLOSE, Validator.BOOLEAN);
        registerIndexDynamicSetting(EngineConfig.INDEX_VERSION_MAP_SIZE, Validator.BYTES_SIZE_OR_PERCENTAGE);

@ -182,13 +179,10 @@ public class ClusterModule extends AbstractModule {
        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
        registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
        registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
        registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
        registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
        registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, Validator.EMPTY);
        registerIndexDynamicSetting(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
        registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
        registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
        registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
        registerIndexDynamicSetting(IndicesRequestCache.DEPRECATED_INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
        registerIndexDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME);
        registerIndexDynamicSetting(DefaultSearchContext.MAX_RESULT_WINDOW, Validator.POSITIVE_INTEGER);
    }
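
    // Editor's sketch: registrations all follow the same setting-name + validator shape, so a
    // hypothetical extension would look like this (setting name invented for illustration):
    //
    //     registerIndexDynamicSetting("index.myplugin.max_widgets", Validator.NON_NEGATIVE_INTEGER);
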
@ -26,6 +26,7 @@ import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.TaskManager;

import java.util.List;

@ -148,4 +149,9 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
     * @return A zero time value if the queue is empty, otherwise the time value of the oldest task waiting in the queue
     */
    TimeValue getMaxTaskWaitTime();

    /**
     * Returns the task manager created by the cluster service
     */
    TaskManager getTaskManager();
}

@ -50,7 +50,7 @@ public class ClusterStateObserver {
    final AtomicReference<ObservedState> lastObservedState;
    final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener();
    // observingContext is not null when waiting on cluster state changes
    final AtomicReference<ObservingContext> observingContext = new AtomicReference<ObservingContext>(null);
    final AtomicReference<ObservingContext> observingContext = new AtomicReference<>(null);
    volatile Long startTimeNS;
    volatile boolean timedOut;

@ -117,7 +117,7 @@ public class ClusterStateObserver {
        if (timeOutValue != null) {
            long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
            timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS;
            if (timeoutTimeLeftMS <= 0l) {
            if (timeoutTimeLeftMS <= 0L) {
                // things have timed out while we were busy -> notify
                logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
                // update to latest, in case people want to retry

@ -238,7 +238,7 @@ public class ClusterStateObserver {
        }
    }

    public static interface Listener {
    public interface Listener {

        /** called when a new state is observed */
        void onNewClusterState(ClusterState state);

@ -256,15 +256,17 @@ public class ClusterStateObserver {
         *
         * @return true if newState should be accepted
         */
        public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
                             ClusterState newState, ClusterState.ClusterStateStatus newStatus);
        boolean apply(ClusterState previousState,
                      ClusterState.ClusterStateStatus previousStatus,
                      ClusterState newState,
                      ClusterState.ClusterStateStatus newStatus);

        /**
         * called to see whether a cluster change should be accepted
         *
         * @return true if changedEvent.state() should be accepted
         */
        public boolean apply(ClusterChangedEvent changedEvent);
        boolean apply(ClusterChangedEvent changedEvent);
    }


@ -272,20 +274,14 @@ public class ClusterStateObserver {

        @Override
        public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
            if (previousState != newState || previousStatus != newStatus) {
                return validate(newState);
            }
            return false;
            return (previousState != newState || previousStatus != newStatus) && validate(newState);
        }

        protected abstract boolean validate(ClusterState newState);

        @Override
        public boolean apply(ClusterChangedEvent changedEvent) {
            if (changedEvent.previousState().version() != changedEvent.state().version()) {
                return validate(changedEvent.state());
            }
            return false;
            return changedEvent.previousState().version() != changedEvent.state().version() && validate(changedEvent.state());
        }
    }
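
    // Editor's sketch: a concrete predicate built on the abstract base above, accepting the next
    // state only once a master is known. The base class declaration line falls outside this hunk;
    // its name (ValidationPredicate) is an assumption here. Illustrative only:
    //
    //     ChangePredicate waitForMaster = new ValidationPredicate() {
    //         @Override
    //         protected boolean validate(ClusterState newState) {
    //             return newState.nodes().masterNode() != null;
    //         }
    //     };
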
@ -39,6 +39,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

@ -58,49 +59,38 @@ import java.util.Locale;

import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;


public class ShardStateAction extends AbstractComponent {
    public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
    public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";

    private final TransportService transportService;
    private final ClusterService clusterService;
    private final AllocationService allocationService;
    private final RoutingService routingService;

    @Inject
    public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
                            AllocationService allocationService, RoutingService routingService) {
        super(settings);
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.allocationService = allocationService;
        this.routingService = routingService;

        transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler());
        transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler());
        transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger));
        transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
    }

    public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
        shardFailed(shardRouting, indexUUID, message, failure, null, listener);
    public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
        shardFailed(clusterState, shardRouting, indexUUID, message, failure, null, listener);
    }

    public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, TimeValue timeout, Listener listener) {
        DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
    public void resendShardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, Listener listener) {
        logger.trace("{} re-sending failed shard [{}], index UUID [{}], reason [{}]", shardRouting.shardId(), failure, shardRouting, indexUUID, message);
        shardFailed(clusterState, shardRouting, indexUUID, message, failure, listener);
    }

    public void shardFailed(final ClusterState clusterState, final ShardRouting shardRouting, final String indexUUID, final String message, @Nullable final Throwable failure, TimeValue timeout, Listener listener) {
        DiscoveryNode masterNode = clusterState.nodes().masterNode();
        if (masterNode == null) {
            logger.warn("can't send shard failed for {}, no master known.", shardRouting);
            logger.warn("{} no master known to fail shard [{}]", shardRouting.shardId(), shardRouting);
            listener.onShardFailedNoMaster();
            return;
        }
        innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, timeout, listener);
    }
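
    // Editor's sketch: call sites now pass in the cluster state they observed rather than having
    // this class read clusterService.state() itself. Assumes Listener exposes the two callbacks
    // invoked elsewhere in this change; illustrative only:
    //
    //     ClusterState observedState = clusterService.state();
    //     shardStateAction.shardFailed(observedState, failedShard, indexUUID, "engine failure", cause,
    //             new ShardStateAction.Listener() {
    //                 @Override
    //                 public void onShardFailedNoMaster() { /* e.g. retry once a master is elected */ }
    //
    //                 @Override
    //                 public void onShardFailedFailure(DiscoveryNode master, TransportException e) { /* log / retry */ }
    //             });
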
    public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, @Nullable final Throwable failure, Listener listener) {
        logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", failure, shardRouting.shardId(), shardRouting, indexUUID, message);
        innerShardFailed(shardRouting, indexUUID, masterNode, message, failure, null, listener);
    }

    private void innerShardFailed(final ShardRouting shardRouting, final String indexUUID, final DiscoveryNode masterNode, final String message, final Throwable failure, TimeValue timeout, Listener listener) {
        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, message, failure);
        TransportRequestOptions options = TransportRequestOptions.EMPTY;
        if (timeout != null) {

@ -115,33 +105,49 @@ public class ShardStateAction extends AbstractComponent {

            @Override
            public void handleException(TransportException exp) {
                logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry);
                logger.warn("{} unexpected failure while sending request to [{}] to fail shard [{}]", exp, shardRoutingEntry.shardRouting.shardId(), masterNode, shardRoutingEntry);
                listener.onShardFailedFailure(masterNode, exp);
            }
        });
    }

    private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
    private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
        private final ClusterService clusterService;
        private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
        private final ESLogger logger;

        public ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, ESLogger logger) {
            this.clusterService = clusterService;
            this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor;
            this.logger = logger;
        }

        @Override
        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
            handleShardFailureOnMaster(request, new ClusterStateTaskListener() {
            logger.warn("{} received shard failed for {}", request.failure, request.shardRouting.shardId(), request);
            clusterService.submitStateUpdateTask(
                    "shard-failed (" + request.shardRouting + "), message [" + request.message + "]",
                    request,
                    ClusterStateTaskConfig.build(Priority.HIGH),
                    shardFailedClusterStateTaskExecutor,
                    new ClusterStateTaskListener() {
                        @Override
                        public void onFailure(String source, Throwable t) {
                            logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting);
                            logger.error("{} unexpected failure while failing shard [{}]", t, request.shardRouting.shardId(), request.shardRouting);
                            try {
                                channel.sendResponse(t);
                            } catch (Throwable channelThrowable) {
                                logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting);
                                logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), t, request.shardRouting);
                            }
                        }

                        @Override
                        public void onNoLongerMaster(String source) {
                            logger.error("no longer master while failing shard [{}]", request.shardRouting);
                            logger.error("{} no longer master while failing shard [{}]", request.shardRouting.shardId(), request.shardRouting);
                            try {
                                channel.sendResponse(new NotMasterException(source));
                            } catch (Throwable channelThrowable) {
                                logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting);
                                logger.warn("{} failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), request.shardRouting);
                            }
                        }

@ -150,7 +156,7 @@ public class ShardStateAction extends AbstractComponent {
                            try {
                                channel.sendResponse(TransportResponse.Empty.INSTANCE);
                            } catch (Throwable channelThrowable) {
                                logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting);
                                logger.warn("{} failed to send response while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), request.shardRouting);
                            }
                        }
                    }

@ -158,7 +164,17 @@ public class ShardStateAction extends AbstractComponent {
        }
    }

    class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> {
    private static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry> {
        private final AllocationService allocationService;
        private final RoutingService routingService;
        private final ESLogger logger;

        public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
            this.allocationService = allocationService;
            this.routingService = routingService;
            this.logger = logger;
        }

        @Override
        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
            BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();

@ -192,48 +208,56 @@ public class ShardStateAction extends AbstractComponent {
        }
    }

    private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();

    private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) {
        logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
        clusterService.submitStateUpdateTask(
                "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
                shardRoutingEntry,
                ClusterStateTaskConfig.build(Priority.HIGH),
                shardFailedClusterStateHandler,
                listener);
    }

    public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
        DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
    public void shardStarted(final ClusterState clusterState, final ShardRouting shardRouting, String indexUUID, final String reason) {
        DiscoveryNode masterNode = clusterState.nodes().masterNode();
        if (masterNode == null) {
            logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting);
            logger.warn("{} no master known to start shard [{}]", shardRouting.shardId(), shardRouting);
            return;
        }
        shardStarted(shardRouting, indexUUID, reason, masterNode);
    }

    public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) {
        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
        logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
        logger.debug("sending start shard [{}]", shardRoutingEntry);
        transportService.sendRequest(masterNode,
                SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                    @Override
                    public void handleException(TransportException exp) {
                        logger.warn("failed to send shard started to [{}]", exp, masterNode);
                        logger.warn("{} failure sending start shard [{}] to [{}]", exp, shardRouting.shardId(), masterNode, shardRouting);
                    }
                });
    }

    class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
    private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
        private final ClusterService clusterService;
        private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
        private final ESLogger logger;

        public ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, ESLogger logger) {
            this.clusterService = clusterService;
            this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor;
            this.logger = logger;
        }

        @Override
        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
            handleShardStartedOnMaster(request);
            logger.debug("{} received shard started for [{}]", request.shardRouting.shardId(), request);
            clusterService.submitStateUpdateTask(
                    "shard-started (" + request.shardRouting + "), reason [" + request.message + "]",
                    request,
                    ClusterStateTaskConfig.build(Priority.URGENT),
                    shardStartedClusterStateTaskExecutor,
                    shardStartedClusterStateTaskExecutor);
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }
    }

    class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
    private static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
        private final AllocationService allocationService;
        private final ESLogger logger;

        public ShardStartedClusterStateTaskExecutor(AllocationService allocationService, ESLogger logger) {
            this.allocationService = allocationService;
            this.logger = logger;
        }

        @Override
        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
            BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();

@ -262,19 +286,6 @@ public class ShardStateAction extends AbstractComponent {
        }
    }

    private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler();

    private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
        logger.debug("received shard started for {}", shardRoutingEntry);

        clusterService.submitStateUpdateTask(
                "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
                shardRoutingEntry,
                ClusterStateTaskConfig.build(Priority.URGENT),
                shardStartedClusterStateHandler,
                shardStartedClusterStateHandler);
    }

    public static class ShardRoutingEntry extends TransportRequest {
        ShardRouting shardRouting;
        String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;

@ -50,92 +50,20 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

    public static final MappingMetaData PROTO = new MappingMetaData();

    public static class Id {

        public static final Id EMPTY = new Id(null);

        private final String path;

        private final String[] pathElements;

        public Id(String path) {
            this.path = path;
            if (path == null) {
                pathElements = Strings.EMPTY_ARRAY;
            } else {
                pathElements = Strings.delimitedListToStringArray(path, ".");
            }
        }

        public boolean hasPath() {
            return path != null;
        }

        public String path() {
            return this.path;
        }

        public String[] pathElements() {
            return this.pathElements;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;

            Id id = (Id) o;

            if (path != null ? !path.equals(id.path) : id.path != null) return false;
            if (!Arrays.equals(pathElements, id.pathElements)) return false;

            return true;
        }

        @Override
        public int hashCode() {
            int result = path != null ? path.hashCode() : 0;
            result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
            return result;
        }
    }

    public static class Routing {

        public static final Routing EMPTY = new Routing(false, null);
        public static final Routing EMPTY = new Routing(false);

        private final boolean required;

        private final String path;

        private final String[] pathElements;

        public Routing(boolean required, String path) {
        public Routing(boolean required) {
            this.required = required;
            this.path = path;
            if (path == null) {
                pathElements = Strings.EMPTY_ARRAY;
            } else {
                pathElements = Strings.delimitedListToStringArray(path, ".");
            }
        }

        public boolean required() {
            return required;
        }

        public boolean hasPath() {
            return path != null;
        }

        public String path() {
            return this.path;
        }

        public String[] pathElements() {
            return this.pathElements;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;

@ -143,19 +71,12 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

            Routing routing = (Routing) o;

            if (required != routing.required) return false;
            if (path != null ? !path.equals(routing.path) : routing.path != null) return false;
            if (!Arrays.equals(pathElements, routing.pathElements)) return false;

            return true;
            return required == routing.required;
        }

        @Override
        public int hashCode() {
            int result = (required ? 1 : 0);
            result = 31 * result + (path != null ? path.hashCode() : 0);
            result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
            return result;
            return getClass().hashCode() + (required ? 1 : 0);
        }
    }

@ -182,31 +103,21 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    }


    public static final Timestamp EMPTY = new Timestamp(false, null, TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT,
    public static final Timestamp EMPTY = new Timestamp(false, TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT,
            TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null);

    private final boolean enabled;

    private final String path;

    private final String format;

    private final String[] pathElements;

    private final FormatDateTimeFormatter dateTimeFormatter;

    private final String defaultTimestamp;

    private final Boolean ignoreMissing;

    public Timestamp(boolean enabled, String path, String format, String defaultTimestamp, Boolean ignoreMissing) {
    public Timestamp(boolean enabled, String format, String defaultTimestamp, Boolean ignoreMissing) {
        this.enabled = enabled;
        this.path = path;
        if (path == null) {
            pathElements = Strings.EMPTY_ARRAY;
        } else {
            pathElements = Strings.delimitedListToStringArray(path, ".");
        }
        this.format = format;
        this.dateTimeFormatter = Joda.forPattern(format);
        this.defaultTimestamp = defaultTimestamp;

@ -217,18 +128,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        return enabled;
    }

    public boolean hasPath() {
        return path != null;
    }

    public String path() {
        return this.path;
    }

    public String[] pathElements() {
        return this.pathElements;
    }

    public String format() {
        return this.format;
    }

@ -258,10 +157,8 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

        if (enabled != timestamp.enabled) return false;
        if (format != null ? !format.equals(timestamp.format) : timestamp.format != null) return false;
        if (path != null ? !path.equals(timestamp.path) : timestamp.path != null) return false;
        if (defaultTimestamp != null ? !defaultTimestamp.equals(timestamp.defaultTimestamp) : timestamp.defaultTimestamp != null) return false;
        if (ignoreMissing != null ? !ignoreMissing.equals(timestamp.ignoreMissing) : timestamp.ignoreMissing != null) return false;
        if (!Arrays.equals(pathElements, timestamp.pathElements)) return false;

        return true;
    }

@ -269,9 +166,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    @Override
    public int hashCode() {
        int result = (enabled ? 1 : 0);
        result = 31 * result + (path != null ? path.hashCode() : 0);
        result = 31 * result + (format != null ? format.hashCode() : 0);
        result = 31 * result + (pathElements != null ? Arrays.hashCode(pathElements) : 0);
        result = 31 * result + (dateTimeFormatter != null ? dateTimeFormatter.hashCode() : 0);
        result = 31 * result + (defaultTimestamp != null ? defaultTimestamp.hashCode() : 0);
        result = 31 * result + (ignoreMissing != null ? ignoreMissing.hashCode() : 0);

@ -283,7 +178,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

    private final CompressedXContent source;

    private Id id;
    private Routing routing;
    private Timestamp timestamp;
    private boolean hasParentField;

@ -291,9 +185,8 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    public MappingMetaData(DocumentMapper docMapper) {
        this.type = docMapper.type();
        this.source = docMapper.mappingSource();
        this.id = new Id(docMapper.idFieldMapper().path());
        this.routing = new Routing(docMapper.routingFieldMapper().required(), docMapper.routingFieldMapper().path());
        this.timestamp = new Timestamp(docMapper.timestampFieldMapper().enabled(), docMapper.timestampFieldMapper().path(),
        this.routing = new Routing(docMapper.routingFieldMapper().required());
        this.timestamp = new Timestamp(docMapper.timestampFieldMapper().enabled(),
                docMapper.timestampFieldMapper().fieldType().dateTimeFormatter().format(), docMapper.timestampFieldMapper().defaultTimestamp(),
                docMapper.timestampFieldMapper().ignoreMissing());
        this.hasParentField = docMapper.parentFieldMapper().active();

@ -337,40 +230,22 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    }

    private void initMappers(Map<String, Object> withoutType) {
        if (withoutType.containsKey("_id")) {
            String path = null;
            Map<String, Object> routingNode = (Map<String, Object>) withoutType.get("_id");
            for (Map.Entry<String, Object> entry : routingNode.entrySet()) {
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
                Object fieldNode = entry.getValue();
                if (fieldName.equals("path")) {
                    path = fieldNode.toString();
                }
            }
            this.id = new Id(path);
        } else {
            this.id = Id.EMPTY;
        }
        if (withoutType.containsKey("_routing")) {
            boolean required = false;
            String path = null;
            Map<String, Object> routingNode = (Map<String, Object>) withoutType.get("_routing");
            for (Map.Entry<String, Object> entry : routingNode.entrySet()) {
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
                Object fieldNode = entry.getValue();
                if (fieldName.equals("required")) {
                    required = nodeBooleanValue(fieldNode);
                } else if (fieldName.equals("path")) {
                    path = fieldNode.toString();
                }
            }
            this.routing = new Routing(required, path);
            this.routing = new Routing(required);
        } else {
            this.routing = Routing.EMPTY;
        }
        if (withoutType.containsKey("_timestamp")) {
            boolean enabled = false;
            String path = null;
            String format = TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT;
            String defaultTimestamp = TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP;
            Boolean ignoreMissing = null;

@ -380,8 +255,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
                Object fieldNode = entry.getValue();
                if (fieldName.equals("enabled")) {
                    enabled = nodeBooleanValue(fieldNode);
                } else if (fieldName.equals("path")) {
                    path = fieldNode.toString();
                } else if (fieldName.equals("format")) {
                    format = fieldNode.toString();
                } else if (fieldName.equals("default") && fieldNode != null) {

@ -390,7 +263,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
                    ignoreMissing = nodeBooleanValue(fieldNode);
                }
            }
            this.timestamp = new Timestamp(enabled, path, format, defaultTimestamp, ignoreMissing);
            this.timestamp = new Timestamp(enabled, format, defaultTimestamp, ignoreMissing);
        } else {
            this.timestamp = Timestamp.EMPTY;
        }

@ -401,19 +274,15 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        }
    }

    public MappingMetaData(String type, CompressedXContent source, Id id, Routing routing, Timestamp timestamp, boolean hasParentField) {
    public MappingMetaData(String type, CompressedXContent source, Routing routing, Timestamp timestamp, boolean hasParentField) {
        this.type = type;
        this.source = source;
        this.id = id;
        this.routing = routing;
        this.timestamp = timestamp;
        this.hasParentField = hasParentField;
    }

    void updateDefaultMapping(MappingMetaData defaultMapping) {
        if (id == Id.EMPTY) {
            id = defaultMapping.id();
        }
        if (routing == Routing.EMPTY) {
            routing = defaultMapping.routing();
        }

@ -453,10 +322,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        return sourceAsMap();
    }

    public Id id() {
        return this.id;
    }

    public Routing routing() {
        return this.routing;
    }

@ -465,114 +330,14 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        return this.timestamp;
    }

    public ParseContext createParseContext(@Nullable String id, @Nullable String routing, @Nullable String timestamp) {
        // We parse the routing even if there is already a routing key in the request in order to make sure that
        // they are the same
        return new ParseContext(
                id == null && id().hasPath(),
                routing().hasPath(),
                timestamp == null && timestamp().hasPath()
        );
    }

    public void parse(XContentParser parser, ParseContext parseContext) throws IOException {
        innerParse(parser, parseContext);
    }

    private void innerParse(XContentParser parser, ParseContext context) throws IOException {
        if (!context.parsingStillNeeded()) {
            return;
        }

        XContentParser.Token token = parser.currentToken();
        if (token == null) {
            token = parser.nextToken();
        }
        if (token == XContentParser.Token.START_OBJECT) {
            token = parser.nextToken();
        }
        String idPart = context.idParsingStillNeeded() ? id().pathElements()[context.locationId] : null;
        String routingPart = context.routingParsingStillNeeded() ? routing().pathElements()[context.locationRouting] : null;
        String timestampPart = context.timestampParsingStillNeeded() ? timestamp().pathElements()[context.locationTimestamp] : null;

        for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) {
            // Must point to field name
            String fieldName = parser.currentName();
            // And then the value...
            token = parser.nextToken();
            boolean incLocationId = false;
            boolean incLocationRouting = false;
            boolean incLocationTimestamp = false;
            if (context.idParsingStillNeeded() && fieldName.equals(idPart)) {
                if (context.locationId + 1 == id.pathElements().length) {
                    if (!token.isValue()) {
                        throw new MapperParsingException("id field must be a value but was either an object or an array");
                    }
                    context.id = parser.textOrNull();
                    context.idResolved = true;
                } else {
                    incLocationId = true;
                }
            }
            if (context.routingParsingStillNeeded() && fieldName.equals(routingPart)) {
                if (context.locationRouting + 1 == routing.pathElements().length) {
                    context.routing = parser.textOrNull();
                    context.routingResolved = true;
                } else {
                    incLocationRouting = true;
                }
            }
            if (context.timestampParsingStillNeeded() && fieldName.equals(timestampPart)) {
                if (context.locationTimestamp + 1 == timestamp.pathElements().length) {
                    context.timestamp = parser.textOrNull();
                    context.timestampResolved = true;
                } else {
                    incLocationTimestamp = true;
                }
            }

            if (incLocationId || incLocationRouting || incLocationTimestamp) {
                if (token == XContentParser.Token.START_OBJECT) {
                    context.locationId += incLocationId ? 1 : 0;
                    context.locationRouting += incLocationRouting ? 1 : 0;
                    context.locationTimestamp += incLocationTimestamp ? 1 : 0;
                    innerParse(parser, context);
                    context.locationId -= incLocationId ? 1 : 0;
                    context.locationRouting -= incLocationRouting ? 1 : 0;
                    context.locationTimestamp -= incLocationTimestamp ? 1 : 0;
                }
            } else {
                parser.skipChildren();
            }

            if (!context.parsingStillNeeded()) {
                return;
            }
        }
    }
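
    // Editor's worked example: with a routing path of "props.shard" (pathElements ["props", "shard"]),
    // parsing {"title": "t", "props": {"shard": "r1", "x": 1}} matches "props" at depth 0, increments
    // locationRouting and recurses; inside, "shard" is the last path element, so context.routing is
    // set to "r1" and routingResolved flips, letting parsingStillNeeded() short-circuit the rest.
    // Unrelated subtrees are skipped wholesale via parser.skipChildren(). (Example values invented.)
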
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(type());
|
||||
source().writeTo(out);
|
||||
// id
|
||||
if (id().hasPath()) {
|
||||
out.writeBoolean(true);
|
||||
out.writeString(id().path());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
// routing
|
||||
out.writeBoolean(routing().required());
|
||||
if (routing().hasPath()) {
|
||||
out.writeBoolean(true);
|
||||
out.writeString(routing().path());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
// timestamp
|
||||
out.writeBoolean(timestamp().enabled());
|
||||
out.writeOptionalString(timestamp().path());
|
||||
out.writeString(timestamp().format());
|
||||
out.writeOptionalString(timestamp().defaultTimestamp());
|
||||
out.writeOptionalBoolean(timestamp().ignoreMissing());
|
||||
|
@@ -586,7 +351,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        MappingMetaData that = (MappingMetaData) o;

        if (!id.equals(that.id)) return false;
        if (!routing.equals(that.routing)) return false;
        if (!source.equals(that.source)) return false;
        if (!timestamp.equals(that.timestamp)) return false;

@@ -599,7 +363,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    public int hashCode() {
        int result = type.hashCode();
        result = 31 * result + source.hashCode();
        result = 31 * result + id.hashCode();
        result = 31 * result + routing.hashCode();
        result = 31 * result + timestamp.hashCode();
        return result;

@@ -608,142 +371,20 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
    public MappingMetaData readFrom(StreamInput in) throws IOException {
        String type = in.readString();
        CompressedXContent source = CompressedXContent.readCompressedString(in);
        // id
        Id id = new Id(in.readBoolean() ? in.readString() : null);
        // routing
        Routing routing = new Routing(in.readBoolean(), in.readBoolean() ? in.readString() : null);
        Routing routing = new Routing(in.readBoolean());
        // timestamp

        boolean enabled = in.readBoolean();
        String path = in.readOptionalString();
        String format = in.readString();
        String defaultTimestamp = in.readOptionalString();
        Boolean ignoreMissing = null;

        ignoreMissing = in.readOptionalBoolean();

        final Timestamp timestamp = new Timestamp(enabled, path, format, defaultTimestamp, ignoreMissing);
        final Timestamp timestamp = new Timestamp(enabled, format, defaultTimestamp, ignoreMissing);
        final boolean hasParentField = in.readBoolean();
        return new MappingMetaData(type, source, id, routing, timestamp, hasParentField);
        return new MappingMetaData(type, source, routing, timestamp, hasParentField);
    }

    public static class ParseContext {
        final boolean shouldParseId;
        final boolean shouldParseRouting;
        final boolean shouldParseTimestamp;

        int locationId = 0;
        int locationRouting = 0;
        int locationTimestamp = 0;
        boolean idResolved;
        boolean routingResolved;
        boolean timestampResolved;
        String id;
        String routing;
        String timestamp;

        public ParseContext(boolean shouldParseId, boolean shouldParseRouting, boolean shouldParseTimestamp) {
            this.shouldParseId = shouldParseId;
            this.shouldParseRouting = shouldParseRouting;
            this.shouldParseTimestamp = shouldParseTimestamp;
        }

        /**
         * The id value parsed, <tt>null</tt> if it does not require parsing, or is not resolved.
         */
        public String id() {
            return id;
        }

        /**
         * Is id parsing really needed at all?
         */
        public boolean shouldParseId() {
            return shouldParseId;
        }

        /**
         * Has id been resolved during the parsing phase?
         */
        public boolean idResolved() {
            return idResolved;
        }

        /**
         * Is id parsing still needed?
         */
        public boolean idParsingStillNeeded() {
            return shouldParseId && !idResolved;
        }

        /**
         * The routing value parsed, <tt>null</tt> if it does not require parsing, or is not resolved.
         */
        public String routing() {
            return routing;
        }

        /**
         * Is routing parsing really needed at all?
         */
        public boolean shouldParseRouting() {
            return shouldParseRouting;
        }

        /**
         * Has routing been resolved during the parsing phase?
         */
        public boolean routingResolved() {
            return routingResolved;
        }

        /**
         * Is routing parsing still needed?
         */
        public boolean routingParsingStillNeeded() {
            return shouldParseRouting && !routingResolved;
        }

        /**
         * The timestamp value parsed, <tt>null</tt> if it does not require parsing, or is not resolved.
         */
        public String timestamp() {
            return timestamp;
        }

        /**
         * Is timestamp parsing really needed at all?
         */
        public boolean shouldParseTimestamp() {
            return shouldParseTimestamp;
        }

        /**
         * Has timestamp been resolved during the parsing phase?
         */
        public boolean timestampResolved() {
            return timestampResolved;
        }

        /**
         * Is timestamp parsing still needed?
         */
        public boolean timestampParsingStillNeeded() {
            return shouldParseTimestamp && !timestampResolved;
        }

        /**
         * Do we really need parsing?
         */
        public boolean shouldParse() {
            return shouldParseId || shouldParseRouting || shouldParseTimestamp;
        }

        /**
         * Is parsing still needed?
         */
        public boolean parsingStillNeeded() {
            return idParsingStillNeeded() || routingParsingStillNeeded() || timestampParsingStillNeeded();
        }
    }
}
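The serialization change above folds the hand-rolled presence-flag branches (writeBoolean(true) followed by writeString(path)) into the writeOptionalString/readOptionalString helpers. As a rough, self-contained sketch of that encoding — plain java.io stand-ins, not the real StreamInput/StreamOutput API — the pattern looks like this:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hypothetical helpers mirroring the boolean-prefixed encoding used above.
final class OptionalStringCodec {

    // Equivalent of the if (hasPath()) { writeBoolean(true); writeString(path); } branches.
    static void writeOptionalString(DataOutput out, String value) throws IOException {
        out.writeBoolean(value != null); // presence flag first
        if (value != null) {
            out.writeUTF(value);         // payload only when present
        }
    }

    // Equivalent of: new Id(in.readBoolean() ? in.readString() : null)
    static String readOptionalString(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}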
@@ -259,9 +259,8 @@ public class MetaDataMappingService extends AbstractComponent {
            } else {
                newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
                if (existingMapper != null) {
                    // first, simulate
                    // this will just throw exceptions in case of problems
                    existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
                    // first, simulate: just call merge and ignore the result
                    existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
                } else {
                    // TODO: can we find a better place for this validation?
                    // The reason this validation is here is that the mapper service doesn't learn about
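The hunk above drops the boolean simulate flag: validation is now expressed as a merge whose result is discarded, with conflicts surfacing as exceptions. A minimal sketch of that pattern, using invented Mapper/Mapping stand-ins rather than the real mapper types:

// Simplified stand-ins; the real merge() lives on DocumentMapper and returns a merged mapping.
interface Mapping {}

interface Mapper {
    // throws IllegalArgumentException on conflicting fields; returns the merged mapping otherwise
    Mapping merge(Mapping incoming, boolean updateAllTypes);
}

final class MappingValidator {
    /** Validation is just a merge whose result is thrown away. */
    static void validate(Mapper existing, Mapping incoming, boolean updateAllTypes) {
        existing.merge(incoming, updateAllTypes); // any conflict surfaces as an exception here
    }
}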
@@ -45,12 +45,6 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
 */
public class DiscoveryNode implements Streamable, ToXContent {

    /**
     * Minimum version of a node to communicate with. This version corresponds to the minimum compatibility version
     * of the current elasticsearch major version.
     */
    public static final Version MINIMUM_DISCOVERY_NODE_VERSION = Version.CURRENT.minimumCompatibilityVersion();

    public static boolean localNode(Settings settings) {
        if (settings.get("node.local") != null) {
            return settings.getAsBoolean("node.local", false);

@@ -109,7 +103,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
    /**
     * Creates a new {@link DiscoveryNode}
     * <p>
     * <b>Note:</b> if the version of the node is unknown {@link #MINIMUM_DISCOVERY_NODE_VERSION} should be used.
     * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
     * it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
     * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
     * and updated.

@@ -126,7 +120,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
    /**
     * Creates a new {@link DiscoveryNode}
     * <p>
     * <b>Note:</b> if the version of the node is unknown {@link #MINIMUM_DISCOVERY_NODE_VERSION} should be used.
     * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
     * it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
     * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
     * and updated.

@@ -145,7 +139,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
    /**
     * Creates a new {@link DiscoveryNode}.
     * <p>
     * <b>Note:</b> if the version of the node is unknown {@link #MINIMUM_DISCOVERY_NODE_VERSION} should be used.
     * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
     * it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
     * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
     * and updated.

@@ -178,7 +172,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
    /**
     * Creates a new {@link DiscoveryNode}.
     * <p>
     * <b>Note:</b> if the version of the node is unknown {@link #MINIMUM_DISCOVERY_NODE_VERSION} should be used.
     * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current version.
     * it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
     * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
     * and updated.
@@ -106,7 +106,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
            }
        }
        this.allShardsStarted = allShardsStarted;

        this.primary = primary;
        if (primary != null) {
            this.primaryAsList = Collections.singletonList(primary);
@@ -69,6 +69,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
    private int relocatingShards = 0;

    private final Map<String, ObjectIntHashMap<String>> nodesPerAttributeNames = new HashMap<>();
    private final Map<String, Recoveries> recoveryiesPerNode = new HashMap<>();

    public RoutingNodes(ClusterState clusterState) {
        this(clusterState, true);

@@ -91,6 +92,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        // also fill replicaSet information
        for (ObjectCursor<IndexRoutingTable> indexRoutingTable : routingTable.indicesRouting().values()) {
            for (IndexShardRoutingTable indexShard : indexRoutingTable.value) {
                assert indexShard.primary != null;
                for (ShardRouting shard : indexShard) {
                    // to get all the shards belonging to an index, including the replicas,
                    // we define a replica set and keep track of it. A replica set is identified

@@ -107,16 +109,18 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                        // add the counterpart shard with relocatingNodeId reflecting the source from which
                        // it's relocating from.
                        ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
                        addInitialRecovery(targetShardRouting);
                        if (readOnly) {
                            targetShardRouting.freeze();
                        }
                        entries.add(targetShardRouting);
                        assignedShardsAdd(targetShardRouting);
                    } else if (!shard.active()) { // shards that are initializing without being relocated
                    } else if (shard.active() == false) { // shards that are initializing without being relocated
                        if (shard.primary()) {
                            inactivePrimaryCount++;
                        }
                        inactiveShardCount++;
                        addInitialRecovery(shard);
                    }
                } else {
                    final ShardRouting sr = getRouting(shard, readOnly);

@@ -132,6 +136,79 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        }
    }
    private void addRecovery(ShardRouting routing) {
        addRecovery(routing, true, false);
    }

    private void removeRecovery(ShardRouting routing) {
        addRecovery(routing, false, false);
    }

    public void addInitialRecovery(ShardRouting routing) {
        addRecovery(routing, true, true);
    }

    private void addRecovery(final ShardRouting routing, final boolean increment, final boolean initializing) {
        final int howMany = increment ? 1 : -1;
        assert routing.initializing() : "routing must be initializing: " + routing;
        Recoveries.getOrAdd(recoveryiesPerNode, routing.currentNodeId()).addIncoming(howMany);
        final String sourceNodeId;
        if (routing.relocatingNodeId() != null) { // this is a relocation-target
            sourceNodeId = routing.relocatingNodeId();
            if (routing.primary() && increment == false) { // primary is done relocating
                int numRecoveringReplicas = 0;
                for (ShardRouting assigned : assignedShards(routing)) {
                    if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) {
                        numRecoveringReplicas++;
                    }
                }
                // we transfer the recoveries to the relocated primary
                recoveryiesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas);
                recoveryiesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas);
            }
        } else if (routing.primary() == false) { // primary without relocationID is initial recovery
            ShardRouting primary = findPrimary(routing);
            if (primary == null && initializing) {
                primary = routingTable.index(routing.index()).shard(routing.shardId().id()).primary;
            } else if (primary == null) {
                throw new IllegalStateException("replica is initializing but primary is unassigned");
            }
            sourceNodeId = primary.currentNodeId();
        } else {
            sourceNodeId = null;
        }
        if (sourceNodeId != null) {
            Recoveries.getOrAdd(recoveryiesPerNode, sourceNodeId).addOutgoing(howMany);
        }
    }
    public int getIncomingRecoveries(String nodeId) {
        return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming();
    }

    public int getOutgoingRecoveries(String nodeId) {
        return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing();
    }

    private ShardRouting findPrimary(ShardRouting routing) {
        List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());
        ShardRouting primary = null;
        if (shardRoutings != null) {
            for (ShardRouting shardRouting : shardRoutings) {
                if (shardRouting.primary()) {
                    if (shardRouting.active()) {
                        return shardRouting;
                    } else if (primary == null) {
                        primary = shardRouting;
                    } else if (primary.relocatingNodeId() != null) {
                        primary = shardRouting;
                    }
                }
            }
        }
        return primary;
    }

    private static ShardRouting getRouting(ShardRouting src, boolean readOnly) {
        if (readOnly) {
            src.freeze(); // we just freeze and reuse this instance if we are read only
@@ -352,6 +429,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        if (shard.primary()) {
            inactivePrimaryCount++;
        }
        addRecovery(shard);
        assignedShardsAdd(shard);
    }

@@ -367,6 +445,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        ShardRouting target = shard.buildTargetRelocatingShard();
        node(target.currentNodeId()).add(target);
        assignedShardsAdd(target);
        addRecovery(target);
        return target;
    }

@@ -383,9 +462,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                inactivePrimaryCount--;
            }
        }
        removeRecovery(shard);
        shard.moveToStarted();
    }

    /**
     * Cancels the relocation of a shard; the shard must be relocating.
     */

@@ -440,6 +522,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            cancelRelocation(shard);
        }
        assignedShardsRemove(shard);
        if (shard.initializing()) {
            removeRecovery(shard);
        }
    }

    private void assignedShardsAdd(ShardRouting shard) {
@@ -749,6 +834,34 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            }
        }

        for (Map.Entry<String, Recoveries> recoveries : routingNodes.recoveryiesPerNode.entrySet()) {
            String node = recoveries.getKey();
            final Recoveries value = recoveries.getValue();
            int incoming = 0;
            int outgoing = 0;
            RoutingNode routingNode = routingNodes.nodesToShards.get(node);
            if (routingNode != null) { // node might have dropped out of the cluster
                for (ShardRouting routing : routingNode) {
                    if (routing.initializing()) {
                        incoming++;
                    } else if (routing.relocating()) {
                        outgoing++;
                    }
                    if (routing.primary() && (routing.initializing() && routing.relocatingNodeId() != null) == false) { // we don't count the initialization end of the primary relocation
                        List<ShardRouting> shardRoutings = routingNodes.assignedShards.get(routing.shardId());
                        for (ShardRouting assigned : shardRoutings) {
                            if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) {
                                outgoing++;
                            }
                        }
                    }
                }
            }
            assert incoming == value.incoming : incoming + " != " + value.incoming;
            assert outgoing == value.outgoing : outgoing + " != " + value.outgoing + " node: " + routingNode;
        }

        assert unassignedPrimaryCount == routingNodes.unassignedShards.getNumPrimaries() :
            "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().getNumPrimaries() + "]";
        assert unassignedIgnoredPrimaryCount == routingNodes.unassignedShards.getNumIgnoredPrimaries() :
@@ -856,4 +969,41 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            throw new IllegalStateException("can't modify RoutingNodes - readonly");
        }
    }

    private static final class Recoveries {
        private static final Recoveries EMPTY = new Recoveries();
        private int incoming = 0;
        private int outgoing = 0;

        int getTotal() {
            return incoming + outgoing;
        }

        void addOutgoing(int howMany) {
            assert outgoing + howMany >= 0 : outgoing + howMany + " must be >= 0";
            outgoing += howMany;
        }

        void addIncoming(int howMany) {
            assert incoming + howMany >= 0 : incoming + howMany + " must be >= 0";
            incoming += howMany;
        }

        int getOutgoing() {
            return outgoing;
        }

        int getIncoming() {
            return incoming;
        }

        public static Recoveries getOrAdd(Map<String, Recoveries> map, String key) {
            Recoveries recoveries = map.get(key);
            if (recoveries == null) {
                recoveries = new Recoveries();
                map.put(key, recoveries);
            }
            return recoveries;
        }
    }
}
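To see how the incoming/outgoing bookkeeping above is meant to balance out, here is a self-contained walk-through with a stripped-down copy of the counter and invented node ids; the real class is the private Recoveries type nested in RoutingNodes:

import java.util.HashMap;
import java.util.Map;

// Minimal stand-in for the Recoveries counter, plus a usage walk-through.
final class RecoveriesSketch {
    private int incoming, outgoing;

    void addIncoming(int howMany) { assert incoming + howMany >= 0; incoming += howMany; }
    void addOutgoing(int howMany) { assert outgoing + howMany >= 0; outgoing += howMany; }

    static RecoveriesSketch getOrAdd(Map<String, RecoveriesSketch> map, String key) {
        return map.computeIfAbsent(key, k -> new RecoveriesSketch());
    }

    public static void main(String[] args) {
        Map<String, RecoveriesSketch> perNode = new HashMap<>();
        // a replica starts recovering onto node2 from its primary on node1
        getOrAdd(perNode, "node2").addIncoming(1);
        getOrAdd(perNode, "node1").addOutgoing(1);
        // recovery completes: the same calls with howMany = -1, as removeRecovery() does
        getOrAdd(perNode, "node2").addIncoming(-1);
        getOrAdd(perNode, "node1").addOutgoing(-1);
    }
}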
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.routing.allocation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

@@ -45,6 +46,7 @@ import org.elasticsearch.common.settings.Settings;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.Set;

@@ -180,7 +182,10 @@ public class AllocationService extends AbstractComponent {
        routingNodes.unassigned().shuffle();
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
        boolean changed = false;
        for (FailedRerouteAllocation.FailedShard failedShard : failedShards) {
        // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
        List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
        orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
        for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
            changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
                System.nanoTime(), System.currentTimeMillis()));
        }
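The reordering above relies on Comparator.comparing over a boolean key: false sorts before true, so replica failures come first and their nodes land on the ignore list before the primaries are processed. A small stand-alone demonstration (FailedShardStub is an invented stand-in for FailedRerouteAllocation.FailedShard):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

final class FailedShardOrdering {

    static final class FailedShardStub {
        final String name;
        final boolean primary;

        FailedShardStub(String name, boolean primary) {
            this.name = name;
            this.primary = primary;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    public static void main(String[] args) {
        List<FailedShardStub> failed = new ArrayList<>(Arrays.asList(
            new FailedShardStub("primary-0", true),
            new FailedShardStub("replica-0", false),
            new FailedShardStub("replica-1", false)));
        // false sorts before true, so replicas are failed ahead of primaries
        failed.sort(Comparator.comparing(shard -> shard.primary));
        System.out.println(failed); // [replica-0, replica-1, primary-0]
    }
}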
@@ -364,35 +369,17 @@ public class AllocationService extends AbstractComponent {

    private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
        boolean changed = false;
        RoutingNodes routingNodes = allocation.routingNodes();
        final RoutingNodes routingNodes = allocation.routingNodes();
        if (routingNodes.unassigned().getNumPrimaries() == 0) {
            // move out if we don't have unassigned primaries
            return changed;
        }

        // go over and remove dangling replicas that are initializing for primary shards
        List<ShardRouting> shardsToFail = new ArrayList<>();
        for (ShardRouting shardEntry : routingNodes.unassigned()) {
            if (shardEntry.primary()) {
                for (ShardRouting routing : routingNodes.assignedShards(shardEntry)) {
                    if (!routing.primary() && routing.initializing()) {
                        shardsToFail.add(routing);
                    }
                }
            }
        }
        for (ShardRouting shardToFail : shardsToFail) {
            changed |= applyFailedShard(allocation, shardToFail, false,
                new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
        }

        // now, go over and elect a new primary if possible, not, from this code block on, if one is elected,
        // routingNodes.hasUnassignedPrimaries() will potentially be false

        for (ShardRouting shardEntry : routingNodes.unassigned()) {
            if (shardEntry.primary()) {
                // remove dangling replicas that are initializing for primary shards
                changed |= failReplicasForUnassignedPrimary(allocation, shardEntry);
                ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
                if (candidate != null) {
                    IndexMetaData index = allocation.metaData().index(candidate.index());

@@ -457,6 +444,22 @@ public class AllocationService extends AbstractComponent {
        return changed;
    }

    private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting primary) {
        List<ShardRouting> replicas = new ArrayList<>();
        for (ShardRouting routing : allocation.routingNodes().assignedShards(primary)) {
            if (!routing.primary() && routing.initializing()) {
                replicas.add(routing);
            }
        }
        boolean changed = false;
        for (ShardRouting routing : replicas) {
            changed |= applyFailedShard(allocation, routing, false,
                new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
        }
        return changed;
    }

    private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
        boolean dirty = false;
        // apply shards might be called several times with the same shard, ignore it

@@ -523,7 +526,6 @@ public class AllocationService extends AbstractComponent {
            logger.debug("{} ignoring shard failure, unknown index in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
            return false;
        }

        RoutingNodes routingNodes = allocation.routingNodes();

        RoutingNodes.RoutingNodeIterator matchedNode = routingNodes.routingNodeIter(failedShard.currentNodeId());

@@ -546,7 +548,10 @@ public class AllocationService extends AbstractComponent {
            logger.debug("{} ignoring shard failure, unknown allocation id in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
            return false;
        }

        if (failedShard.primary()) {
            // fail replicas first otherwise we move RoutingNodes into an inconsistent state
            failReplicasForUnassignedPrimary(allocation, failedShard);
        }
        // replace incoming instance to make sure we work on the latest one. Copy it to maintain information during modifications.
        failedShard = new ShardRouting(matchedNode.current());
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;

@@ -173,7 +174,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards

        private final float indexBalance;
        private final float shardBalance;
        private final float[] theta;
        private final float theta0;
        private final float theta1;

        public WeightFunction(float indexBalance, float shardBalance) {

@@ -181,37 +183,30 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            if (sum <= 0.0f) {
                throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum);
            }
            theta = new float[]{shardBalance / sum, indexBalance / sum};
            theta0 = shardBalance / sum;
            theta1 = indexBalance / sum;
            this.indexBalance = indexBalance;
            this.shardBalance = shardBalance;
        }

        public float weight(Operation operation, Balancer balancer, ModelNode node, String index) {
            final float weightShard = (node.numShards() - balancer.avgShardsPerNode());
            final float weightIndex = (node.numShards(index) - balancer.avgShardsPerNode(index));
            assert theta != null;
            return theta[0] * weightShard + theta[1] * weightIndex;
        public float weight(Balancer balancer, ModelNode node, String index) {
            return weight(balancer, node, index, 0);
        }

        }
        public float weightShardAdded(Balancer balancer, ModelNode node, String index) {
            return weight(balancer, node, index, 1);
        }

        public float weightShardRemoved(Balancer balancer, ModelNode node, String index) {
            return weight(balancer, node, index, -1);
        }

        private float weight(Balancer balancer, ModelNode node, String index, int numAdditionalShards) {
            final float weightShard = (node.numShards() + numAdditionalShards - balancer.avgShardsPerNode());
            final float weightIndex = (node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index));
            return theta0 * weightShard + theta1 * weightIndex;
        }
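The numeric effect of the refactor is easier to see in isolation: theta0 and theta1 are the normalized shard- and index-balance weights, and adding or removing a shard is simulated by a +1/-1 offset instead of mutating the model node. A self-contained sketch with invented averages (not the real Balancer/ModelNode plumbing):

final class WeightFunctionSketch {
    private final float theta0;
    private final float theta1;

    WeightFunctionSketch(float indexBalance, float shardBalance) {
        float sum = indexBalance + shardBalance;
        if (sum <= 0.0f) {
            throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum);
        }
        theta0 = shardBalance / sum;
        theta1 = indexBalance / sum;
    }

    // delta = +1 simulates weightShardAdded, -1 simulates weightShardRemoved, 0 is the plain weight
    float weight(int nodeShards, float avgShardsPerNode, int nodeIndexShards, float avgIndexShardsPerNode, int delta) {
        float weightShard = nodeShards + delta - avgShardsPerNode;
        float weightIndex = nodeIndexShards + delta - avgIndexShardsPerNode;
        return theta0 * weightShard + theta1 * weightIndex;
    }

    public static void main(String[] args) {
        WeightFunctionSketch w = new WeightFunctionSketch(0.55f, 0.45f);
        // adding one shard raises the weight by exactly theta0 + theta1 = 1.0
        System.out.println(w.weight(10, 8.0f, 3, 2.0f, +1) - w.weight(10, 8.0f, 3, 2.0f, 0)); // 1.0
    }
}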
        /**
         * An enum that denotes the actual operation the {@link WeightFunction} is
         * applied to.
         */
        public static enum Operation {
            /**
             * Provided during balance operations.
             */
            BALANCE,
            /**
             * Provided during initial allocation operation for unassigned shards.
             */
            ALLOCATE,
            /**
             * Provided during move operation.
             */
            MOVE
        }

        /**
@@ -227,6 +222,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards

        private final float threshold;
        private final MetaData metaData;
        private final float avgShardsPerNode;

        private final Predicate<ShardRouting> assignedFilter = shard -> shard.assignedToNode();

@@ -240,6 +236,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
            }
            metaData = routingNodes.metaData();
            avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / nodes.size();
        }

        /**

@@ -260,21 +257,13 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         * Returns the global average of shards per node
         */
        public float avgShardsPerNode() {
            return ((float) metaData.totalNumberOfShards()) / nodes.size();
            return avgShardsPerNode;
        }

        /**
         * Returns the global average of primaries per node
         */
        public float avgPrimariesPerNode() {
            return ((float) metaData.numberOfShards()) / nodes.size();
        }

        /**
         * Returns a new {@link NodeSorter} that sorts the nodes based on their
         * current weight with respect to the index passed to the sorter. The
         * returned sorter is not sorted. Use {@link NodeSorter#reset(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Operation, String)}
         * returned sorter is not sorted. Use {@link NodeSorter#reset(String)}
         * to sort based on an index.
         */
        private NodeSorter newNodeSorter() {

@@ -348,12 +337,33 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
                NodeSorter sorter = newNodeSorter();
                if (nodes.size() > 1) { /* skip if we only have one node */
                    for (String index : buildWeightOrderedIndidces(Operation.BALANCE, sorter)) {
                        sorter.reset(Operation.BALANCE, index);
                        final float[] weights = sorter.weights;
                        final ModelNode[] modelNodes = sorter.modelNodes;
                    AllocationDeciders deciders = allocation.deciders();
                    final ModelNode[] modelNodes = sorter.modelNodes;
                    final float[] weights = sorter.weights;
                    for (String index : buildWeightOrderedIndices(sorter)) {
                        IndexMetaData indexMetaData = metaData.index(index);

                        // find nodes that have a shard of this index or where shards of this index are allowed to stay
                        // move these nodes to the front of modelNodes so that we can only balance based on these nodes
                        int relevantNodes = 0;
                        for (int i = 0; i < modelNodes.length; i++) {
                            ModelNode modelNode = modelNodes[i];
                            if (modelNode.getIndex(index) != null
                                || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(routingNodes), allocation).type() != Type.NO) {
                                // swap nodes at position i and relevantNodes
                                modelNodes[i] = modelNodes[relevantNodes];
                                modelNodes[relevantNodes] = modelNode;
                                relevantNodes++;
                            }
                        }

                        if (relevantNodes < 2) {
                            continue;
                        }

                        sorter.reset(index, 0, relevantNodes);
                        int lowIdx = 0;
                        int highIdx = weights.length - 1;
                        int highIdx = relevantNodes - 1;
                        while (true) {
                            final ModelNode minNode = modelNodes[lowIdx];
                            final ModelNode maxNode = modelNodes[highIdx];
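The relevant-nodes pass above is an in-place partition: each matching node is swapped to the front, and only the prefix [0, relevantNodes) is sorted and balanced. The same idea in a stand-alone form (string node ids and a plain predicate instead of ModelNode and the deciders):

import java.util.Arrays;
import java.util.function.Predicate;

final class PartitionSketch {

    // Moves all elements matching the predicate to the front, returns how many matched.
    static int partitionRelevant(String[] nodes, Predicate<String> relevant) {
        int relevantNodes = 0;
        for (int i = 0; i < nodes.length; i++) {
            if (relevant.test(nodes[i])) {
                String tmp = nodes[i];            // swap nodes at position i and relevantNodes
                nodes[i] = nodes[relevantNodes];
                nodes[relevantNodes] = tmp;
                relevantNodes++;
            }
        }
        return relevantNodes;
    }

    public static void main(String[] args) {
        String[] nodes = {"node1", "skip1", "node2", "skip2", "node3"};
        int relevant = partitionRelevant(nodes, n -> n.startsWith("node"));
        // the prefix [0, relevant) now holds the relevant nodes, mirroring sorter.reset(index, 0, relevantNodes)
        System.out.println(relevant + " " + Arrays.toString(nodes)); // 3 [node1, node2, node3, skip2, skip1]
    }
}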
@@ -388,17 +398,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                            }
                            /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
                             * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */
                            if (tryRelocateShard(Operation.BALANCE, minNode, maxNode, index, delta)) {
                            if (tryRelocateShard(minNode, maxNode, index, delta)) {
                                /*
                                 * TODO we could be a bit smarter here, we don't need to fully sort necessarily
                                 * we could just find the place to insert linearly but the win might be minor
                                 * compared to the added complexity
                                 */
                                weights[lowIdx] = sorter.weight(Operation.BALANCE, modelNodes[lowIdx]);
                                weights[highIdx] = sorter.weight(Operation.BALANCE, modelNodes[highIdx]);
                                sorter.sort(0, weights.length);
                                weights[lowIdx] = sorter.weight(modelNodes[lowIdx]);
                                weights[highIdx] = sorter.weight(modelNodes[highIdx]);
                                sorter.sort(0, relevantNodes);
                                lowIdx = 0;
                                highIdx = weights.length - 1;
                                highIdx = relevantNodes - 1;
                                changed = true;
                                continue;
                            }

@@ -439,11 +449,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         * average. To re-balance we need to move shards back eventually likely
         * to the nodes we relocated them from.
         */
        private String[] buildWeightOrderedIndidces(Operation operation, NodeSorter sorter) {
        private String[] buildWeightOrderedIndices(NodeSorter sorter) {
            final String[] indices = this.indices.toArray(new String[this.indices.size()]);
            final float[] deltas = new float[indices.length];
            for (int i = 0; i < deltas.length; i++) {
                sorter.reset(operation, indices[i]);
                sorter.reset(indices[i]);
                deltas[i] = sorter.delta();
            }
            new IntroSorter() {

@@ -503,7 +513,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            final ModelNode sourceNode = nodes.get(node.nodeId());
            assert sourceNode != null;
            final NodeSorter sorter = newNodeSorter();
            sorter.reset(Operation.MOVE, shard.getIndex());
            sorter.reset(shard.getIndex());
            final ModelNode[] nodes = sorter.modelNodes;
            assert sourceNode.containsShard(shard);
            /*

@@ -517,7 +527,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                if (currentNode.getNodeId().equals(node.nodeId())) {
                    continue;
                }
                RoutingNode target = routingNodes.node(currentNode.getNodeId());
                RoutingNode target = currentNode.getRoutingNode(routingNodes);
                Decision allocationDecision = allocation.deciders().canAllocate(shard, target, allocation);
                Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation);
                Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
@@ -643,26 +653,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                    if (throttledNodes.contains(node)) {
                        continue;
                    }
                    /*
                     * The shard we add is removed below to simulate the
                     * addition for weight calculation we use Decision.ALWAYS to
                     * not violate the not null condition.
                     */
                    if (!node.containsShard(shard)) {
                        node.addShard(shard, Decision.ALWAYS);
                        float currentWeight = weight.weight(Operation.ALLOCATE, this, node, shard.index());
                        /*
                         * Remove the shard from the node again this is only a
                         * simulation
                         */
                        Decision removed = node.removeShard(shard);
                        assert removed != null;
                        // simulate weight if we would add shard to node
                        float currentWeight = weight.weightShardAdded(this, node, shard.index());
                        /*
                         * Unless the operation is not providing any gains we
                         * don't check deciders
                         */
                        if (currentWeight <= minWeight) {
                            Decision currentDecision = deciders.canAllocate(shard, routingNodes.node(node.getNodeId()), allocation);
                            Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(routingNodes), allocation);
                            NOUPDATE:
                            if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
                                if (currentWeight == minWeight) {
@@ -708,11 +707,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                    if (logger.isTraceEnabled()) {
                        logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
                    }
                    routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                    routingNodes.initialize(shard, minNode.getNodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                    changed = true;
                    continue; // don't add to ignoreUnassigned
                } else {
                    final RoutingNode node = routingNodes.node(minNode.getNodeId());
                    final RoutingNode node = minNode.getRoutingNode(routingNodes);
                    if (deciders.canAllocate(node, allocation).type() != Type.YES) {
                        if (logger.isTraceEnabled()) {
                            logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type());
@@ -748,7 +747,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         * balance model. Iff this method returns a <code>true</code> the relocation has already been executed on the
         * simulation model as well as on the cluster.
         */
        private boolean tryRelocateShard(Operation operation, ModelNode minNode, ModelNode maxNode, String idx, float minCost) {
        private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String idx, float minCost) {
            final ModelIndex index = maxNode.getIndex(idx);
            Decision decision = null;
            if (index != null) {

@@ -756,22 +755,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                    logger.trace("Try relocating shard for index index [{}] from node [{}] to node [{}]", idx, maxNode.getNodeId(),
                        minNode.getNodeId());
                }
                final RoutingNode node = routingNodes.node(minNode.getNodeId());
                ShardRouting candidate = null;
                final AllocationDeciders deciders = allocation.deciders();
                /* make a copy since we modify this list in the loop */
                final ArrayList<ShardRouting> shards = new ArrayList<>(index.getAllShards());
                for (ShardRouting shard : shards) {
                for (ShardRouting shard : index.getAllShards()) {
                    if (shard.started()) {
                        // skip initializing, unassigned and relocating shards we can't relocate them anyway
                        Decision allocationDecision = deciders.canAllocate(shard, node, allocation);
                        Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(routingNodes), allocation);
                        Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
                        if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
                            && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
                            Decision srcDecision;
                            if ((srcDecision = maxNode.removeShard(shard)) != null) {
                                minNode.addShard(shard, srcDecision);
                                final float delta = weight.weight(operation, this, minNode, idx) - weight.weight(operation, this, maxNode, idx);
                            if (maxNode.containsShard(shard)) {
                                // simulate moving shard from maxNode to minNode
                                final float delta = weight.weightShardAdded(this, minNode, idx) - weight.weightShardRemoved(this, maxNode, idx);
                                if (delta < minCost ||
                                    (candidate != null && delta == minCost && candidate.id() > shard.id())) {
                                    /* this last line is a tie-breaker to make the shard allocation alg deterministic

@@ -780,8 +775,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                                    candidate = shard;
                                    decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
                                }
                                minNode.removeShard(shard);
                                maxNode.addShard(shard, srcDecision);
                            }
                        }
                    }

@@ -799,11 +792,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                }
                /* now allocate on the cluster - if we are started we need to relocate the shard */
                if (candidate.started()) {
                    RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
                    routingNodes.relocate(candidate, lowRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                    routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));

                } else {
                    routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                    routingNodes.initialize(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                }
                return true;
@@ -822,8 +814,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
    static class ModelNode implements Iterable<ModelIndex> {
        private final String id;
        private final Map<String, ModelIndex> indices = new HashMap<>();
        /* cached stats - invalidated on add/remove and lazily calculated */
        private int numShards = -1;
        private int numShards = 0;
        // lazily calculated
        private RoutingNode routingNode;

        public ModelNode(String id) {
            this.id = id;

@@ -837,14 +830,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            return id;
        }

        public int numShards() {
            if (numShards == -1) {
                int sum = 0;
                for (ModelIndex index : indices.values()) {
                    sum += index.numShards();
                }
                numShards = sum;
        public RoutingNode getRoutingNode(RoutingNodes routingNodes) {
            if (routingNode == null) {
                routingNode = routingNodes.node(id);
            }
            return routingNode;
        }

        public int numShards() {
            return numShards;
        }

@@ -853,14 +846,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            return index == null ? 0 : index.numShards();
        }

        public Collection<ShardRouting> shards() {
            Collection<ShardRouting> result = new ArrayList<>();
            for (ModelIndex index : indices.values()) {
                result.addAll(index.getAllShards());
            }
            return result;
        }

        public int highestPrimary(String index) {
            ModelIndex idx = indices.get(index);
            if (idx != null) {

@@ -870,17 +855,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        }

        public void addShard(ShardRouting shard, Decision decision) {
            numShards = -1;
            ModelIndex index = indices.get(shard.index());
            if (index == null) {
                index = new ModelIndex(shard.index());
                indices.put(index.getIndexId(), index);
            }
            index.addShard(shard, decision);
            numShards++;
        }

        public Decision removeShard(ShardRouting shard) {
            numShards = -1;
            ModelIndex index = indices.get(shard.index());
            Decision removed = null;
            if (index != null) {

@@ -889,6 +873,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                    indices.remove(shard.index());
                }
            }
            numShards--;
            return removed;
        }

@@ -914,7 +899,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
    static final class ModelIndex {
        private final String id;
        private final Map<ShardRouting, Decision> shards = new HashMap<>();
        private int numPrimaries = -1;
        private int highestPrimary = -1;

        public ModelIndex(String id) {

@@ -938,10 +922,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            return id;
        }

        public Decision getDecicion(ShardRouting shard) {
            return shards.get(shard);
        }

        public int numShards() {
            return shards.size();
        }

@@ -950,26 +930,13 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            return shards.keySet();
        }

        public int numPrimaries() {
            if (numPrimaries == -1) {
                int num = 0;
                for (ShardRouting shard : shards.keySet()) {
                    if (shard.primary()) {
                        num++;
                    }
                }
                return numPrimaries = num;
            }
            return numPrimaries;
        }

        public Decision removeShard(ShardRouting shard) {
            highestPrimary = numPrimaries = -1;
            highestPrimary = -1;
            return shards.remove(shard);
        }

        public void addShard(ShardRouting shard, Decision decision) {
            highestPrimary = numPrimaries = -1;
            highestPrimary = -1;
            assert decision != null;
            assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
            shards.put(shard, decision);
@@ -1001,16 +968,20 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         * Resets the sorter, recalculates the weights per node and sorts the
         * nodes by weight, with minimal weight first.
         */
        public void reset(Operation operation, String index) {
        public void reset(String index, int from, int to) {
            this.index = index;
            for (int i = 0; i < weights.length; i++) {
                weights[i] = weight(operation, modelNodes[i]);
            for (int i = from; i < to; i++) {
                weights[i] = weight(modelNodes[i]);
            }
            sort(0, modelNodes.length);
            sort(from, to);
        }

        public float weight(Operation operation, ModelNode node) {
            return function.weight(operation, balancer, node, index);
        public void reset(String index) {
            reset(index, 0, modelNodes.length);
        }

        public float weight(ModelNode node) {
            return function.weight(balancer, node, index);
        }

        @Override
@@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -73,6 +74,14 @@ public abstract class AllocationDecider extends AbstractComponent {
        return Decision.ALWAYS;
    }

    /**
     * Returns a {@link Decision} whether the given shard routing can be allocated at all at this state of the
     * {@link RoutingAllocation}. The default is {@link Decision#ALWAYS}.
     */
    public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) {
        return Decision.ALWAYS;
    }

    /**
     * Returns a {@link Decision} whether the given node can allow any allocation at all at this state of the
     * {@link RoutingAllocation}. The default is {@link Decision#ALWAYS}.
@@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -120,6 +121,25 @@ public class AllocationDeciders extends AllocationDecider {
        return ret;
    }

    @Override
    public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) {
        Decision.Multi ret = new Decision.Multi();
        for (AllocationDecider allocationDecider : allocations) {
            Decision decision = allocationDecider.canAllocate(indexMetaData, node, allocation);
            // short track if a NO is returned.
            if (decision == Decision.NO) {
                if (!allocation.debugDecision()) {
                    return decision;
                } else {
                    ret.add(decision);
                }
            } else if (decision != Decision.ALWAYS) {
                ret.add(decision);
            }
        }
        return ret;
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
        Decision.Multi ret = new Decision.Multi();
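The new overload follows the same composition rule as the existing ones: a NO short-circuits unless the allocation is in debug mode, and ALWAYS is skipped as the neutral element. Reduced to an enum-based sketch — the aggregation of the collected decisions (worst one wins) is an assumption about Decision.Multi semantics, and the real class keeps the individual decisions for explain output:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class DecisionComposition {
    enum Decision { YES, THROTTLE, NO, ALWAYS }

    static Decision compose(List<Decision> decisions, boolean debugDecision) {
        List<Decision> collected = new ArrayList<>();
        for (Decision decision : decisions) {
            if (decision == Decision.NO) {
                if (!debugDecision) {
                    return Decision.NO; // short track: the first NO wins immediately
                }
                collected.add(decision); // in debug mode, keep collecting for the explanation
            } else if (decision != Decision.ALWAYS) {
                collected.add(decision); // ALWAYS adds no information and is dropped
            }
        }
        if (collected.contains(Decision.NO)) {
            return Decision.NO;
        }
        return collected.contains(Decision.THROTTLE) ? Decision.THROTTLE : Decision.YES;
    }

    public static void main(String[] args) {
        System.out.println(compose(Arrays.asList(Decision.YES, Decision.NO, Decision.THROTTLE), false)); // NO
        System.out.println(compose(Arrays.asList(Decision.ALWAYS, Decision.THROTTLE), false));           // THROTTLE
    }
}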
@@ -88,29 +88,37 @@ public class FilterAllocationDecider extends AllocationDecider {
        return shouldFilter(shardRouting, node, allocation);
    }

    @Override
    public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) {
        return shouldFilter(indexMetaData, node, allocation);
    }

    @Override
    public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        return shouldFilter(shardRouting, node, allocation);
    }

    private Decision shouldFilter(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        if (clusterRequireFilters != null) {
            if (!clusterRequireFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node does not match global required filters [%s]", clusterRequireFilters);
            }
        }
        if (clusterIncludeFilters != null) {
            if (!clusterIncludeFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node does not match global include filters [%s]", clusterIncludeFilters);
            }
        }
        if (clusterExcludeFilters != null) {
            if (clusterExcludeFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node matches global exclude filters [%s]", clusterExcludeFilters);
            }
        }
        Decision decision = shouldClusterFilter(node, allocation);
        if (decision != null) return decision;

        IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
        decision = shouldIndexFilter(allocation.routingNodes().metaData().index(shardRouting.index()), node, allocation);
        if (decision != null) return decision;

        return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
    }

    private Decision shouldFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
        Decision decision = shouldClusterFilter(node, allocation);
        if (decision != null) return decision;

        decision = shouldIndexFilter(indexMd, node, allocation);
        if (decision != null) return decision;

        return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
    }

    private Decision shouldIndexFilter(IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
        if (indexMd.requireFilters() != null) {
            if (!indexMd.requireFilters().match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node does not match index required filters [%s]", indexMd.requireFilters());

@@ -126,8 +134,26 @@ public class FilterAllocationDecider extends AllocationDecider {
                return allocation.decision(Decision.NO, NAME, "node matches index exclude filters [%s]", indexMd.excludeFilters());
            }
        }
        return null;
    }

        return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
    private Decision shouldClusterFilter(RoutingNode node, RoutingAllocation allocation) {
        if (clusterRequireFilters != null) {
            if (!clusterRequireFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node does not match global required filters [%s]", clusterRequireFilters);
            }
        }
        if (clusterIncludeFilters != null) {
            if (!clusterIncludeFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node does not match global include filters [%s]", clusterIncludeFilters);
            }
        }
        if (clusterExcludeFilters != null) {
            if (clusterExcludeFilters.match(node.node())) {
                return allocation.decision(Decision.NO, NAME, "node matches global exclude filters [%s]", clusterExcludeFilters);
            }
        }
        return null;
    }

    private void setClusterRequireFilters(Settings settings) {
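The factored-out helpers implement the same three-way rule at the cluster and index level: require filters must match, include filters must match, exclude filters must not. A compact sketch with plain predicates standing in for the real DiscoveryNodeFilters:

import java.util.function.Predicate;

final class FilterSketch {

    static boolean allowed(String node, Predicate<String> require, Predicate<String> include, Predicate<String> exclude) {
        if (require != null && !require.test(node)) {
            return false; // does not match required filters
        }
        if (include != null && !include.test(node)) {
            return false; // does not match include filters
        }
        if (exclude != null && exclude.test(node)) {
            return false; // matches exclude filters
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(allowed("node1", n -> n.startsWith("node"), null, n -> n.equals("node2"))); // true
        System.out.println(allowed("node2", n -> n.startsWith("node"), null, n -> n.equals("node2"))); // false: excluded
    }
}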
@@ -44,22 +44,24 @@ public class NodeVersionAllocationDecider extends AllocationDecider {

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        String sourceNodeId = shardRouting.currentNodeId();
        /* if sourceNodeId is not null we do a relocation and just check the version of the node
         * that we are currently allocate on. If not we are initializing and recover from primary.*/
        if (sourceNodeId == null) { // we allocate - check primary
            if (shardRouting.primary()) {
                // we are the primary we can allocate wherever
        if (shardRouting.primary()) {
            if (shardRouting.currentNodeId() == null) {
                // fresh primary, we can allocate wherever
                return allocation.decision(Decision.YES, NAME, "primary shard can be allocated anywhere");
            } else {
                // relocating primary, only migrate to newer host
                return isVersionCompatible(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation);
            }
        } else {
            final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
            if (primary == null) { // we have a primary - it's a start ;)
            // check that active primary has a newer version so that peer recovery works
            if (primary != null) {
                return isVersionCompatible(allocation.routingNodes(), primary.currentNodeId(), node, allocation);
            } else {
                // ReplicaAfterPrimaryActiveAllocationDecider should prevent this case from occurring
                return allocation.decision(Decision.YES, NAME, "no active primary shard yet");
            }
            sourceNodeId = primary.currentNodeId();
        }
        return isVersionCompatible(allocation.routingNodes(), sourceNodeId, node, allocation);

    }

    private Decision isVersionCompatible(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, RoutingAllocation allocation) {
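The reworked decider boils down to one invariant: a shard may only recover onto a node whose version is at least the source node's version, since an older node cannot read data written by a newer one. Stripped of the routing-table plumbing, with integer stand-ins for Version:

final class VersionCompatSketch {

    static boolean canRecover(int sourceNodeVersion, int targetNodeVersion) {
        return targetNodeVersion >= sourceNodeVersion; // an older node cannot read a newer node's data
    }

    public static void main(String[] args) {
        System.out.println(canRecover(2, 3)); // true: relocating onto the newer host is fine
        System.out.println(canRecover(3, 2)); // false: the shard's data would be unreadable
    }
}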
@ -50,26 +50,36 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
|
|||
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
|
||||
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
|
||||
public static final String NAME = "throttling";
|
||||
public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries";
|
||||
|
||||
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
|
||||
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER);

    private volatile int primariesInitialRecoveries;
    private volatile int concurrentRecoveries;
    private volatile int concurrentIncomingRecoveries;
    private volatile int concurrentOutgoingRecoveries;

    @Inject
    public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
        super(settings);
        this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings);
        this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings);
        logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
        concurrentIncomingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.get(settings);
        concurrentOutgoingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.get(settings);

        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, this::setConcurrentIncomingRecoverries);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, this::setConcurrentOutgoingRecoverries);

        logger.debug("using node_concurrent_outgoing_recoveries [{}], node_concurrent_incoming_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentOutgoingRecoveries, concurrentIncomingRecoveries, primariesInitialRecoveries);
    }

    private void setConcurrentRecoveries(int concurrentRecoveries) {
        this.concurrentRecoveries = concurrentRecoveries;
    }

    private void setConcurrentIncomingRecoverries(int concurrentIncomingRecoveries) {
        this.concurrentIncomingRecoveries = concurrentIncomingRecoveries;
    }

    private void setConcurrentOutgoingRecoverries(int concurrentOutgoingRecoveries) {
        this.concurrentOutgoingRecoveries = concurrentOutgoingRecoveries;
    }

    private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) {

@@ -99,7 +109,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
            }
        }
    }

    // TODO should we allow shards not allocated post API to always allocate?
    // either primary or replica doing recovery (from peer shard)

    // count the number of recoveries on the node, it's for both target (INITIALIZING) and source (RELOCATING)

@@ -108,17 +118,16 @@ public class ThrottlingAllocationDecider extends AllocationDecider {

    @Override
    public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
        int currentRecoveries = 0;
        for (ShardRouting shard : node) {
            if (shard.initializing()) {
                currentRecoveries++;
            }
        }
        if (currentRecoveries >= concurrentRecoveries) {
            return allocation.decision(Decision.THROTTLE, NAME, "too many shards currently recovering [%d], limit: [%d]",
                currentRecoveries, concurrentRecoveries);
        } else {
            return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries);
        int currentOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(node.nodeId());
        int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
        if (currentOutRecoveries >= concurrentOutgoingRecoveries) {
            return allocation.decision(Decision.THROTTLE, NAME, "too many outgoing shards currently recovering [%d], limit: [%d]",
                currentOutRecoveries, concurrentOutgoingRecoveries);
        } else if (currentInRecoveries >= concurrentIncomingRecoveries) {
            return allocation.decision(Decision.THROTTLE, NAME, "too many incoming shards currently recovering [%d], limit: [%d]",
                currentInRecoveries, concurrentIncomingRecoveries);
        } else {
            return allocation.decision(Decision.YES, NAME, "below shard recovery limit of outgoing: [%d] incoming: [%d]", concurrentOutgoingRecoveries, concurrentIncomingRecoveries);
        }
    }
}
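The split into separate incoming and outgoing limits is easy to reason about in isolation. A minimal sketch of the decision shape, using plain counters instead of Elasticsearch's RoutingNodes (the class and names here are illustrative, not ES APIs):

enum ThrottleDecision { YES, THROTTLE }

final class RecoveryThrottle {
    private final int maxIncoming;
    private final int maxOutgoing;

    RecoveryThrottle(int maxIncoming, int maxOutgoing) {
        this.maxIncoming = maxIncoming;
        this.maxOutgoing = maxOutgoing;
    }

    // Mirrors the decider's shape: check outgoing first, then incoming.
    ThrottleDecision canAllocate(int currentOutgoing, int currentIncoming) {
        if (currentOutgoing >= maxOutgoing) {
            return ThrottleDecision.THROTTLE; // node is already a busy recovery source
        } else if (currentIncoming >= maxIncoming) {
            return ThrottleDecision.THROTTLE; // node is already a busy recovery target
        }
        return ThrottleDecision.YES;
    }

    public static void main(String[] args) {
        RecoveryThrottle throttle = new RecoveryThrottle(2, 2);
        System.out.println(throttle.canAllocate(0, 1)); // YES
        System.out.println(throttle.canAllocate(2, 0)); // THROTTLE
    }
}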
@@ -65,6 +65,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -133,6 +134,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

    private final ClusterBlocks.Builder initialBlocks;

    private final TaskManager taskManager;

    private volatile ScheduledFuture reconnectToNodes;

    @Inject

@@ -159,6 +162,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);

        initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock());

        taskManager = transportService.getTaskManager();
    }

    private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {

@@ -308,6 +313,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                                               final ClusterStateTaskExecutor<T> executor,
                                               final ClusterStateTaskListener listener
    ) {
        innerSubmitStateUpdateTask(source, task, config, executor, safe(listener, logger));
    }

    private <T> void innerSubmitStateUpdateTask(final String source, final T task,
                                                final ClusterStateTaskConfig config,
                                                final ClusterStateTaskExecutor executor,
                                                final SafeClusterStateTaskListener listener) {
        if (!lifecycle.started()) {
            return;
        }

@@ -372,6 +384,10 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        return updateTasksExecutor.getMaxTaskWaitTime();
    }

    @Override
    public TaskManager getTaskManager() {
        return taskManager;
    }

    /** asserts that the current thread is the cluster state update thread */
    public boolean assertClusterStateThread() {

@@ -631,6 +647,95 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

    }

    private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, ESLogger logger) {
        if (listener instanceof AckedClusterStateTaskListener) {
            return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, logger);
        } else {
            return new SafeClusterStateTaskListener(listener, logger);
        }
    }

    private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
        private final ClusterStateTaskListener listener;
        private final ESLogger logger;

        public SafeClusterStateTaskListener(ClusterStateTaskListener listener, ESLogger logger) {
            this.listener = listener;
            this.logger = logger;
        }

        @Override
        public void onFailure(String source, Throwable t) {
            try {
                listener.onFailure(source, t);
            } catch (Exception e) {
                logger.error("exception thrown by listener notifying of failure [{}] from [{}]", e, t, source);
            }
        }

        @Override
        public void onNoLongerMaster(String source) {
            try {
                listener.onNoLongerMaster(source);
            } catch (Exception e) {
                logger.error("exception thrown by listener while notifying no longer master from [{}]", e, source);
            }
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            try {
                listener.clusterStateProcessed(source, oldState, newState);
            } catch (Exception e) {
                logger.error(
                        "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n{}\nnew cluster state:\n{}",
                        e,
                        source,
                        oldState.prettyPrint(),
                        newState.prettyPrint());
            }
        }
    }

    private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener {
        private final AckedClusterStateTaskListener listener;
        private final ESLogger logger;

        public SafeAckedClusterStateTaskListener(AckedClusterStateTaskListener listener, ESLogger logger) {
            super(listener, logger);
            this.listener = listener;
            this.logger = logger;
        }

        @Override
        public boolean mustAck(DiscoveryNode discoveryNode) {
            return listener.mustAck(discoveryNode);
        }

        @Override
        public void onAllNodesAcked(@Nullable Throwable t) {
            try {
                listener.onAllNodesAcked(t);
            } catch (Exception e) {
                logger.error("exception thrown by listener while notifying on all nodes acked [{}]", e, t);
            }
        }

        @Override
        public void onAckTimeout() {
            try {
                listener.onAckTimeout();
            } catch (Exception e) {
                logger.error("exception thrown by listener while notifying on ack timeout", e);
            }
        }

        @Override
        public TimeValue ackTimeout() {
            return listener.ackTimeout();
        }
    }

    class UpdateTask<T> extends SourcePrioritizedRunnable {

        public final T task;
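The Safe* wrappers implement a simple but important pattern: never let a listener's exception escape onto the cluster state update thread. A generic sketch of the same pattern in plain Java (Consumer stands in for the ES listener interfaces):

import java.util.function.Consumer;

final class SafeListener<T> {
    private final Consumer<T> delegate;

    SafeListener(Consumer<T> delegate) {
        this.delegate = delegate;
    }

    // Invoke the wrapped callback; swallow and report anything it throws
    // so the calling thread (e.g. a state-update loop) keeps running.
    void notifySafely(T event) {
        try {
            delegate.accept(event);
        } catch (Exception e) {
            System.err.println("listener failed for event [" + event + "]: " + e);
        }
    }

    public static void main(String[] args) {
        SafeListener<String> listener = new SafeListener<>(s -> { throw new RuntimeException("boom"); });
        listener.notifySafely("cluster state changed"); // logged, not propagated
    }
}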
@@ -24,7 +24,6 @@ import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;

import java.io.BufferedInputStream;

@@ -97,6 +96,7 @@ public class FsBlobContainer extends AbstractBlobContainer {
    @Override
    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
        final Path file = path.resolve(blobName);
        // TODO: why is this not specifying CREATE_NEW? Do we really need to be able to truncate existing files?
        try (OutputStream outputStream = Files.newOutputStream(file)) {
            Streams.copy(inputStream, outputStream, new byte[blobStore.bufferSizeInBytes()]);
        }

@@ -104,16 +104,6 @@ public class FsBlobContainer extends AbstractBlobContainer {
        IOUtils.fsync(path, true);
    }

    @Override
    public void writeBlob(String blobName, BytesReference data) throws IOException {
        final Path file = path.resolve(blobName);
        try (OutputStream outputStream = Files.newOutputStream(file)) {
            data.writeTo(outputStream);
        }
        IOUtils.fsync(file, false);
        IOUtils.fsync(path, true);
    }

    @Override
    public void move(String source, String target) throws IOException {
        Path sourcePath = path.resolve(source);
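Writing a blob and then fsyncing both the file and its parent directory is the standard durability recipe the removed overload followed. A standalone sketch with plain java.nio instead of Lucene's IOUtils; the directory fsync is POSIX-specific and may be refused on some platforms, as the catch block notes:

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class DurableWrite {
    // Copy the stream to `file`, then force contents and metadata to disk.
    static void writeDurably(Path file, InputStream in) throws IOException {
        try (FileChannel out = FileChannel.open(file,
                StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                ByteBuffer bb = ByteBuffer.wrap(buffer, 0, read);
                while (bb.hasRemaining()) {
                    out.write(bb);
                }
            }
            out.force(true); // analogous to IOUtils.fsync(file, false)
        }
        // fsync the parent directory so the new directory entry is durable
        try (FileChannel dir = FileChannel.open(file.getParent(), StandardOpenOption.READ)) {
            dir.force(true);
        } catch (IOException e) {
            // some filesystems/OSes do not support fsync on directories
        }
    }
}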
@@ -22,8 +22,10 @@ package org.elasticsearch.common.blobstore.support;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.bytes.BytesReference;

import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Map;

@@ -57,4 +59,11 @@ public abstract class AbstractBlobContainer implements BlobContainer {
            deleteBlob(blob);
        }
    }

    @Override
    public void writeBlob(String blobName, BytesReference bytes) throws IOException {
        try (InputStream stream = bytes.streamInput()) {
            writeBlob(blobName, stream, bytes.length());
        }
    }
}
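Pushing the byte-array overload down to the streaming primitive keeps a single code path responsible for durability. The same shape in a minimal sketch (interface and names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

interface BlobSink {
    // the single primitive every implementation must provide
    void writeBlob(String name, InputStream in, long size) throws IOException;

    // convenience overload expressed in terms of the primitive
    default void writeBlob(String name, byte[] bytes) throws IOException {
        try (InputStream in = new ByteArrayInputStream(bytes)) {
            writeBlob(name, in, bytes.length);
        }
    }
}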
@@ -296,7 +296,7 @@ public class Cache<K, V> {
    }

    public static final int NUMBER_OF_SEGMENTS = 256;
    private final CacheSegment<K, V>[] segments = new CacheSegment[NUMBER_OF_SEGMENTS];
    @SuppressWarnings("unchecked") private final CacheSegment<K, V>[] segments = new CacheSegment[NUMBER_OF_SEGMENTS];

    {
        for (int i = 0; i < segments.length; i++) {

@@ -432,7 +432,7 @@ public class Cache<K, V> {
                promote(tuple.v1(), now);
            }
            if (replaced) {
                removalListener.onRemoval(new RemovalNotification(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED));
                removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED));
            }
        }
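Java cannot instantiate arrays of a parameterized type, so a raw array plus @SuppressWarnings("unchecked") at the declaration is the idiomatic workaround the Cache change applies. In isolation:

import java.util.ArrayList;
import java.util.List;

final class Segments<K> {
    // `new List<K>[8]` is illegal; create a raw array and suppress the
    // resulting unchecked warning at the narrowest possible scope.
    @SuppressWarnings("unchecked")
    private final List<K>[] segments = new List[8];

    Segments() {
        for (int i = 0; i < segments.length; i++) {
            segments[i] = new ArrayList<>();
        }
    }
}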
@@ -146,7 +146,7 @@ public final class GeoPoint {

    @Override
    public String toString() {
        return "[" + lat + ", " + lon + "]";
        return lat + ", " + lon;
    }

    public static GeoPoint parseFromLatLon(String latLon) {
@@ -21,6 +21,7 @@ package org.elasticsearch.common.geo;

import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.util.GeoDistanceUtils;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.unit.DistanceUnit;

@@ -65,19 +66,11 @@ public class GeoUtils {
    /** Earth ellipsoid polar distance in meters */
    public static final double EARTH_POLAR_DISTANCE = Math.PI * EARTH_SEMI_MINOR_AXIS;

    /** Returns the maximum distance/radius from the point 'center' before overlapping */
    public static double maxRadialDistance(GeoPoint center) {
        if (Math.abs(center.lat()) == 90.0) {
            return SloppyMath.haversin(center.lat(), center.lon(), 0, center.lon()) * 1000.0;
        }
        return SloppyMath.haversin(center.lat(), center.lon(), center.lat(), (180.0 + center.lon()) % 360) * 1000.0;
    }

    /** Returns the minimum between the provided distance 'initialRadius' and the
     * maximum distance/radius from the point 'center' before overlapping
     **/
    public static double maxRadialDistance(GeoPoint center, double initialRadius) {
        final double maxRadius = maxRadialDistance(center);
        final double maxRadius = GeoDistanceUtils.maxRadialDistanceMeters(center.lon(), center.lat());
        return Math.min(initialRadius, maxRadius);
    }

@@ -384,7 +377,7 @@ public class GeoUtils {
    if (parser.currentToken() == Token.START_OBJECT) {
        while (parser.nextToken() != Token.END_OBJECT) {
            if (parser.currentToken() == Token.FIELD_NAME) {
                String field = parser.text();
                String field = parser.currentName();
                if (LATITUDE.equals(field)) {
                    parser.nextToken();
                    switch (parser.currentToken()) {
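The deleted helper computed the largest radius around a point that does not wrap onto itself: from a pole, measure down to the equator; elsewhere, measure to the opposite meridian at the same latitude (the work now delegated to Lucene's GeoDistanceUtils.maxRadialDistanceMeters). A self-contained sketch of that rule; note SloppyMath.haversin returned kilometers (hence the * 1000.0 above), while this version folds the unit in and uses the mean earth radius, so values differ slightly from Lucene's:

final class MaxRadius {
    static final double EARTH_MEAN_RADIUS_METERS = 6_371_008.8;

    // great-circle distance in meters between two lat/lon points (haversine)
    static double haversinMeters(double lat1, double lon1, double lat2, double lon2) {
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
                 + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
                   * Math.sin(dLon / 2) * Math.sin(dLon / 2);
        return 2 * EARTH_MEAN_RADIUS_METERS * Math.asin(Math.sqrt(a));
    }

    // same rule as the removed helper: at the poles measure to the equator,
    // elsewhere measure to the antipodal meridian at the same latitude
    static double maxRadialDistance(double lat, double lon) {
        if (Math.abs(lat) == 90.0) {
            return haversinMeters(lat, lon, 0, lon);
        }
        return haversinMeters(lat, lon, lat, (180.0 + lon) % 360);
    }

    public static void main(String[] args) {
        System.out.printf("max radius at (48.86, 2.35): %.0f m%n", maxRadialDistance(48.86, 2.35));
    }
}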
@@ -48,13 +48,21 @@ import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.AccessDeniedException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystemException;
import java.nio.file.FileSystemLoopException;
import java.nio.file.NoSuchFileException;
import java.nio.file.NotDirectoryException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;

import static org.elasticsearch.ElasticsearchException.readException;

@@ -594,11 +602,41 @@ public abstract class StreamInput extends InputStream {
            case 13:
                return (T) readStackTrace(new FileNotFoundException(readOptionalString()), this);
            case 14:
                final int subclass = readVInt();
                final String file = readOptionalString();
                final String other = readOptionalString();
                final String reason = readOptionalString();
                readOptionalString(); // skip the msg - it's composed from file, other and reason
                return (T) readStackTrace(new NoSuchFileException(file, other, reason), this);
                final Throwable throwable;
                switch (subclass) {
                    case 0:
                        throwable = new NoSuchFileException(file, other, reason);
                        break;
                    case 1:
                        throwable = new NotDirectoryException(file);
                        break;
                    case 2:
                        throwable = new DirectoryNotEmptyException(file);
                        break;
                    case 3:
                        throwable = new AtomicMoveNotSupportedException(file, other, reason);
                        break;
                    case 4:
                        throwable = new FileAlreadyExistsException(file, other, reason);
                        break;
                    case 5:
                        throwable = new AccessDeniedException(file, other, reason);
                        break;
                    case 6:
                        throwable = new FileSystemLoopException(file);
                        break;
                    case 7:
                        throwable = new FileSystemException(file, other, reason);
                        break;
                    default:
                        throw new IllegalStateException("unknown FileSystemException with index " + subclass);
                }
                return (T) readStackTrace(throwable, this);
            case 15:
                return (T) readStackTrace(new OutOfMemoryError(readOptionalString()), this);
            case 16:

@@ -607,6 +645,8 @@ public abstract class StreamInput extends InputStream {
                return (T) readStackTrace(new LockObtainFailedException(readOptionalString(), readThrowable()), this);
            case 18:
                return (T) readStackTrace(new InterruptedException(readOptionalString()), this);
            case 19:
                return (T) readStackTrace(new IOException(readOptionalString(), readThrowable()), this);
            default:
                assert false : "no such exception for id: " + key;
        }

@@ -659,6 +699,18 @@ public abstract class StreamInput extends InputStream {
        return readNamedWriteable(ScoreFunctionBuilder.class);
    }

    /**
     * Reads a list of objects
     */
    public <T> List<T> readList(StreamInputReader<T> reader) throws IOException {
        int count = readVInt();
        List<T> builder = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            builder.add(reader.read(this));
        }
        return builder;
    }

    public static StreamInput wrap(BytesReference reference) {
        if (reference.hasArray() == false) {
            reference = reference.toBytesArray();
@@ -17,13 +17,17 @@
 * under the License.
 */

package org.elasticsearch.repositories.hdfs;

import org.apache.hadoop.fs.FileSystem;
package org.elasticsearch.common.io.stream;

import java.io.IOException;

interface FsCallback<V> {

    V doInHdfs(FileSystem fs) throws IOException;
/**
 * Defines a method for reading a list of objects from StreamInput.
 *
 * It can be used in {@link StreamInput#readList(StreamInputReader)} for reading
 * lists of immutable objects that implement StreamInput accepting constructors.
 */
@FunctionalInterface
public interface StreamInputReader<T> {
    T read(StreamInput t) throws IOException;
}
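Because StreamInputReader is a functional interface, a class whose constructor accepts the stream can be passed to readList as a constructor reference. A self-contained sketch of that pattern with stand-in types (Node, Reader, and the Data streams here are illustrative, not the Elasticsearch classes):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class ListSerialization {
    static final class Node {
        final String name;
        Node(String name) { this.name = name; }
        Node(DataInput in) throws IOException { this(in.readUTF()); } // stream-accepting constructor
        void writeTo(DataOutput out) throws IOException { out.writeUTF(name); }
    }

    @FunctionalInterface
    interface Reader<T> { T read(DataInput in) throws IOException; }

    static <T> List<T> readList(DataInput in, Reader<T> reader) throws IOException {
        int count = in.readInt();
        List<T> list = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            list.add(reader.read(in));
        }
        return list;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(2);
        new Node("a").writeTo(out);
        new Node("b").writeTo(out);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        List<Node> nodes = readList(in, Node::new); // constructor reference matches Reader<Node>
        System.out.println(nodes.size()); // 2
    }
}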
@@ -44,7 +44,15 @@ import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.ClosedChannelException;
import java.nio.file.AccessDeniedException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystemException;
import java.nio.file.FileSystemLoopException;
import java.nio.file.NoSuchFileException;
import java.nio.file.NotDirectoryException;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.List;

@@ -567,11 +575,28 @@ public abstract class StreamOutput extends OutputStream {
        } else if (throwable instanceof FileNotFoundException) {
            writeVInt(13);
            writeCause = false;
        } else if (throwable instanceof NoSuchFileException) {
        } else if (throwable instanceof FileSystemException) {
            writeVInt(14);
            writeOptionalString(((NoSuchFileException) throwable).getFile());
            writeOptionalString(((NoSuchFileException) throwable).getOtherFile());
            writeOptionalString(((NoSuchFileException) throwable).getReason());
            if (throwable instanceof NoSuchFileException) {
                writeVInt(0);
            } else if (throwable instanceof NotDirectoryException) {
                writeVInt(1);
            } else if (throwable instanceof DirectoryNotEmptyException) {
                writeVInt(2);
            } else if (throwable instanceof AtomicMoveNotSupportedException) {
                writeVInt(3);
            } else if (throwable instanceof FileAlreadyExistsException) {
                writeVInt(4);
            } else if (throwable instanceof AccessDeniedException) {
                writeVInt(5);
            } else if (throwable instanceof FileSystemLoopException) {
                writeVInt(6);
            } else {
                writeVInt(7);
            }
            writeOptionalString(((FileSystemException) throwable).getFile());
            writeOptionalString(((FileSystemException) throwable).getOtherFile());
            writeOptionalString(((FileSystemException) throwable).getReason());
            writeCause = false;
        } else if (throwable instanceof OutOfMemoryError) {
            writeVInt(15);

@@ -583,6 +608,8 @@ public abstract class StreamOutput extends OutputStream {
        } else if (throwable instanceof InterruptedException) {
            writeVInt(18);
            writeCause = false;
        } else if (throwable instanceof IOException) {
            writeVInt(19);
        } else {
            ElasticsearchException ex;
            if (throwable instanceof ElasticsearchException && ElasticsearchException.isRegistered(throwable.getClass())) {

@@ -656,4 +683,14 @@ public abstract class StreamOutput extends OutputStream {
        writeDouble(geoPoint.lat());
        writeDouble(geoPoint.lon());
    }

    /**
     * Writes a list of {@link Writeable} objects
     */
    public <T extends Writeable<T>> void writeList(List<T> list) throws IOException {
        writeVInt(list.size());
        for (T obj : list) {
            obj.writeTo(this);
        }
    }
}
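The wire format above is a numeric tag per FileSystemException subclass plus the three shared fields. A standalone round-trip sketch of that scheme using plain Data streams instead of ES's StreamOutput/StreamInput; real code needs optional-string handling for null fields, which is elided here via String.valueOf:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.file.AccessDeniedException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystemException;
import java.nio.file.FileSystemLoopException;
import java.nio.file.NoSuchFileException;
import java.nio.file.NotDirectoryException;

final class FsExceptionCodec {
    static void write(DataOutput out, FileSystemException e) throws IOException {
        int tag;
        if (e instanceof NoSuchFileException) tag = 0;
        else if (e instanceof NotDirectoryException) tag = 1;
        else if (e instanceof DirectoryNotEmptyException) tag = 2;
        else if (e instanceof AtomicMoveNotSupportedException) tag = 3;
        else if (e instanceof FileAlreadyExistsException) tag = 4;
        else if (e instanceof AccessDeniedException) tag = 5;
        else if (e instanceof FileSystemLoopException) tag = 6;
        else tag = 7; // generic FileSystemException
        out.writeInt(tag);
        out.writeUTF(String.valueOf(e.getFile()));      // lossy for null, fine for a sketch
        out.writeUTF(String.valueOf(e.getOtherFile()));
        out.writeUTF(String.valueOf(e.getReason()));
    }

    static FileSystemException read(DataInput in) throws IOException {
        int tag = in.readInt();
        String file = in.readUTF(), other = in.readUTF(), reason = in.readUTF();
        switch (tag) {
            case 0: return new NoSuchFileException(file, other, reason);
            case 1: return new NotDirectoryException(file);
            case 2: return new DirectoryNotEmptyException(file);
            case 3: return new AtomicMoveNotSupportedException(file, other, reason);
            case 4: return new FileAlreadyExistsException(file, other, reason);
            case 5: return new AccessDeniedException(file, other, reason);
            case 6: return new FileSystemLoopException(file);
            case 7: return new FileSystemException(file, other, reason);
            default: throw new IllegalStateException("unknown tag " + tag);
        }
    }
}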
@@ -284,7 +284,8 @@ public class Lucene {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }

@@ -667,19 +668,11 @@ public class Lucene {
                throw new IllegalStateException(message);
            }
            @Override
            public int advance(int arg0) throws IOException {
                throw new IllegalStateException(message);
            }
            @Override
            public long cost() {
                throw new IllegalStateException(message);
            }
            @Override
            public int docID() {
                throw new IllegalStateException(message);
            }
            @Override
            public int nextDoc() throws IOException {
            public DocIdSetIterator iterator() {
                throw new IllegalStateException(message);
            }
        };

@@ -757,10 +750,10 @@ public class Lucene {
        if (scorer == null) {
            return new Bits.MatchNoBits(maxDoc);
        }
        final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator();
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        final DocIdSetIterator iterator;
        if (twoPhase == null) {
            iterator = scorer;
            iterator = scorer.iterator();
        } else {
            iterator = twoPhase.approximation();
        }
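After this Lucene API change a Scorer is no longer itself a DocIdSetIterator: matching docs come from scorer.iterator(), or from twoPhaseIterator() when a cheap approximation exists. A hedged helper showing the new traversal idiom (assumes a Lucene version with these methods, i.e. 5.5+):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;

final class ScorerTraversal {
    // Count matching docs, preferring the two-phase form so the expensive
    // match confirmation only runs on approximation hits.
    static int countMatches(Scorer scorer) throws IOException {
        TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        DocIdSetIterator approximation = twoPhase == null ? scorer.iterator() : twoPhase.approximation();
        int count = 0;
        for (int doc = approximation.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = approximation.nextDoc()) {
            if (twoPhase == null || twoPhase.matches()) {
                count++;
            }
        }
        return count;
    }
}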
@@ -29,6 +29,7 @@ import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;

@@ -120,7 +121,7 @@ public final class AllTermQuery extends Query {
    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
        AllTermScorer scorer = scorer(context);
        if (scorer != null) {
            int newDoc = scorer.advance(doc);
            int newDoc = scorer.iterator().advance(doc);
            if (newDoc == doc) {
                float score = scorer.score();
                float freq = scorer.freq();

@@ -213,18 +214,8 @@ public final class AllTermQuery extends Query {
        }

        @Override
        public int nextDoc() throws IOException {
            return postings.nextDoc();
        }

        @Override
        public int advance(int target) throws IOException {
            return postings.advance(target);
        }

        @Override
        public long cost() {
            return postings.cost();
        public DocIdSetIterator iterator() {
            return postings;
        }
    }
@@ -28,6 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FilteredDocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;

@@ -99,11 +100,12 @@ public class FilterableTermsEnum extends TermsEnum {
            }
            BitSet bits = null;
            if (weight != null) {
                DocIdSetIterator docs = weight.scorer(context);
                if (docs == null) {
                Scorer scorer = weight.scorer(context);
                if (scorer == null) {
                    // fully filtered, none matching, no need to iterate on this
                    continue;
                }
                DocIdSetIterator docs = scorer.iterator();

                // we want to force apply deleted docs
                final Bits liveDocs = context.reader().getLiveDocs();
@@ -1,67 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.lucene.search;

import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

import java.io.IOException;

/**
 *
 */
public class EmptyScorer extends Scorer {

    private int docId = -1;

    public EmptyScorer(Weight weight) {
        super(weight);
    }

    @Override
    public float score() throws IOException {
        throw new UnsupportedOperationException("Should never be called");
    }

    @Override
    public int freq() throws IOException {
        throw new UnsupportedOperationException("Should never be called");
    }

    @Override
    public int docID() {
        return docId;
    }

    @Override
    public int nextDoc() throws IOException {
        assert docId != NO_MORE_DOCS;
        return docId = NO_MORE_DOCS;
    }

    @Override
    public int advance(int target) throws IOException {
        return slowAdvance(target);
    }

    @Override
    public long cost() {
        return 0;
    }
}
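EmptyScorer becomes unnecessary once iteration lives outside the Scorer: an empty match set is just an empty iterator, and Lucene ships one. A minimal demonstration:

import org.apache.lucene.search.DocIdSetIterator;

final class EmptyIteration {
    public static void main(String[] args) throws Exception {
        DocIdSetIterator empty = DocIdSetIterator.empty();
        // immediately exhausted: the first nextDoc() returns NO_MORE_DOCS
        System.out.println(empty.nextDoc() == DocIdSetIterator.NO_MORE_DOCS); // true
        System.out.println(empty.cost()); // 0
    }
}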
@@ -30,7 +30,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;

@@ -138,7 +138,7 @@ public class MoreLikeThisQuery extends Query {
        if (rewritten != this) {
            return rewritten;
        }
        XMoreLikeThis mlt = new XMoreLikeThis(reader, similarity == null ? new DefaultSimilarity() : similarity);
        XMoreLikeThis mlt = new XMoreLikeThis(reader, similarity == null ? new ClassicSimilarity() : similarity);

        mlt.setFieldNames(moreLikeFields);
        mlt.setAnalyzer(analyzer);
|
|||
result = calc < 0 ? result + calc : calc;
|
||||
}
|
||||
|
||||
return (optionalClauseCount < result ?
|
||||
optionalClauseCount : (result < 0 ? 0 : result));
|
||||
|
||||
return result < 0 ? 0 : result;
|
||||
}
|
||||
}
|
||||
|
|
|
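The simplification drops the upper clamp against optionalClauseCount and keeps only the floor at zero. A worked sketch of the percentage form of minimumShouldMatch to make the arithmetic concrete; this is my reading of the common spec semantics, not the exact ES parser:

final class MinShouldMatch {
    // percent = -25 means "may miss 25% of optional clauses";
    // percent = 75 means "must match 75% of optional clauses".
    static int fromPercentage(int optionalClauses, int percent) {
        int calc = (optionalClauses * percent) / 100;
        int result = percent < 0 ? optionalClauses + calc : calc;
        return result < 0 ? 0 : result; // only the lower bound is clamped now
    }

    public static void main(String[] args) {
        System.out.println(fromPercentage(4, 75));  // 3: must match 3 of 4
        System.out.println(fromPercentage(4, -25)); // 3: 4 + (4 * -25 / 100) = 3
    }
}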
@@ -52,7 +52,7 @@ import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;

@@ -304,7 +304,7 @@ public final class XMoreLikeThis {
    /**
     * For idf() calculations.
     */
    private TFIDFSimilarity similarity;// = new DefaultSimilarity();
    private TFIDFSimilarity similarity;// = new ClassicSimilarity();

    /**
     * IndexReader to use

@@ -346,7 +346,7 @@ public final class XMoreLikeThis {
     * Constructor requiring an IndexReader.
     */
    public XMoreLikeThis(IndexReader ir) {
        this(ir, new DefaultSimilarity());
        this(ir, new ClassicSimilarity());
    }

    public XMoreLikeThis(IndexReader ir, TFIDFSimilarity sim) {
@@ -1,142 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.lucene.search.function;

import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

import java.io.IOException;

abstract class CustomBoostFactorScorer extends Scorer {

    final Scorer scorer;
    final float maxBoost;
    final CombineFunction scoreCombiner;

    Float minScore;
    NextDoc nextDoc;

    CustomBoostFactorScorer(Weight w, Scorer scorer, float maxBoost, CombineFunction scoreCombiner, Float minScore)
            throws IOException {
        super(w);
        if (minScore == null) {
            nextDoc = new AnyNextDoc();
        } else {
            nextDoc = new MinScoreNextDoc();
        }
        this.scorer = scorer;
        this.maxBoost = maxBoost;
        this.scoreCombiner = scoreCombiner;
        this.minScore = minScore;
    }

    @Override
    public int docID() {
        return scorer.docID();
    }

    @Override
    public int advance(int target) throws IOException {
        return nextDoc.advance(target);
    }

    @Override
    public int nextDoc() throws IOException {
        return nextDoc.nextDoc();
    }

    public abstract float innerScore() throws IOException;

    @Override
    public float score() throws IOException {
        return nextDoc.score();
    }

    @Override
    public int freq() throws IOException {
        return scorer.freq();
    }

    @Override
    public long cost() {
        return scorer.cost();
    }

    public interface NextDoc {
        public int advance(int target) throws IOException;

        public int nextDoc() throws IOException;

        public float score() throws IOException;
    }

    public class MinScoreNextDoc implements NextDoc {
        float currentScore = Float.MAX_VALUE * -1.0f;

        @Override
        public int nextDoc() throws IOException {
            int doc;
            do {
                doc = scorer.nextDoc();
                if (doc == NO_MORE_DOCS) {
                    return doc;
                }
                currentScore = innerScore();
            } while (currentScore < minScore);
            return doc;
        }

        @Override
        public float score() throws IOException {
            return currentScore;
        }

        @Override
        public int advance(int target) throws IOException {
            int doc = scorer.advance(target);
            if (doc == NO_MORE_DOCS) {
                return doc;
            }
            currentScore = innerScore();
            if (currentScore < minScore) {
                return scorer.nextDoc();
            }
            return doc;
        }
    }

    public class AnyNextDoc implements NextDoc {

        @Override
        public int nextDoc() throws IOException {
            return scorer.nextDoc();
        }

        @Override
        public float score() throws IOException {
            return innerScore();
        }

        @Override
        public int advance(int target) throws IOException {
            return scorer.advance(target);
        }
    }
}
@@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FilterScorer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;

@@ -142,7 +143,7 @@ public class FiltersFunctionScoreQuery extends Query {

    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
        if (needsScores == false) {
        if (needsScores == false && minScore == null) {
            return subQuery.createWeight(searcher, needsScores);
        }

@@ -184,11 +185,7 @@ public class FiltersFunctionScoreQuery extends Query {
            subQueryWeight.normalize(norm, boost);
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            // we ignore scoreDocsInOrder parameter, because we need to score in
            // order if documents are scored with a script. The
            // ShardLookup depends on in order scoring.
        private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
            Scorer subQueryScorer = subQueryWeight.scorer(context);
            if (subQueryScorer == null) {
                return null;

@@ -201,15 +198,24 @@ public class FiltersFunctionScoreQuery extends Query {
                Scorer filterScorer = filterWeights[i].scorer(context);
                docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
            }
            return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore, needsScores);
            return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, needsScores);
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            Scorer scorer = functionScorer(context);
            if (scorer != null && minScore != null) {
                scorer = new MinScoreScorer(this, scorer, minScore);
            }
            return scorer;
        }

        @Override
        public Explanation explain(LeafReaderContext context, int doc) throws IOException {

            Explanation subQueryExpl = subQueryWeight.explain(context, doc);
            if (!subQueryExpl.isMatch()) {
                return subQueryExpl;
            Explanation expl = subQueryWeight.explain(context, doc);
            if (!expl.isMatch()) {
                return expl;
            }
            // First: Gather explanations for all filters
            List<Explanation> filterExplanations = new ArrayList<>();

@@ -218,7 +224,7 @@ public class FiltersFunctionScoreQuery extends Query {
                        filterWeights[i].scorer(context));
                if (docSet.get(doc)) {
                    FilterFunction filterFunction = filterFunctions[i];
                    Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
                    Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, expl);
                    double factor = functionExplanation.getValue();
                    float sc = CombineFunction.toFloat(factor);
                    Explanation filterExplanation = Explanation.match(sc, "function score, product of:",

@@ -226,46 +232,52 @@ public class FiltersFunctionScoreQuery extends Query {
                    filterExplanations.add(filterExplanation);
                }
            }
            if (filterExplanations.size() == 0) {
                return subQueryExpl;
            if (filterExplanations.size() > 0) {
                FiltersFunctionFactorScorer scorer = functionScorer(context);
                int actualDoc = scorer.iterator().advance(doc);
                assert (actualDoc == doc);
                double score = scorer.computeScore(doc, expl.getValue());
                Explanation factorExplanation = Explanation.match(
                        CombineFunction.toFloat(score),
                        "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]",
                        filterExplanations);
                expl = combineFunction.explain(expl, factorExplanation, maxBoost);
            }

            FiltersFunctionFactorScorer scorer = (FiltersFunctionFactorScorer) scorer(context);
            int actualDoc = scorer.advance(doc);
            assert (actualDoc == doc);
            double score = scorer.computeScore(doc, subQueryExpl.getValue());
            Explanation factorExplanation = Explanation.match(
                    CombineFunction.toFloat(score),
                    "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]",
                    filterExplanations);
            return combineFunction.explain(subQueryExpl, factorExplanation, maxBoost);
            if (minScore != null && minScore > expl.getValue()) {
                expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl);
            }
            return expl;
        }
    }

    static class FiltersFunctionFactorScorer extends CustomBoostFactorScorer {
    static class FiltersFunctionFactorScorer extends FilterScorer {
        private final FilterFunction[] filterFunctions;
        private final ScoreMode scoreMode;
        private final LeafScoreFunction[] functions;
        private final Bits[] docSets;
        private final CombineFunction scoreCombiner;
        private final float maxBoost;
        private final boolean needsScores;

        private FiltersFunctionFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode, FilterFunction[] filterFunctions,
                float maxBoost, LeafScoreFunction[] functions, Bits[] docSets, CombineFunction scoreCombiner, Float minScore, boolean needsScores) throws IOException {
            super(w, scorer, maxBoost, scoreCombiner, minScore);
                float maxBoost, LeafScoreFunction[] functions, Bits[] docSets, CombineFunction scoreCombiner, boolean needsScores) throws IOException {
            super(scorer, w);
            this.scoreMode = scoreMode;
            this.filterFunctions = filterFunctions;
            this.functions = functions;
            this.docSets = docSets;
            this.scoreCombiner = scoreCombiner;
            this.maxBoost = maxBoost;
            this.needsScores = needsScores;
        }

        @Override
        public float innerScore() throws IOException {
            int docId = scorer.docID();
        public float score() throws IOException {
            int docId = docID();
            // Even if the weight is created with needsScores=false, it might
            // be costly to call score(), so we explicitly check if scores
            // are needed
            float subQueryScore = needsScores ? scorer.score() : 0f;
            float subQueryScore = needsScores ? super.score() : 0f;
            double factor = computeScore(docId, subQueryScore);
            return scoreCombiner.combine(subQueryScore, factor, maxBoost);
        }

@@ -357,12 +369,13 @@ public class FiltersFunctionScoreQuery extends Query {
        }
        FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o;
        return Objects.equals(this.subQuery, other.subQuery) && this.maxBoost == other.maxBoost &&
            Objects.equals(this.combineFunction, other.combineFunction) && Objects.equals(this.minScore, other.minScore) &&
            Arrays.equals(this.filterFunctions, other.filterFunctions);
            Objects.equals(this.combineFunction, other.combineFunction) && Objects.equals(this.minScore, other.minScore) &&
            Objects.equals(this.scoreMode, other.scoreMode) &&
            Arrays.equals(this.filterFunctions, other.filterFunctions);
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), subQuery, maxBoost, combineFunction, minScore, filterFunctions);
        return Objects.hash(super.hashCode(), subQuery, maxBoost, combineFunction, minScore, scoreMode, Arrays.hashCode(filterFunctions));
    }
}
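One subtle fix above: Objects.hash(..., filterFunctions) hashes the array by identity, so two equal queries could hash differently; passing Arrays.hashCode(filterFunctions) hashes the contents instead. A quick demonstration of the difference:

import java.util.Arrays;
import java.util.Objects;

final class ArrayHashing {
    public static void main(String[] args) {
        String[] a = {"x", "y"};
        String[] b = {"x", "y"};
        // identity-based: distinct array objects almost always hash differently
        System.out.println(Objects.hash("q", a) == Objects.hash("q", b)); // false (in practice)
        // content-based: stable across arrays with equal contents
        System.out.println(Objects.hash("q", Arrays.hashCode(a)) == Objects.hash("q", Arrays.hashCode(b))); // true
    }
}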
@@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FilterScorer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;

@@ -90,7 +91,7 @@ public class FunctionScoreQuery extends Query {

    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
        if (needsScores == false) {
        if (needsScores == false && minScore == null) {
            return subQuery.createWeight(searcher, needsScores);
        }

@@ -128,8 +129,7 @@ public class FunctionScoreQuery extends Query {
            subQueryWeight.normalize(norm, boost);
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
        private FunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
            Scorer subQueryScorer = subQueryWeight.scorer(context);
            if (subQueryScorer == null) {
                return null;

@@ -138,7 +138,16 @@ public class FunctionScoreQuery extends Query {
            if (function != null) {
                leafFunction = function.getLeafScoreFunction(context);
            }
            return new FunctionFactorScorer(this, subQueryScorer, leafFunction, maxBoost, combineFunction, minScore, needsScores);
            return new FunctionFactorScorer(this, subQueryScorer, leafFunction, maxBoost, combineFunction, needsScores);
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            Scorer scorer = functionScorer(context);
            if (scorer != null && minScore != null) {
                scorer = new MinScoreScorer(this, scorer, minScore);
            }
            return scorer;
        }

        @Override

@@ -147,38 +156,47 @@ public class FunctionScoreQuery extends Query {
            if (!subQueryExpl.isMatch()) {
                return subQueryExpl;
            }
            Explanation expl;
            if (function != null) {
                Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
                return combineFunction.explain(subQueryExpl, functionExplanation, maxBoost);
                expl = combineFunction.explain(subQueryExpl, functionExplanation, maxBoost);
            } else {
                return subQueryExpl;
                expl = subQueryExpl;
            }
            if (minScore != null && minScore > expl.getValue()) {
                expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl);
            }
            return expl;
        }
    }

    static class FunctionFactorScorer extends CustomBoostFactorScorer {
    static class FunctionFactorScorer extends FilterScorer {

        private final LeafScoreFunction function;
        private final boolean needsScores;
        private final CombineFunction scoreCombiner;
        private final float maxBoost;

        private FunctionFactorScorer(CustomBoostFactorWeight w, Scorer scorer, LeafScoreFunction function, float maxBoost, CombineFunction scoreCombiner, Float minScore, boolean needsScores)
        private FunctionFactorScorer(CustomBoostFactorWeight w, Scorer scorer, LeafScoreFunction function, float maxBoost, CombineFunction scoreCombiner, boolean needsScores)
                throws IOException {
            super(w, scorer, maxBoost, scoreCombiner, minScore);
            super(scorer, w);
            this.function = function;
            this.scoreCombiner = scoreCombiner;
            this.maxBoost = maxBoost;
            this.needsScores = needsScores;
        }

        @Override
        public float innerScore() throws IOException {
        public float score() throws IOException {
            // Even if the weight is created with needsScores=false, it might
            // be costly to call score(), so we explicitly check if scores
            // are needed
            float score = needsScores ? scorer.score() : 0f;
            float score = needsScores ? super.score() : 0f;
            if (function == null) {
                return score;
            } else {
                return scoreCombiner.combine(score,
                        function.score(scorer.docID(), score), maxBoost);
                        function.score(docID(), score), maxBoost);
            }
        }
    }
@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.lucene.search.function;

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.ScoreCachingWrappingScorer;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

/** A {@link Scorer} that filters out documents that have a score that is
 * lower than a configured constant. */
final class MinScoreScorer extends Scorer {

    private final Scorer in;
    private final float minScore;

    MinScoreScorer(Weight weight, Scorer scorer, float minScore) {
        super(weight);
        if (scorer instanceof ScoreCachingWrappingScorer == false) {
            // when minScore is set, scores might be requested twice: once
            // to verify the match, and once by the collector
            scorer = new ScoreCachingWrappingScorer(scorer);
        }
        this.in = scorer;
        this.minScore = minScore;
    }

    public Scorer getScorer() {
        return in;
    }

    @Override
    public int docID() {
        return in.docID();
    }

    @Override
    public float score() throws IOException {
        return in.score();
    }

    @Override
    public int freq() throws IOException {
        return in.freq();
    }

    @Override
    public DocIdSetIterator iterator() {
        return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
    }

    @Override
    public TwoPhaseIterator twoPhaseIterator() {
        final TwoPhaseIterator inTwoPhase = this.in.twoPhaseIterator();
        final DocIdSetIterator approximation = inTwoPhase == null ? in.iterator() : inTwoPhase.approximation();
        return new TwoPhaseIterator(approximation) {

            @Override
            public boolean matches() throws IOException {
                // we need to check the two-phase iterator first
                // otherwise calling score() is illegal
                if (inTwoPhase != null && inTwoPhase.matches() == false) {
                    return false;
                }
                return in.score() >= minScore;
            }

            @Override
            public float matchCost() {
                return 1000f // random constant for the score computation
                    + (inTwoPhase == null ? 0 : inTwoPhase.matchCost());
            }
        };
    }
}
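The two-phase pattern generalizes beyond Lucene: iterate a cheap approximation, and confirm each candidate with a possibly expensive check before exposing it. A plain-Java sketch of the same control flow (the generic iterator stands in for the approximation, the predicate for matches()):

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

final class TwoPhaseFilter<T> implements Iterator<T> {
    private final Iterator<T> approximation;
    private final Predicate<T> matches;
    private T next;

    TwoPhaseFilter(Iterator<T> approximation, Predicate<T> matches) {
        this.approximation = approximation;
        this.matches = matches;
        advance();
    }

    private void advance() {
        next = null;
        while (approximation.hasNext()) {
            T candidate = approximation.next();
            if (matches.test(candidate)) { // expensive check runs once per candidate
                next = candidate;
                return;
            }
        }
    }

    @Override public boolean hasNext() { return next != null; }

    @Override public T next() {
        if (next == null) throw new NoSuchElementException();
        T result = next;
        advance();
        return result;
    }

    public static void main(String[] args) {
        Iterator<Integer> aboveMin = new TwoPhaseFilter<>(List.of(3, 9, 4, 12).iterator(), s -> s >= 5);
        aboveMin.forEachRemaining(System.out::println); // 9, 12
    }
}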
@@ -20,6 +20,7 @@
package org.elasticsearch.common.lucene.search.function;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.script.ExplainableSearchScript;

@@ -57,19 +58,9 @@ public class ScriptScoreFunction extends ScoreFunction {
        }

        @Override
        public int nextDoc() throws IOException {
        public DocIdSetIterator iterator() {
            throw new UnsupportedOperationException();
        }

        @Override
        public int advance(int target) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public long cost() {
            return 1;
        }
    }

    private final Script sScript;
@@ -36,6 +36,7 @@ import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthActio
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;

@@ -259,7 +260,10 @@ public class NetworkModule extends AbstractModule {
        RestFieldStatsAction.class,

        // no abstract cat action
        RestCatAction.class
        RestCatAction.class,

        // Tasks API
        RestListTasksAction.class
    );

    private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList(
@@ -109,8 +109,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
        IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
        MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
        MetaData.SETTING_READ_ONLY_SETTING,
        RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING,
        RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING,
        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,

@@ -119,6 +117,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
        RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
        ThreadPool.THREADPOOL_GROUP_SETTING,
        ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
        ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
        ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
        ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
@@ -34,4 +34,9 @@ public class XContentLocation {
        this.lineNumber = lineNumber;
        this.columnNumber = columnNumber;
    }

    @Override
    public String toString() {
        return lineNumber + ":" + columnNumber;
    }
}
@@ -79,7 +79,10 @@ public class JsonXContentParser extends AbstractXContentParser {

    @Override
    public String text() throws IOException {
        return parser.getText();
        if (currentToken().isValue()) {
            return parser.getText();
        }
        throw new IllegalStateException("Can't get text on a " + currentToken() + " at " + getTokenLocation());
    }

    @Override
@@ -194,7 +194,7 @@ public abstract class AbstractXContentParser implements XContentParser {
    protected abstract double doDoubleValue() throws IOException;

    @Override
    public String textOrNull() throws IOException {
    public final String textOrNull() throws IOException {
        if (currentToken() == Token.VALUE_NULL) {
            return null;
        }
@ -22,6 +22,7 @@ package org.elasticsearch.index;
|
|||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
|
@ -39,6 +40,7 @@ import org.elasticsearch.index.analysis.AnalysisService;
|
|||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCache;
|
||||
import org.elasticsearch.index.engine.EngineClosedException;
|
||||
import org.elasticsearch.index.engine.EngineFactory;
|
||||
import org.elasticsearch.index.fielddata.FieldDataType;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
|
||||
|
@ -57,9 +59,11 @@ import org.elasticsearch.index.shard.ShardPath;
|
|||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.index.store.IndexStore;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.indices.AliasFilterParsingException;
|
||||
import org.elasticsearch.indices.InvalidAliasNameException;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
@ -296,6 +300,10 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
|
|||
eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
|
||||
eventListener.afterIndexShardCreated(indexShard);
|
||||
indexShard.updateRoutingEntry(routing, true);
|
||||
if (shards.isEmpty() && this.indexSettings.getTranslogSyncInterval().millis() != 0) {
|
||||
ThreadPool threadPool = nodeServicesProvider.getThreadPool();
|
||||
new AsyncTranslogFSync(this, threadPool).schedule(); // kick this off if we are the first shard in this service.
|
||||
}
|
||||
shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
|
||||
success = true;
|
||||
return indexShard;
|
||||
|
@ -451,21 +459,21 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         }

         @Override
-        public void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage) {
+        public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
             if (shardId != null) {
                 final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
-                    shard.fieldData().onCache(shardId, fieldNames, fieldDataType, ramUsage);
+                    shard.fieldData().onCache(shardId, fieldName, fieldDataType, ramUsage);
                 }
             }
         }

         @Override
-        public void onRemoval(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
+        public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
             if (shardId != null) {
                 final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
-                    shard.fieldData().onRemoval(shardId, fieldNames, fieldDataType, wasEvicted, sizeInBytes);
+                    shard.fieldData().onRemoval(shardId, fieldName, fieldDataType, wasEvicted, sizeInBytes);
                 }
             }
         }
@ -565,5 +573,57 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         return indexStore;
     } // pkg private for testing

+    private void maybeFSyncTranslogs() {
+        if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) {
+            for (IndexShard shard : this.shards.values()) {
+                try {
+                    Translog translog = shard.getTranslog();
+                    if (translog.syncNeeded()) {
+                        translog.sync();
+                    }
+                } catch (EngineClosedException | AlreadyClosedException ex) {
+                    // fine - continue
+                } catch (IOException e) {
+                    logger.warn("failed to sync translog", e);
+                }
+            }
+        }
+    }
+
+    /**
+     * FSyncs the translog for all shards of this index in a defined interval.
+     */
+    final static class AsyncTranslogFSync implements Runnable {
+        private final IndexService indexService;
+        private final ThreadPool threadPool;
+
+        AsyncTranslogFSync(IndexService indexService, ThreadPool threadPool) {
+            this.indexService = indexService;
+            this.threadPool = threadPool;
+        }
+
+        boolean mustRun() {
+            // don't re-schedule if it's closed or if we don't have a single shard here..., we are done
+            return (indexService.closed.get() || indexService.shards.isEmpty()) == false;
+        }
+
+        void schedule() {
+            threadPool.schedule(indexService.getIndexSettings().getTranslogSyncInterval(), ThreadPool.Names.SAME, AsyncTranslogFSync.this);
+        }
+
+        @Override
+        public void run() {
+            if (mustRun()) {
+                threadPool.executor(ThreadPool.Names.FLUSH).execute(() -> {
+                    indexService.maybeFSyncTranslogs();
+                    if (mustRun()) {
+                        schedule();
+                    }
+                });
+            }
+        }
+    }
+
 }
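The task is armed once when the first shard is created (see the createShard hunk above) and then re-arms itself after each pass, so runs can never overlap and the loop dies naturally once the index closes or loses its last shard. The same shape with a plain ScheduledExecutorService, as a stand-alone sketch (ThreadPool is ES-internal):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch of the re-arming pattern: a one-shot task schedules its successor
    // only after the current pass finished, instead of using scheduleAtFixedRate.
    class RearmingFsyncTask implements Runnable {
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        private final AtomicBoolean closed = new AtomicBoolean();

        void schedule() {
            scheduler.schedule(this, 5, TimeUnit.SECONDS); // one-shot, like threadPool.schedule(...)
        }

        @Override
        public void run() {
            if (closed.get()) {
                return; // mirrors mustRun(): stop re-arming once closed
            }
            fsyncWork();
            schedule(); // re-arm only after this pass completed
        }

        private void fsyncWork() { /* e.g. sync every translog that reports syncNeeded() */ }
    }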
@ -25,12 +25,16 @@ import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.translog.Translog;

 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.function.Consumer;
 import java.util.function.Predicate;

@ -48,6 +52,9 @@ public final class IndexSettings {
     public static final String QUERY_STRING_ANALYZE_WILDCARD = "indices.query.query_string.analyze_wildcard";
     public static final String QUERY_STRING_ALLOW_LEADING_WILDCARD = "indices.query.query_string.allowLeadingWildcard";
     public static final String ALLOW_UNMAPPED = "index.query.parse.allow_unmapped_fields";
+    public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval";
+    public static final String INDEX_TRANSLOG_DURABILITY = "index.translog.durability";
+
     private final String uuid;
     private final List<Consumer<Settings>> updateListeners;
     private final Index index;
@ -67,6 +74,8 @@ public final class IndexSettings {
     private final boolean queryStringAllowLeadingWildcard;
     private final boolean defaultAllowUnmappedFields;
     private final Predicate<String> indexNameMatcher;
+    private volatile Translog.Durability durability;
+    private final TimeValue syncInterval;

     /**
      * Returns the default search field for this index.
@ -127,7 +136,7 @@ public final class IndexSettings {
     public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, final Collection<Consumer<Settings>> updateListeners, final Predicate<String> indexNameMatcher) {
         this.nodeSettings = nodeSettings;
         this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
-        this.updateListeners = Collections.unmodifiableList(new ArrayList<>(updateListeners));
+        this.updateListeners = Collections.unmodifiableList( new ArrayList<>(updateListeners));
         this.index = new Index(indexMetaData.getIndex());
         version = Version.indexCreated(settings);
         uuid = settings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
@ -144,6 +153,10 @@ public final class IndexSettings {
         this.parseFieldMatcher = new ParseFieldMatcher(settings);
         this.defaultAllowUnmappedFields = settings.getAsBoolean(ALLOW_UNMAPPED, true);
         this.indexNameMatcher = indexNameMatcher;
-        final String value = settings.get(INDEX_TRANSLOG_DURABILITY, Translog.Durability.REQUEST.name());
+        this.durability = getFromSettings(settings, Translog.Durability.REQUEST);
+        syncInterval = settings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
+
+        assert indexNameMatcher.test(indexMetaData.getIndex());
     }

@ -295,6 +308,11 @@ public final class IndexSettings {
                 logger.warn("failed to refresh index settings for [{}]", e, mergedSettings);
             }
         }
+        try {
+            updateSettings(mergedSettings);
+        } catch (Exception e) {
+            logger.warn("failed to refresh index settings for [{}]", e, mergedSettings);
+        }
         return true;
     }

@ -304,4 +322,34 @@ public final class IndexSettings {
     List<Consumer<Settings>> getUpdateListeners() { // for testing
         return updateListeners;
     }
+
+    /**
+     * Returns the translog durability for this index.
+     */
+    public Translog.Durability getTranslogDurability() {
+        return durability;
+    }
+
+    private Translog.Durability getFromSettings(Settings settings, Translog.Durability defaultValue) {
+        final String value = settings.get(INDEX_TRANSLOG_DURABILITY, defaultValue.name());
+        try {
+            return Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT));
+        } catch (IllegalArgumentException ex) {
+            logger.warn("Can't apply {} illegal value: {} using {} instead, use one of: {}", INDEX_TRANSLOG_DURABILITY, value, defaultValue, Arrays.toString(Translog.Durability.values()));
+            return defaultValue;
+        }
+    }
+
+    private void updateSettings(Settings settings) {
+        final Translog.Durability durability = getFromSettings(settings, this.durability);
+        if (durability != this.durability) {
+            logger.info("updating durability from [{}] to [{}]", this.durability, durability);
+            this.durability = durability;
+        }
+    }
+
+    public TimeValue getTranslogSyncInterval() {
+        return syncInterval;
+    }
+
 }
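getFromSettings deliberately never throws: a bad value in a live settings update is logged and the previous durability is kept, so an index cannot be wedged by a typo. A generic, self-contained version of that fallback (sketch, not ES API):

    import java.util.Arrays;
    import java.util.Locale;

    // Sketch: lenient enum parsing with a warn-and-fallback instead of an
    // exception, mirroring the getFromSettings pattern above.
    final class EnumSettingsSketch {
        enum Durability { REQUEST, ASYNC }

        static Durability parse(String value, Durability fallback) {
            try {
                return Durability.valueOf(value.toUpperCase(Locale.ROOT));
            } catch (IllegalArgumentException ex) {
                System.err.printf("illegal value: %s, using %s instead, one of: %s%n",
                        value, fallback, Arrays.toString(Durability.values()));
                return fallback;
            }
        }

        public static void main(String[] args) {
            System.out.println(parse("async", Durability.REQUEST));   // ASYNC
            System.out.println(parse("fsyncy", Durability.REQUEST));  // warns, keeps REQUEST
        }
    }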
@ -78,7 +78,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
      * and 100 afterwards so we override the positionIncrementGap if it
      * doesn't match here.
      */
-    int overridePositionIncrementGap = StringFieldMapper.Defaults.positionIncrementGap(indexSettings.getIndexVersionCreated());
+    int overridePositionIncrementGap = StringFieldMapper.Defaults.POSITION_INCREMENT_GAP;
     if (analyzerFactory instanceof CustomAnalyzerProvider) {
         ((CustomAnalyzerProvider) analyzerFactory).build(this);
     /*
@ -74,7 +74,7 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
         tokenFilters.add(tokenFilter);
     }

-    int positionIncrementGap = StringFieldMapper.Defaults.positionIncrementGap(indexSettings.getIndexVersionCreated());
+    int positionIncrementGap = StringFieldMapper.Defaults.POSITION_INCREMENT_GAP;

     if (analyzerSettings.getAsMap().containsKey("position_offset_gap")){
         if (indexSettings.getIndexVersionCreated().before(Version.V_2_0_0)){
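Both call sites drop the version-dependent lookup in favor of the fixed constant. For readers unfamiliar with the knob itself, here is what the gap does, as a sketch in plain Lucene terms (class name hypothetical): with a gap of 100, token positions of successive values of a multi-valued field are pushed 100 apart, so a phrase query cannot accidentally match across two values.

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.AnalyzerWrapper;

    // Sketch: force a fixed position increment gap on any delegate analyzer.
    final class FixedGapAnalyzer extends AnalyzerWrapper {
        private final Analyzer delegate;

        FixedGapAnalyzer(Analyzer delegate) {
            super(PER_FIELD_REUSE_STRATEGY);
            this.delegate = delegate;
        }

        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            return delegate;
        }

        @Override
        public int getPositionIncrementGap(String fieldName) {
            return 100; // the fixed default; formerly version-dependent for pre-2.0 indices
        }
    }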
@ -23,36 +23,24 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 import org.elasticsearch.common.collect.CopyOnWriteHashMap;

-import java.util.AbstractMap;
 import java.util.Map;
-import java.util.stream.Stream;

 /**
  *
  */
 public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {

-    private final CopyOnWriteHashMap<String, Analyzer> analyzers;
-    private final Analyzer defaultAnalyzer;
+    private final Map<String, Analyzer> analyzers;

-    public FieldNameAnalyzer(Analyzer defaultAnalyzer) {
-        this(new CopyOnWriteHashMap<>(), defaultAnalyzer);
-    }
-
-    public FieldNameAnalyzer(Map<String, Analyzer> analyzers, Analyzer defaultAnalyzer) {
+    public FieldNameAnalyzer(Map<String, Analyzer> analyzers) {
         super(Analyzer.PER_FIELD_REUSE_STRATEGY);
         this.analyzers = CopyOnWriteHashMap.copyOf(analyzers);
-        this.defaultAnalyzer = defaultAnalyzer;
     }

     public Map<String, Analyzer> analyzers() {
         return analyzers;
     }

-    public Analyzer defaultAnalyzer() {
-        return defaultAnalyzer;
-    }
-
     @Override
     protected Analyzer getWrappedAnalyzer(String fieldName) {
         Analyzer analyzer = analyzers.get(fieldName);
@ -63,18 +51,4 @@ public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {
         // Fields need to be explicitly added
         throw new IllegalArgumentException("Field [" + fieldName + "] has no associated analyzer");
     }
-
-    /**
-     * Return a new instance that contains the union of this and of the provided analyzers.
-     */
-    public FieldNameAnalyzer copyAndAddAll(Stream<? extends Map.Entry<String, Analyzer>> mappers) {
-        CopyOnWriteHashMap<String, Analyzer> result = analyzers.copyAndPutAll(mappers.map((e) -> {
-            if (e.getValue() == null) {
-                return new AbstractMap.SimpleImmutableEntry<>(e.getKey(), defaultAnalyzer);
-            }
-            return e;
-        }));
-        return new FieldNameAnalyzer(result, defaultAnalyzer);
-    }
-
 }
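With the default analyzer and copyAndAddAll gone, FieldNameAnalyzer becomes a plain immutable field-to-analyzer map: callers rebuild it wholesale and unknown fields fail loudly. A hypothetical usage sketch (field names and analyzer choices invented for illustration):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;

    final class FieldNameAnalyzerUsage {
        // Every indexed field must be registered explicitly up front.
        static FieldNameAnalyzer buildPerFieldAnalyzer() {
            Map<String, Analyzer> analyzers = new HashMap<>();
            analyzers.put("title", new StandardAnalyzer());
            analyzers.put("id", new KeywordAnalyzer());
            return new FieldNameAnalyzer(analyzers);
            // tokenStream("body", ...) on the result now throws:
            // IllegalArgumentException: Field [body] has no associated analyzer
        }
    }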
@ -26,6 +26,7 @@ import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.join.BitSetProducer;
 import org.apache.lucene.util.Accountable;
@ -127,12 +128,12 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
         final IndexSearcher searcher = new IndexSearcher(topLevelContext);
         searcher.setQueryCache(null);
         final Weight weight = searcher.createNormalizedWeight(query, false);
-        final DocIdSetIterator it = weight.scorer(context);
+        Scorer s = weight.scorer(context);
         final BitSet bitSet;
-        if (it == null) {
+        if (s == null) {
             bitSet = null;
         } else {
-            bitSet = BitSet.of(it, context.reader().maxDoc());
+            bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
         }

         Value value = new Value(bitSet, shardId);
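This hunk tracks Lucene's move of the iterator off Weight: weight.scorer(context) now hands back a Scorer (null when the segment has no matches) and the DocIdSetIterator is requested from it explicitly. The migration pattern as a minimal sketch:

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    // Sketch: null Scorer means "no matching docs in this segment"; always
    // null-check before asking for the iterator.
    final class ScorerIterators {
        static DocIdSetIterator matchingDocs(Weight weight, LeafReaderContext context) throws IOException {
            Scorer s = weight.scorer(context);
            return s == null ? null : s.iterator();
        }
    }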
@ -54,7 +54,7 @@ public class PerFieldMappingPostingFormatCodec extends Lucene54Codec {

     @Override
     public PostingsFormat getPostingsFormatForField(String field) {
-        final MappedFieldType indexName = mapperService.indexName(field);
+        final MappedFieldType indexName = mapperService.fullName(field);
         if (indexName == null) {
             logger.warn("no index mapper found for field: [{}] returning default postings format", field);
         } else if (indexName instanceof CompletionFieldMapper.CompletionFieldType) {
@ -53,7 +53,6 @@ public final class EngineConfig {
     private volatile ByteSizeValue indexingBufferSize;
     private volatile ByteSizeValue versionMapSize;
     private volatile String versionMapSizeSetting;
-    private volatile boolean compoundOnFlush = true;
     private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
     private volatile boolean enableGcDeletes = true;
     private final TimeValue flushMergesAfter;
@ -73,11 +72,6 @@ public final class EngineConfig {
     private final QueryCache queryCache;
     private final QueryCachingPolicy queryCachingPolicy;

-    /**
-     * Index setting for compound file on flush. This setting is realtime updateable.
-     */
-    public static final String INDEX_COMPOUND_ON_FLUSH = "index.compound_on_flush";
-
     /**
      * Index setting to enable / disable deletes garbage collection.
      * This setting is realtime updateable
@ -132,7 +126,6 @@ public final class EngineConfig {
         this.similarity = similarity;
         this.codecService = codecService;
         this.eventListener = eventListener;
-        this.compoundOnFlush = settings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
         codecName = settings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
         // We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing:
         indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER;
@ -208,13 +201,6 @@ public final class EngineConfig {
         return indexingBufferSize;
     }

-    /**
-     * Returns <code>true</code> iff flushed segments should be written as compound file system. Defaults to <code>true</code>
-     */
-    public boolean isCompoundOnFlush() {
-        return compoundOnFlush;
-    }
-
     /**
      * Returns the GC deletes cycle in milliseconds.
      */
@ -346,13 +332,6 @@ public final class EngineConfig {
         this.gcDeletesInMillis = gcDeletesInMillis;
     }

-    /**
-     * Sets if flushed segments should be written as compound file system. Defaults to <code>true</code>
-     */
-    public void setCompoundOnFlush(boolean compoundOnFlush) {
-        this.compoundOnFlush = compoundOnFlush;
-    }
-
     /**
      * Returns the {@link org.elasticsearch.index.shard.TranslogRecoveryPerformer} for this engine. This class is used
      * to apply transaction log operations to the engine. It encapsulates all the logic to transfer the translog entry into
@ -313,7 +313,6 @@ public class InternalEngine extends Engine {
         try {
             final LiveIndexWriterConfig iwc = indexWriter.getConfig();
             iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
-            iwc.setUseCompoundFile(engineConfig.isCompoundOnFlush());
         } catch (AlreadyClosedException ex) {
             // ignore
         }
@ -939,7 +938,7 @@ public class InternalEngine extends Engine {
          * here but with 1s poll this is only executed twice at most
          * in combination with the default writelock timeout*/
         iwc.setWriteLockTimeout(5000);
-        iwc.setUseCompoundFile(this.engineConfig.isCompoundOnFlush());
+        iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
         // Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
         // of the merge operation and won't slow down _refresh
         iwc.setMergedSegmentWarmer(new IndexReaderWarmer() {
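With the index.compound_on_flush setting removed, flushes always write compound files. For reference, the underlying knob in plain Lucene terms (a sketch): compound files pack a segment into a single .cfs file, so many small flushed segments no longer multiply open file handles, at the cost of a little extra copying at flush time.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;

    final class CompoundOnFlushSketch {
        static IndexWriterConfig newConfig() {
            IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
            iwc.setUseCompoundFile(true); // mirrors the now-unconditional flush behavior
            return iwc;
        }
    }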
@ -1129,20 +1128,18 @@ public class InternalEngine extends Engine {
         @Override
         protected void handleMergeException(final Directory dir, final Throwable exc) {
             logger.error("failed to merge", exc);
-            if (config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
-                engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
-                    @Override
-                    public void onFailure(Throwable t) {
-                        logger.debug("merge failure action rejected", t);
-                    }
+            engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
+                @Override
+                public void onFailure(Throwable t) {
+                    logger.debug("merge failure action rejected", t);
+                }

-                    @Override
-                    protected void doRun() throws Exception {
-                        MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
-                        failEngine("merge failed", e);
-                    }
-                });
-            }
+                @Override
+                protected void doRun() throws Exception {
+                    MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
+                    failEngine("merge failed", e);
+                }
+            });
         }

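Merge failures now always fail the engine from a generic thread; the old notify-on-merge-failure toggle is gone. The AbstractRunnable split matters because it routes both a rejected submission and a failed run into one handler. A rough equivalent with a plain executor (AbstractRunnable itself is ES-internal; names here are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.function.Consumer;

    // Sketch: an async "fail the engine" action can never vanish silently,
    // whether the pool rejects it or the action itself throws.
    final class SafeAsyncSketch {
        static void execute(ExecutorService executor, Runnable action, Consumer<Throwable> onFailure) {
            try {
                executor.execute(() -> {
                    try {
                        action.run();        // doRun()
                    } catch (Throwable t) {
                        onFailure.accept(t); // onFailure(Throwable)
                    }
                });
            } catch (RejectedExecutionException e) {
                onFailure.accept(e);         // rejection takes the same path
            }
        }
    }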
@ -24,6 +24,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldComparatorSource;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.join.BitSetProducer;
@ -79,7 +80,7 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
     /**
      * The field name.
      */
-    MappedFieldType.Names getFieldNames();
+    String getFieldName();

     /**
      * The field data type.
@ -139,7 +140,8 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
         * Get a {@link DocIdSet} that matches the inner documents.
         */
        public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
-            return innerFilter.scorer(ctx);
+            Scorer s = innerFilter.scorer(ctx);
+            return s == null ? null : s.iterator();
        }
    }

@ -22,7 +22,6 @@ package org.elasticsearch.index.fielddata;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.Accountable;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.shard.ShardId;

 /**
@ -49,12 +48,12 @@ public interface IndexFieldDataCache {
     /**
      * Called after the fielddata is loaded during the cache phase
      */
-    void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage);
+    void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage);

     /**
      * Called after the fielddata is unloaded
      */
-    void onRemoval(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes);
+    void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes);
 }

 class None implements IndexFieldDataCache {
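Downstream implementers of these callbacks only need to swap the parameter type, since field names now travel as plain strings instead of MappedFieldType.Names. A hypothetical no-op implementation against the new signatures (assuming the nested Listener interface these methods belong to; the ES types are the ones imported in this diff):

    import org.apache.lucene.util.Accountable;
    import org.elasticsearch.index.fielddata.FieldDataType;
    import org.elasticsearch.index.fielddata.IndexFieldDataCache;
    import org.elasticsearch.index.shard.ShardId;

    // Sketch: a do-nothing listener, useful as a default when no stats are kept.
    final class NoopFieldDataListener implements IndexFieldDataCache.Listener {
        @Override
        public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
            // intentionally empty
        }

        @Override
        public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
            // intentionally empty
        }
    }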
Some files were not shown because too many files have changed in this diff.