Merge branch 'master' into feature/ingest
commit 5f2df6b95a

@@ -1,37 +1,42 @@
# intellij files
.idea/
.gradle/
*.iml
*.ipr
*.iws
work/
/data/
logs/
.DS_Store
build/
generated-resources/
**/.local*
docs/html/
docs/build.log
/tmp/
backwards/
html_docs
.vagrant/

## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## All files (.project, .classpath, .settings/*) should be generated through Maven which
## will correctly set the classpath based on the declared dependencies and write settings
## files to ensure common coding style across Eclipse and IDEA.
# eclipse files
.project
.classpath
eclipse-build
.settings

## netbeans ignores
# netbeans files
nb-configuration.xml
nbactions.xml

dependency-reduced-pom.xml
# gradle stuff
.gradle/
build/
generated-resources/

# old patterns specific to maven
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
dependency-reduced-pom.xml

# testing stuff
**/.local*
.vagrant/

# osx stuff
.DS_Store

# needed in case docs build is run...maybe we can configure doc build to generate files under build?
html_docs

# random old stuff that we should look at the necessity of...
/tmp/
backwards/
@@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command.
REST tests use the following command:

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------
build.gradle
@@ -97,6 +97,7 @@ subprojects {
// the "value" -quiet is added, separated by a space. This is ok since the javadoc
// command already adds -quiet, so we are just duplicating it
// see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
javadoc.options.encoding='UTF8'
javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
}
}
@@ -108,7 +109,7 @@ subprojects {
ext.projectSubstitutions = [
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
"org.elasticsearch:test-framework:${version}": ':test-framework',
"org.elasticsearch.test:framework:${version}": ':test:framework',
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
"org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
@@ -122,6 +123,16 @@ subprojects {
}
}
}
// For reasons we don't fully understand yet, external dependencies are not picked up by Ant's optional tasks.
// But you can easily do it in another way.
// Only if your buildscript and Ant's optional task need the same library would you have to define it twice.
// https://docs.gradle.org/current/userguide/organizing_build_logic.html
configurations {
forbiddenApis
}
dependencies {
forbiddenApis 'de.thetaphi:forbiddenapis:2.0'
}
}

// Ensure similar tasks in dependent projects run first. The projectsEvaluated here is
@@ -130,8 +141,8 @@ subprojects {
// the dependency is added.
gradle.projectsEvaluated {
allprojects {
if (project.path == ':test-framework') {
// :test-framework:test cannot run before and after :core:test
if (project.path == ':test:framework') {
// :test:framework:test cannot run before and after :core:test
return
}
configurations.all {
@@ -202,7 +202,7 @@ class BuildPlugin implements Plugin<Project> {

// force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
Closure disableTransitiveDeps = { ModuleDependency dep ->
if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
dep.transitive = false

// also create a configuration just for this dependency version, so that later
@@ -302,6 +302,7 @@ class BuildPlugin implements Plugin<Project> {
options.compilerArgs << '-profile' << project.compactProfile
}
options.encoding = 'UTF-8'
//options.incremental = true
}
}
}
@@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin {
private static void configureDependencies(Project project) {
project.dependencies {
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
// we "upgrade" these optional deps to provided for plugins, since they will run
// with a full elasticsearch server that includes optional deps
provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
@@ -34,7 +34,8 @@ class PrecommitTasks {
List<Task> precommitTasks = [
configureForbiddenApis(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('jarHell', JarHellTask.class)]
project.tasks.create('jarHell', JarHellTask.class),
project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]

// tasks with just tests don't need dependency licenses, so this flag makes adding
// the task optional
@@ -0,0 +1,207 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit

import java.nio.file.Files
import java.nio.file.FileVisitResult
import java.nio.file.Path
import java.nio.file.SimpleFileVisitor
import java.nio.file.attribute.BasicFileAttributes

import org.gradle.api.DefaultTask
import org.gradle.api.artifacts.UnknownConfigurationException
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.TaskAction

import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.Project

/**
* Basic static checking to keep tabs on third party JARs
*/
public class ThirdPartyAuditTask extends DefaultTask {

// true to be lenient about MISSING CLASSES
private boolean missingClasses;

// patterns for classes to exclude, because we understand their issues
private String[] excludes = new String[0];

ThirdPartyAuditTask() {
dependsOn(project.configurations.testCompile)
description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors"
}

/**
* Set to true to be lenient with missing classes. By default this check will fail if it finds
* MISSING CLASSES. This means the set of jars is incomplete. However, in some cases
* this can be due to intentional exclusions that are well-tested and understood.
*/
public void setMissingClasses(boolean value) {
missingClasses = value;
}

/**
* Returns true if leniency about missing classes is enabled.
*/
public boolean isMissingClasses() {
return missingClasses;
}

/**
* classes that should be excluded from the scan,
* e.g. because we know what sheisty stuff those particular classes are up to.
*/
public void setExcludes(String[] classes) {
for (String s : classes) {
if (s.indexOf('*') != -1) {
throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!")
}
}
excludes = classes;
}

/**
* Returns current list of exclusions.
*/
public String[] getExcludes() {
return excludes;
}

@TaskAction
public void check() {
AntBuilder ant = new AntBuilder()

// we are noisy for many reasons, working around performance problems with forbidden-apis, dealing
// with warnings about missing classes, etc. so we use our own "quiet" AntBuilder
ant.project.buildListeners.each { listener ->
if (listener instanceof BuildLogger) {
listener.messageOutputLevel = Project.MSG_ERR;
}
};

// we only want third party dependencies.
FileCollection jars = project.configurations.testCompile.fileCollection({ dependency ->
dependency.group.startsWith("org.elasticsearch") == false
})

// we don't want provided dependencies, which we have already scanned. e.g. don't
// scan ES core's dependencies for every single plugin
try {
jars -= project.configurations.getByName("provided")
} catch (UnknownConfigurationException ignored) {}

// no dependencies matched, we are done
if (jars.isEmpty()) {
return;
}

ant.taskdef(name: "thirdPartyAudit",
classname: "de.thetaphi.forbiddenapis.ant.AntTask",
classpath: project.configurations.forbiddenApis.asPath)

// print which jars we are going to scan, always
// this is not the time to try to be succinct! Forbidden will print plenty on its own!
Set<String> names = new HashSet<>()
for (File jar : jars) {
names.add(jar.getName())
}
logger.error("[thirdPartyAudit] Scanning: " + names)

// warn that classes are missing
// TODO: move these to excludes list!
if (missingClasses) {
logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!")
}

// TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
// and then remove our temp dir afterwards. don't complain: try it yourself.
// we don't use gradle temp dir handling, just google it, or try it yourself.

File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit')

// clean up any previous mess (if we failed), then unzip everything to one directory
ant.delete(dir: tmpDir.getAbsolutePath())
tmpDir.mkdirs()
for (File jar : jars) {
ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath())
}

// convert exclusion class names to binary file names
String[] excludedFiles = new String[excludes.length];
for (int i = 0; i < excludes.length; i++) {
excludedFiles[i] = excludes[i].replace('.', '/') + ".class"
// check if the excluded file exists, if not, sure sign things are outdated
if (! new File(tmpDir, excludedFiles[i]).exists()) {
throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency")
}
}

// jarHellReprise
checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles)));

ant.thirdPartyAudit(internalRuntimeForbidden: true,
failOnUnsupportedJava: false,
failOnMissingClasses: !missingClasses,
classpath: project.configurations.testCompile.asPath) {
fileset(dir: tmpDir, excludes: excludedFiles.join(','))
}
// clean up our mess (if we succeed)
ant.delete(dir: tmpDir.getAbsolutePath())
}

/**
* check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk!
*/
private void checkSheistyClasses(Path root, Set<String> excluded) {
// system.parent = extensions loader.
// note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
// but groovy/gradle needs to work at all first!
ClassLoader ext = ClassLoader.getSystemClassLoader().getParent()
assert ext != null

Set<String> sheistySet = new TreeSet<>();
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String entry = root.relativize(file).toString()
if (entry.endsWith(".class")) {
if (ext.getResource(entry) != null) {
sheistySet.add(entry);
}
}
return FileVisitResult.CONTINUE;
}
});

// check if we are ok
if (sheistySet.isEmpty()) {
return;
}

// leniency against exclusions list
sheistySet.removeAll(excluded);

if (sheistySet.isEmpty()) {
logger.warn("[thirdPartyAudit] WARNING: JAR HELL WITH JDK! Expect insanely hard-to-debug problems!")
} else {
throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
}
}
}
@@ -42,7 +42,7 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {

// only setup tests to build
project.sourceSets.create('test')
project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

project.eclipse.classpath.sourceSets = [project.sourceSets.test]
project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
@@ -82,7 +82,7 @@ dependencies {
compile "net.java.dev.jna:jna:${versions.jna}", optional

if (isEclipse == false || project.path == ":core-tests") {
testCompile("org.elasticsearch:test-framework:${version}") {
testCompile("org.elasticsearch.test:framework:${version}") {
// tests use the locally compiled version of core
exclude group: 'org.elasticsearch', module: 'elasticsearch'
}
@@ -111,6 +111,14 @@ forbiddenPatterns {
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}

// classes are missing, e.g. org.jboss.marshalling.Marshaller
thirdPartyAudit.missingClasses = true
// uses internal sun ssl classes!
thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
]

// dependency license are currently checked in distribution
dependencyLicenses.enabled = false
@@ -268,11 +268,15 @@ public class Version {
public static final int V_2_0_1_ID = 2000199;
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_2_ID = 2000299;
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_3_ID = 2000399;
public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_1_0_ID = 2010099;
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_2_ID = 2010299;
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_3_0_0_ID = 3000099;
@@ -293,10 +297,14 @@ public class Version {
return V_3_0_0;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_2_ID:
return V_2_1_2;
case V_2_1_1_ID:
return V_2_1_1;
case V_2_1_0_ID:
return V_2_1_0;
case V_2_0_3_ID:
return V_2_0_3;
case V_2_0_2_ID:
return V_2_0_2;
case V_2_0_1_ID:
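Editor's note (not part of the diff): the numeric ids above appear to follow a simple positional encoding, which the following hedged Java sketch illustrates; the helper name is hypothetical.

// Illustrative only: 2010199 appears to decompose as 2.1.1 with build suffix 99 (V_2_1_1_ID).
static int versionId(int major, int minor, int revision, int build) {
    return major * 1000000 + minor * 10000 + revision * 100 + build;
}
// versionId(2, 1, 1, 99) == 2010199, versionId(3, 0, 0, 99) == 3000099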
@@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
@@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule {
registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
@@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;

public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> {

private final ScriptService scriptService;
@@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<

@Override
protected void doRun() throws Exception {
ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request);
ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
BytesReference processedTemplate = (BytesReference) executable.run();
RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
response.source(processedTemplate);
@@ -17,35 +17,28 @@
* under the License.
*/

package org.elasticsearch.rest;
package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.action.RestActionModule;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

import java.util.ArrayList;
import java.util.List;

/**
*
*/
public class RestModule extends AbstractModule {
public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {

private final Settings settings;
private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>();
public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
public static final String NAME = "indices:admin/synced_flush";

public void addRestAction(Class<? extends BaseRestHandler> restAction) {
restPluginsActions.add(restAction);
private SyncedFlushAction() {
super(NAME);
}

public RestModule(Settings settings) {
this.settings = settings;
}

@Override
protected void configure() {
bind(RestController.class).asEagerSingleton();
new RestActionModule(restPluginsActions).configure(binder());
public SyncedFlushResponse newResponse() {
return new SyncedFlushResponse();
}

@Override
public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new SyncedFlushRequestBuilder(client, this);
}
}
@@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;

import java.util.Arrays;

/**
* A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
* and writes the same sync id to primary and all copies.
*
* <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
*
* @see org.elasticsearch.client.Requests#flushRequest(String...)
* @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
* @see SyncedFlushResponse
*/
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {

public SyncedFlushRequest() {
}

/**
* Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
* The new request will inherit through headers and context from the original request that caused it.
*/
public SyncedFlushRequest(ActionRequest originalRequest) {
super(originalRequest);
}

/**
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
* be sync flushed.
*/
public SyncedFlushRequest(String... indices) {
super(indices);
}

@Override
public String toString() {
return "SyncedFlushRequest{" +
"indices=" + Arrays.toString(indices) + "}";
}
}
@@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;

public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {

public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
super(client, action, new SyncedFlushRequest());
}

public SyncedFlushRequestBuilder setIndices(String[] indices) {
super.request().indices(indices);
return this;
}

public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
super.request().indicesOptions(indicesOptions);
return this;
}
}
@@ -16,16 +16,25 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.flush;
package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap;
/**
* The result of performing a sync flush operation on all shards of multiple indices
*/
public class IndicesSyncedFlushResult implements ToXContent {
public class SyncedFlushResponse extends ActionResponse implements ToXContent {

final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
final ShardCounts shardCounts;
Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
ShardCounts shardCounts;

SyncedFlushResponse() {

public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
}

public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
// shardsResultPerIndex is never modified after it is passed to this
// constructor so this is safe even though shardsResultPerIndex is a
// ConcurrentHashMap
@@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
}

/** total number shards, including replicas, both assigned and unassigned */
/**
* total number shards, including replicas, both assigned and unassigned
*/
public int totalShards() {
return shardCounts.total;
}

/** total number of shards for which the operation failed */
/**
* total number of shards for which the operation failed
*/
public int failedShards() {
return shardCounts.failed;
}

/** total number of shards which were successfully sync-flushed */
/**
* total number of shards which were successfully sync-flushed
*/
public int successfulShards() {
return shardCounts.successful;
}
@@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.endObject();
continue;
}
Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards();
for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardEntry.getValue().failureReason());
@@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent {
return new ShardCounts(total, successful, failed);
}

static final class ShardCounts implements ToXContent {
static final class ShardCounts implements ToXContent, Streamable {

public final int total;
public final int successful;
public final int failed;
public int total;
public int successful;
public int failed;

ShardCounts(int total, int successful, int failed) {
this.total = total;
@@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.failed = failed;
}

ShardCounts() {

}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total);
@@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.field(Fields.FAILED, failed);
return builder;
}

@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readInt();
successful = in.readInt();
failed = in.readInt();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(total);
out.writeInt(successful);
out.writeInt(failed);
}
}

static final class Fields {
@@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent {
static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString REASON = new XContentBuilderString("reason");
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardCounts = new ShardCounts();
shardCounts.readFrom(in);
Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
int numShardsResults = in.readInt();
for (int i =0 ; i< numShardsResults; i++) {
String index = in.readString();
List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
int numShards = in.readInt();
for (int j =0; j< numShards; j++) {
shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in));
}
tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
}
shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardCounts.writeTo(out);
out.writeInt(shardsResultPerIndex.size());
for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
out.writeString(entry.getKey());
out.writeInt(entry.getValue().size());
for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
shardsSyncedFlushResult.writeTo(out);
}
}
}
}
@@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
* Synced flush Action.
*/
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {

SyncedFlushService syncedFlushService;

@Inject
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}

@Override
protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
}
}
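Editor's note (not part of the diff): a minimal Java sketch of how a caller might invoke the new synced flush action, assuming a connected Client and the Requests#syncedFlushRequest helper referenced in the SyncedFlushRequest javadoc above; the class and method names of the sketch itself are illustrative only.

import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;

public class SyncedFlushExample {
    public static void syncedFlush(Client client, String index) {
        // build the request via the helper referenced in the javadoc, then execute it
        SyncedFlushRequest request = Requests.syncedFlushRequest(index);
        SyncedFlushResponse response = client.admin().indices().syncedFlush(request).actionGet();
        // shard counts are exposed through totalShards()/successfulShards()/failedShards()
        System.out.println(response.successfulShards() + "/" + response.totalShards() + " shards sync-flushed");
    }
}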
@@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
private DiscoveryNode node;
private long version;
private String allocationId;
private Throwable storeException;
private Allocation allocation;
private AllocationStatus allocationStatus;

/**
* The status of the shard store with respect to the cluster
*/
public enum Allocation {
public enum AllocationStatus {

/**
* Allocated as primary
@@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon

private final byte id;

Allocation(byte id) {
AllocationStatus(byte id) {
this.id = id;
}

private static Allocation fromId(byte id) {
private static AllocationStatus fromId(byte id) {
switch (id) {
case 0: return PRIMARY;
case 1: return REPLICA;
case 2: return UNUSED;
default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
}
}

@@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
case 0: return "primary";
case 1: return "replica";
case 2: return "unused";
default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
}
}

private static Allocation readFrom(StreamInput in) throws IOException {
private static AllocationStatus readFrom(StreamInput in) throws IOException {
return fromId(in.readByte());
}

@@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private StoreStatus() {
}

public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) {
public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
this.node = node;
this.version = version;
this.allocation = allocation;
this.allocationId = allocationId;
this.allocationStatus = allocationStatus;
this.storeException = storeException;
}

@@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}

/**
* Version of the store, used to select the store that will be
* used as a primary.
* Version of the store
*/
public long getVersion() {
return version;
}

/**
* AllocationStatus id of the store, used to select the store that will be
* used as a primary.
*/
public String getAllocationId() {
return allocationId;
}

/**
* Exception while trying to open the
* shard index or from when the shard failed
@@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}

/**
* The allocation status of the store.
* {@link Allocation#PRIMARY} indicates a primary shard copy
* {@link Allocation#REPLICA} indicates a replica shard copy
* {@link Allocation#UNUSED} indicates an unused shard copy
* The allocationStatus status of the store.
* {@link AllocationStatus#PRIMARY} indicates a primary shard copy
* {@link AllocationStatus#REPLICA} indicates a replica shard copy
* {@link AllocationStatus#UNUSED} indicates an unused shard copy
*/
public Allocation getAllocation() {
return allocation;
public AllocationStatus getAllocationStatus() {
return allocationStatus;
}

static StoreStatus readStoreStatus(StreamInput in) throws IOException {
@@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void readFrom(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
version = in.readLong();
allocation = Allocation.readFrom(in);
allocationId = in.readOptionalString();
allocationStatus = AllocationStatus.readFrom(in);
if (in.readBoolean()) {
storeException = in.readThrowable();
}
@@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(version);
allocation.writeTo(out);
out.writeOptionalString(allocationId);
allocationStatus.writeTo(out);
if (storeException != null) {
out.writeBoolean(true);
out.writeThrowable(storeException);
@@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
node.toXContent(builder, params);
builder.field(Fields.VERSION, version);
builder.field(Fields.ALLOCATED, allocation.value());
builder.field(Fields.ALLOCATION_ID, allocationId);
builder.field(Fields.ALLOCATED, allocationStatus.value());
if (storeException != null) {
builder.startObject(Fields.STORE_EXCEPTION);
ElasticsearchException.toXContent(builder, params, storeException);
@@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
} else {
int compare = Long.compare(other.version, version);
if (compare == 0) {
return Integer.compare(allocation.id, other.allocation.id);
return Integer.compare(allocationStatus.id, other.allocationStatus.id);
}
return compare;
}
@@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
static final XContentBuilderString STORES = new XContentBuilderString("stores");
// StoreStatus fields
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
}
@@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
}
for (NodeGatewayStartedShards response : fetchResponse.responses) {
if (shardExistsInNode(response)) {
IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException()));
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
}
}
CollectionUtil.timSort(storeStatuses);
@@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
}

private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) {
private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
for (ShardRouting shardRouting : routingNodes.node(node.id())) {
ShardId shardId = shardRouting.shardId();
if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
if (shardRouting.primary()) {
return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY;
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
} else if (shardRouting.assignedToNode()) {
return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA;
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA;
} else {
return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
}
}
}
return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
}

/**
* A shard exists/existed in a node only if shard state file exists in the node
*/
private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
return response.storeException() != null || response.version() != -1;
return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
}

@Override
@@ -0,0 +1,203 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;

import org.elasticsearch.common.unit.TimeValue;

import java.util.Iterator;
import java.util.NoSuchElementException;

/**
* Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal
* thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally.
*
* Notes for implementing custom subclasses:
*
* The underlying mathematical principle of <code>BackoffPolicy</code> are progressions which can be either finite or infinite although
* the latter should not be used for retrying. A progression can be mapped to a <code>java.util.Iterator</code> with the following
* semantics:
*
* <ul>
* <li><code>#hasNext()</code> determines whether the progression has more elements. Return <code>true</code> for infinite progressions</li>
* <li><code>#next()</code> determines the next element in the progression, i.e. the next wait time period</li>
* </ul>
*
* Note that backoff policies are exposed as <code>Iterables</code> in order to be consumed multiple times.
*/
public abstract class BackoffPolicy implements Iterable<TimeValue> {
private static final BackoffPolicy NO_BACKOFF = new NoBackoff();

/**
* Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt.
*
* @return A backoff policy without any backoff period. The returned instance is thread safe.
*/
public static BackoffPolicy noBackoff() {
return NO_BACKOFF;
}

/**
* Creates an new constant backoff policy with the provided configuration.
*
* @param delay The delay defines how long to wait between retry attempts. Must not be null.
* Must be <= <code>Integer.MAX_VALUE</code> ms.
* @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
* @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) {
return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries);
}

/**
* Creates an new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking
* roughly 5.1 seconds in total.
*
* @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy exponentialBackoff() {
return exponentialBackoff(TimeValue.timeValueMillis(50), 8);
}

/**
* Creates an new exponential backoff policy with the provided configuration.
*
* @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null.
* Must be <= <code>Integer.MAX_VALUE</code> ms.
* @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
* @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) {
return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries);
}

private static TimeValue checkDelay(TimeValue delay) {
if (delay.millis() > Integer.MAX_VALUE) {
throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms");
}
return delay;
}

private static class NoBackoff extends BackoffPolicy {
@Override
public Iterator<TimeValue> iterator() {
return new Iterator<TimeValue>() {
@Override
public boolean hasNext() {
return false;
}

@Override
public TimeValue next() {
throw new NoSuchElementException("No backoff");
}
};
}
}

private static class ExponentialBackoff extends BackoffPolicy {
private final int start;

private final int numberOfElements;

private ExponentialBackoff(int start, int numberOfElements) {
assert start >= 0;
assert numberOfElements >= 0;
this.start = start;
this.numberOfElements = numberOfElements;
}

@Override
public Iterator<TimeValue> iterator() {
return new ExponentialBackoffIterator(start, numberOfElements);
}
}

private static class ExponentialBackoffIterator implements Iterator<TimeValue> {
private final int numberOfElements;

private final int start;

private int currentlyConsumed;

private ExponentialBackoffIterator(int start, int numberOfElements) {
this.start = start;
this.numberOfElements = numberOfElements;
}

@Override
public boolean hasNext() {
return currentlyConsumed < numberOfElements;
}

@Override
public TimeValue next() {
if (!hasNext()) {
throw new NoSuchElementException("Only up to " + numberOfElements + " elements");
}
int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1);
currentlyConsumed++;
return TimeValue.timeValueMillis(result);
}
}

private static final class ConstantBackoff extends BackoffPolicy {
private final TimeValue delay;

private final int numberOfElements;

public ConstantBackoff(TimeValue delay, int numberOfElements) {
assert numberOfElements >= 0;
this.delay = delay;
this.numberOfElements = numberOfElements;
}

@Override
public Iterator<TimeValue> iterator() {
return new ConstantBackoffIterator(delay, numberOfElements);
}
}

private static final class ConstantBackoffIterator implements Iterator<TimeValue> {
private final TimeValue delay;
private final int numberOfElements;
private int curr;

public ConstantBackoffIterator(TimeValue delay, int numberOfElements) {
this.delay = delay;
this.numberOfElements = numberOfElements;
}

@Override
public boolean hasNext() {
return curr < numberOfElements;
}

@Override
public TimeValue next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
curr++;
return delay;
}
}
}
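Editor's note (not part of the diff): a minimal Java sketch of the iterator contract described in the BackoffPolicy javadoc above; the retry helper and the "rejection as RuntimeException" assumption are illustrative only. With the default exponentialBackoff() policy the formula above yields waits of roughly 50, 60, 80, 150, 280, 580, 1250 and 2740 ms, about 5.1 s in total, consistent with the javadoc.

import java.util.Iterator;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.common.unit.TimeValue;

public class BackoffExample {
    static boolean runWithRetries(Runnable operation) throws InterruptedException {
        // each retry sequence gets its own iterator; the policy itself is reusable
        Iterator<TimeValue> backoff = BackoffPolicy.exponentialBackoff().iterator();
        while (true) {
            try {
                operation.run();
                return true;
            } catch (RuntimeException rejected) {
                if (!backoff.hasNext()) {
                    return false;                       // retries exhausted
                }
                Thread.sleep(backoff.next().millis());  // wait before the next attempt
            }
        }
    }
}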
@@ -19,7 +19,6 @@
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable {
    /**
     * A listener for the execution.
     */
    public static interface Listener {
    public interface Listener {

        /**
         * Callback before the bulk is executed.

@@ -79,6 +78,7 @@ public class BulkProcessor implements Closeable {
        private int bulkActions = 1000;
        private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
        private TimeValue flushInterval = null;
        private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();

        /**
         * Creates a builder of bulk processor with the client to use and the listener that will be used

@@ -136,11 +136,27 @@ public class BulkProcessor implements Closeable {
            return this;
        }

        /**
         * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
         * in case they have failed due to resource constraints (i.e. a thread pool was full).
         *
         * The default is to back off exponentially.
         *
         * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff()
         */
        public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) {
            if (backoffPolicy == null) {
                throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()");
            }
            this.backoffPolicy = backoffPolicy;
            return this;
        }

        /**
         * Builds a new bulk processor.
         */
        public BulkProcessor build() {
            return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
            return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
        }
    }
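A hedged sketch of how the new setBackoffPolicy option slots into the builder; the listener body, index name, and tuning values are placeholders, and every builder method other than setBackoffPolicy comes from the pre-existing BulkProcessor API rather than from this hunk.

```java
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

public class BulkProcessorBackoffExample {
    public static BulkProcessor build(Client client) {
        return BulkProcessor.builder(client, new BulkProcessor.Listener() {
                    @Override
                    public void beforeBulk(long executionId, BulkRequest request) { }

                    @Override
                    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }

                    @Override
                    public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
                })
                .setBulkActions(1000)
                // Retry bulks rejected by a saturated thread pool: 100 ms constant delay, at most 3 retries.
                .setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(100), 3))
                .build();
    }

    public static void index(BulkProcessor processor) {
        // "my-index" and the document body are illustrative values only.
        processor.add(new IndexRequest("my-index", "doc").source("{\"field\":\"value\"}"));
    }
}
```

Passing BackoffPolicy.noBackoff() instead restores the old fail-fast behaviour, as the exception message above suggests.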
@@ -148,42 +164,31 @@ public class BulkProcessor implements Closeable {
        if (client == null) {
            throw new NullPointerException("The client you specified while building a BulkProcessor is null");
        }


        return new Builder(client, listener);
    }

    private final Client client;
    private final Listener listener;

    private final String name;

    private final int concurrentRequests;
    private final int bulkActions;
    private final long bulkSize;
    private final TimeValue flushInterval;

    private final Semaphore semaphore;

    private final ScheduledThreadPoolExecutor scheduler;
    private final ScheduledFuture scheduledFuture;

    private final AtomicLong executionIdGen = new AtomicLong();

    private BulkRequest bulkRequest;
    private final BulkRequestHandler bulkRequestHandler;

    private volatile boolean closed = false;

    BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
        this.client = client;
        this.listener = listener;
        this.name = name;
        this.concurrentRequests = concurrentRequests;
    BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
        this.bulkActions = bulkActions;
        this.bulkSize = bulkSize.bytes();

        this.semaphore = new Semaphore(concurrentRequests);
        this.bulkRequest = new BulkRequest();
        this.bulkRequestHandler = (concurrentRequests == 0) ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);

        this.flushInterval = flushInterval;
        if (flushInterval != null) {
            this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
            this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);

@@ -231,14 +236,7 @@ public class BulkProcessor implements Closeable {
        if (bulkRequest.numberOfActions() > 0) {
            execute();
        }
        if (this.concurrentRequests < 1) {
            return true;
        }
        if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
            semaphore.release(this.concurrentRequests);
            return true;
        }
        return false;
        return this.bulkRequestHandler.awaitClose(timeout, unit);
    }

    /**
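Because close handling now defers entirely to the request handler, a caller that wants to drain in-flight bulks (including any retries) can rely on awaitClose rather than flush-and-sleep. A minimal sketch, assuming awaitClose(long, TimeUnit) is the public method whose body this hunk rewrites:

```java
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BulkProcessor;

public class BulkProcessorShutdown {
    public static void shutdown(BulkProcessor processor) throws InterruptedException {
        // Flushes any buffered requests, then waits until outstanding bulks
        // (and their retries) have completed or the timeout elapses.
        boolean terminated = processor.awaitClose(30, TimeUnit.SECONDS);
        if (!terminated) {
            // Some bulk requests were still in flight when the timeout expired.
            System.err.println("bulk processor did not shut down cleanly within 30s");
        }
    }
}
```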
@@ -308,58 +306,7 @@ public class BulkProcessor implements Closeable {
        final long executionId = executionIdGen.incrementAndGet();

        this.bulkRequest = new BulkRequest();

        if (concurrentRequests == 0) {
            // execute in a blocking fashion...
            boolean afterCalled = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet();
                afterCalled = true;
                listener.afterBulk(executionId, bulkRequest, bulkItemResponses);
            } catch (Exception e) {
                if (!afterCalled) {
                    listener.afterBulk(executionId, bulkRequest, e);
                }
            }
        } else {
            boolean success = false;
            boolean acquired = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                semaphore.acquire();
                acquired = true;
                client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
                    @Override
                    public void onResponse(BulkResponse response) {
                        try {
                            listener.afterBulk(executionId, bulkRequest, response);
                        } finally {
                            semaphore.release();
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        try {
                            listener.afterBulk(executionId, bulkRequest, e);
                        } finally {
                            semaphore.release();
                        }
                    }
                });
                success = true;
            } catch (InterruptedException e) {
                Thread.interrupted();
                listener.afterBulk(executionId, bulkRequest, e);
            } catch (Throwable t) {
                listener.afterBulk(executionId, bulkRequest, t);
            } finally {
                if (!success && acquired) { // if we fail on client.bulk() release the semaphore
                    semaphore.release();
                }
            }
        }
        this.bulkRequestHandler.execute(bulkRequest, executionId);
    }

    private boolean isOverTheLimit() {
@@ -0,0 +1,160 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

/**
 * Abstracts the low-level details of bulk request handling
 */
abstract class BulkRequestHandler {
    protected final ESLogger logger;
    protected final Client client;

    protected BulkRequestHandler(Client client) {
        this.client = client;
        this.logger = Loggers.getLogger(getClass(), client.settings());
    }

    public abstract void execute(BulkRequest bulkRequest, long executionId);

    public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;

    public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
        return new SyncBulkRequestHandler(client, backoffPolicy, listener);
    }

    public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
        return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests);
    }

    private static class SyncBulkRequestHandler extends BulkRequestHandler {
        private final BulkProcessor.Listener listener;
        private final BackoffPolicy backoffPolicy;

        public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
            super(client);
            this.backoffPolicy = backoffPolicy;
            this.listener = listener;
        }

        @Override
        public void execute(BulkRequest bulkRequest, long executionId) {
            boolean afterCalled = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                BulkResponse bulkResponse = Retry
                        .on(EsRejectedExecutionException.class)
                        .policy(backoffPolicy)
                        .withSyncBackoff(client, bulkRequest);
                afterCalled = true;
                listener.afterBulk(executionId, bulkRequest, bulkResponse);
            } catch (Exception e) {
                if (!afterCalled) {
                    logger.warn("Failed to execute bulk request {}.", e, executionId);
                    listener.afterBulk(executionId, bulkRequest, e);
                }
            }
        }

        @Override
        public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
            // we are "closed" immediately as there is no request in flight
            return true;
        }
    }

    private static class AsyncBulkRequestHandler extends BulkRequestHandler {
        private final BackoffPolicy backoffPolicy;
        private final BulkProcessor.Listener listener;
        private final Semaphore semaphore;
        private final int concurrentRequests;

        private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
            super(client);
            this.backoffPolicy = backoffPolicy;
            assert concurrentRequests > 0;
            this.listener = listener;
            this.concurrentRequests = concurrentRequests;
            this.semaphore = new Semaphore(concurrentRequests);
        }

        @Override
        public void execute(BulkRequest bulkRequest, long executionId) {
            boolean bulkRequestSetupSuccessful = false;
            boolean acquired = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                semaphore.acquire();
                acquired = true;
                Retry.on(EsRejectedExecutionException.class)
                        .policy(backoffPolicy)
                        .withAsyncBackoff(client, bulkRequest, new ActionListener<BulkResponse>() {
                            @Override
                            public void onResponse(BulkResponse response) {
                                try {
                                    listener.afterBulk(executionId, bulkRequest, response);
                                } finally {
                                    semaphore.release();
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                try {
                                    listener.afterBulk(executionId, bulkRequest, e);
                                } finally {
                                    semaphore.release();
                                }
                            }
                        });
                bulkRequestSetupSuccessful = true;
            } catch (InterruptedException e) {
                // This is intentionally wrong to avoid changing the behaviour implicitly with this PR. It will be fixed in #14833
                Thread.interrupted();
                listener.afterBulk(executionId, bulkRequest, e);
            } catch (Throwable t) {
                logger.warn("Failed to execute bulk request {}.", t, executionId);
                listener.afterBulk(executionId, bulkRequest, t);
            } finally {
                if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
                    semaphore.release();
                }
            }
        }

        @Override
        public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
            if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
                semaphore.release(this.concurrentRequests);
                return true;
            }
            return false;
        }
    }
}
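The two handlers differ mainly in where the bulk runs: syncHandler(...) blocks the thread that feeds the processor until the bulk and any retries finish, while asyncHandler(...) caps in-flight bulks with a semaphore sized to the concurrency limit. A sketch of how that choice is driven from the processor's builder; builder(...) and setConcurrentRequests belong to the existing BulkProcessor API, not to this file:

```java
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.Client;

public class BulkHandlerChoice {
    // Bulks (and their retries) run on the thread that calls BulkProcessor#add:
    // internally this selects BulkRequestHandler.syncHandler(...).
    public static BulkProcessor synchronous(Client client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client, listener).setConcurrentRequests(0).build();
    }

    // Up to four bulks in flight at once: internally this selects
    // BulkRequestHandler.asyncHandler(...), which gates submissions with a Semaphore(4).
    public static BulkProcessor concurrent(Client client, BulkProcessor.Listener listener) {
        return BulkProcessor.builder(client, listener).setConcurrentRequests(4).build();
    }
}
```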
@@ -0,0 +1,237 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.*;
import java.util.function.Predicate;

/**
 * Encapsulates synchronous and asynchronous retry logic.
 */
class Retry {
    private final Class<? extends Throwable> retryOnThrowable;

    private BackoffPolicy backoffPolicy;

    public static Retry on(Class<? extends Throwable> retryOnThrowable) {
        return new Retry(retryOnThrowable);
    }

    /**
     * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries.
     */
    public Retry policy(BackoffPolicy backoffPolicy) {
        this.backoffPolicy = backoffPolicy;
        return this;
    }

    Retry(Class<? extends Throwable> retryOnThrowable) {
        this.retryOnThrowable = retryOnThrowable;
    }

    /**
     * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the
     * provided listener.
     *
     * @param client Client invoking the bulk request.
     * @param bulkRequest The bulk request that should be executed.
     * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
     */
    public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
        AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener);
        r.execute(bulkRequest);
    }

    /**
     * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception.
     *
     * @param client Client invoking the bulk request.
     * @param bulkRequest The bulk request that should be executed.
     * @return the bulk response as returned by the client.
     * @throws Exception Any exception thrown by the callable.
     */
    public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception {
        return SyncRetryHandler
                .create(retryOnThrowable, backoffPolicy, client)
                .executeBlocking(bulkRequest)
                .actionGet();
    }

    static class AbstractRetryHandler implements ActionListener<BulkResponse> {
        private final ESLogger logger;
        private final Client client;
        private final ActionListener<BulkResponse> listener;
        private final Iterator<TimeValue> backoff;
        private final Class<? extends Throwable> retryOnThrowable;
        // Access only when holding a client-side lock, see also #addResponses()
        private final List<BulkItemResponse> responses = new ArrayList<>();
        private final long startTimestampNanos;
        // needed to construct the next bulk request based on the response to the previous one
        // volatile as we're called from a scheduled thread
        private volatile BulkRequest currentBulkRequest;
        private volatile ScheduledFuture<?> scheduledRequestFuture;

        public AbstractRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
            this.retryOnThrowable = retryOnThrowable;
            this.backoff = backoffPolicy.iterator();
            this.client = client;
            this.listener = listener;
            this.logger = Loggers.getLogger(getClass(), client.settings());
            // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
            this.startTimestampNanos = System.nanoTime();
        }

        @Override
        public void onResponse(BulkResponse bulkItemResponses) {
            if (!bulkItemResponses.hasFailures()) {
                // we're done here, include all responses
                addResponses(bulkItemResponses, (r -> true));
                finishHim();
            } else {
                if (canRetry(bulkItemResponses)) {
                    addResponses(bulkItemResponses, (r -> !r.isFailed()));
                    retry(createBulkRequestForRetry(bulkItemResponses));
                } else {
                    addResponses(bulkItemResponses, (r -> true));
                    finishHim();
                }
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                listener.onFailure(e);
            } finally {
                FutureUtils.cancel(scheduledRequestFuture);
            }
        }

        private void retry(BulkRequest bulkRequestForRetry) {
            assert backoff.hasNext();
            TimeValue next = backoff.next();
            logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
            scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry)));
        }

        private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
            BulkRequest requestToReissue = new BulkRequest();
            int index = 0;
            for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) {
                if (bulkItemResponse.isFailed()) {
                    requestToReissue.add(currentBulkRequest.requests().get(index));
                }
                index++;
            }
            return requestToReissue;
        }

        private boolean canRetry(BulkResponse bulkItemResponses) {
            if (!backoff.hasNext()) {
                return false;
            }
            for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
                if (bulkItemResponse.isFailed()) {
                    Throwable cause = bulkItemResponse.getFailure().getCause();
                    Throwable rootCause = ExceptionsHelper.unwrapCause(cause);
                    if (!rootCause.getClass().equals(retryOnThrowable)) {
                        return false;
                    }
                }
            }
            return true;
        }

        private void finishHim() {
            try {
                listener.onResponse(getAccumulatedResponse());
            } finally {
                FutureUtils.cancel(scheduledRequestFuture);
            }
        }

        private void addResponses(BulkResponse response, Predicate<BulkItemResponse> filter) {
            for (BulkItemResponse bulkItemResponse : response) {
                if (filter.test(bulkItemResponse)) {
                    // Use client-side lock here to avoid visibility issues. This method may be called multiple times
                    // (based on how many retries we have to issue) and relying that the response handling code will be
                    // scheduled on the same thread is fragile.
                    synchronized (responses) {
                        responses.add(bulkItemResponse);
                    }
                }
            }
        }

        private BulkResponse getAccumulatedResponse() {
            BulkItemResponse[] itemResponses;
            synchronized (responses) {
                itemResponses = responses.toArray(new BulkItemResponse[1]);
            }
            long stopTimestamp = System.nanoTime();
            long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis();
            return new BulkResponse(itemResponses, totalLatencyMs);
        }

        public void execute(BulkRequest bulkRequest) {
            this.currentBulkRequest = bulkRequest;
            client.bulk(bulkRequest, this);
        }
    }

    static class AsyncRetryHandler extends AbstractRetryHandler {
        public AsyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
            super(retryOnThrowable, backoffPolicy, client, listener);
        }
    }

    static class SyncRetryHandler extends AbstractRetryHandler {
        private final PlainActionFuture<BulkResponse> actionFuture;

        public static SyncRetryHandler create(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client) {
            PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture();
            return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture);
        }

        public SyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture<BulkResponse> actionFuture) {
            super(retryOnThrowable, backoffPolicy, client, actionFuture);
            this.actionFuture = actionFuture;
        }

        public ActionFuture<BulkResponse> executeBlocking(BulkRequest bulkRequest) {
            super.execute(bulkRequest);
            return actionFuture;
        }
    }
}
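Retry is package-private, so it is only reachable from org.elasticsearch.action.bulk, but its fluent shape is easiest to see in isolation. A sketch of the synchronous path that SyncBulkRequestHandler uses above; the index name and document are placeholders, and constantBackoff(TimeValue, int) is assumed to be the factory backing the ConstantBackoff class shown earlier in this diff:

```java
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

// Placed in the same package because Retry is package-private.
public class RetrySketch {
    static BulkResponse indexWithRetry(Client client) throws Exception {
        BulkRequest request = new BulkRequest();
        // "my-index" and the document body are illustrative only.
        request.add(new IndexRequest("my-index", "doc").source("{\"field\":\"value\"}"));
        // Retry only rejections from a saturated thread pool: 50 ms between attempts, at most 5 retries.
        return Retry.on(EsRejectedExecutionException.class)
                .policy(BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 5))
                .withSyncBackoff(client, request);
    }
}
```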
@@ -20,7 +20,6 @@
package org.elasticsearch.action.percolate;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;

@@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
@@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
        return this;
    }

    /**
     * Should the query be profiled. Defaults to <code>false</code>
     */
    public SearchRequestBuilder setProfile(boolean profile) {
        sourceBuilder().profile(profile);
        return this;
    }

    @Override
    public String toString() {
        if (request.source() != null) {
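A brief sketch of switching profiling on through the new builder method; the index, field, and query are placeholders, and everything except setProfile(boolean) comes from the existing search API rather than this hunk:

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class ProfiledSearch {
    public static SearchResponse run(Client client) {
        return client.prepareSearch("my-index")                         // "my-index" is illustrative
                .setQuery(QueryBuilders.matchQuery("field", "value"))   // placeholder query
                .setProfile(true)                                       // ask each shard to record timing breakdowns
                .get();
    }
}
```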
@@ -20,6 +20,7 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;

@@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;

@@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
        this.scrollId = scrollId;
    }

    /**
     * If profiling was enabled, this returns an object containing the profile results from
     * each shard. If profiling was not enabled, this will return null
     *
     * @return The profile results or null
     */
    public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
        return internalResponse.profile();
    }

    static final class Fields {
        static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
        static final XContentBuilderString TOOK = new XContentBuilderString("took");
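On the response side, the new accessor can be traversed with no further API surface than what is declared above; a sketch that only walks the returned map:

```java
import java.util.List;
import java.util.Map;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.profile.ProfileShardResult;

public class ProfilePrinter {
    public static void print(SearchResponse response) {
        Map<String, List<ProfileShardResult>> profiles = response.getProfileResults();
        if (profiles == null) {
            return; // profiling was not requested for this search
        }
        for (Map.Entry<String, List<ProfileShardResult>> entry : profiles.entrySet()) {
            System.out.println("shard " + entry.getKey() + ": " + entry.getValue().size() + " profiled result(s)");
        }
    }
}
```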
@ -19,8 +19,16 @@
|
|||
|
||||
package org.elasticsearch.action.support.broadcast.node;
|
||||
|
||||
import org.elasticsearch.action.*;
|
||||
import org.elasticsearch.action.support.*;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.TransportActions;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
|
||||
|
@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
import org.elasticsearch.transport.BaseTransportResponseHandler;
|
||||
import org.elasticsearch.transport.NodeShouldNotConnectException;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                    e.setIndex(shardRouting.getIndex());
                    e.setShard(shardRouting.shardId());
                    shardResults[shardIndex] = e;
                    logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
                    if (TransportActions.isShardNotAvailableException(t)) {
                        if (logger.isTraceEnabled()) {
                            logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
                        }
                    } else {
                        if (logger.isDebugEnabled()) {
                            logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
                        }
                    }
                }
            }
        }
@@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
        @Override
        public void onFailure(Throwable t) {
            if (t instanceof RetryOnReplicaException) {
                logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request);
                logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
                observer.waitForNextChange(new ClusterStateObserver.Listener() {
                    @Override
                    public void onNewClusterState(ClusterState state) {
                        threadPool.executor(executor).execute(AsyncReplicaAction.this);
                        // Forking a thread on local node via transport service so that custom transport service have an
                        // opportunity to execute custom logic before the replica operation begins
                        String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
                        TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
                        transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
                    }

                    @Override
@@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

@@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent {
    private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
        try {
            if (scriptService != null) {
                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request);
                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap());
                script.setNextVar("ctx", ctx);
                script.run();
                // we need to unwrap the ctx...
@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
|
|||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
|
||||
|
@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
|
|||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
|
||||
|
@@ -390,6 +393,29 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     */
    FlushRequestBuilder prepareFlush(String... indices);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     *
     * @param request The sync flush request
     * @return A result future
     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
     */
    ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     *
     * @param request The sync flush request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
     */
    void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     */
    SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);

    /**
     * Explicitly force merge one or more indices into the number of segments.
     *
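A sketch of invoking the new synced-flush API through the builder variant declared above; "my-index" is a placeholder:

```java
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.client.Client;

public class SyncedFlushExample {
    public static SyncedFlushResponse flush(Client client) {
        // Writes a sync id to each shard copy so that a later recovery can be skipped when copies match.
        return client.admin().indices().prepareSyncedFlush("my-index").get();
    }
}
```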
@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
|
|||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
|
@ -131,7 +132,7 @@ public class Requests {
|
|||
public static SuggestRequest suggestRequest(String... indices) {
|
||||
return new SuggestRequest(indices);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Creates a search request against one or more indices. Note, the search source must be set either using the
|
||||
* actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
|
||||
|
@@ -265,6 +266,17 @@ public class Requests {
        return new FlushRequest(indices);
    }

    /**
     * Creates a synced flush indices request.
     *
     * @param indices The indices to sync flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
     * @return The synced flush request
     * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
     */
    public static SyncedFlushRequest syncedFlushRequest(String... indices) {
        return new SyncedFlushRequest(indices);
    }

    /**
     * Creates a force merge request.
     *
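The request-object helper above pairs naturally with the listener-based method on IndicesAdminClient; a sketch, again with a placeholder index name:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;

public class SyncedFlushListenerExample {
    public static void flushAsync(Client client) {
        SyncedFlushRequest request = Requests.syncedFlushRequest("my-index"); // "my-index" is illustrative
        client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
            @Override
            public void onResponse(SyncedFlushResponse response) {
                // inspect per-shard results here
            }

            @Override
            public void onFailure(Throwable e) {
                // the synced flush could not be executed
            }
        });
    }
}
```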
@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
|
|||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
|
||||
|
@@ -1315,6 +1319,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
    }

    @Override
    public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
        return execute(SyncedFlushAction.INSTANCE, request);
    }

    @Override
    public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
        execute(SyncedFlushAction.INSTANCE, request, listener);
    }

    @Override
    public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
        return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
    }

    @Override
    public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
        execute(GetMappingsAction.INSTANCE, request, listener);
||||
|
|
|
@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers;
|
|||
import org.elasticsearch.client.transport.support.TransportProxyClient;
|
||||
import org.elasticsearch.cluster.ClusterNameModule;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Injector;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
|
@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.settings.SettingsFilter;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.EnvironmentModule;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
|
||||
import org.elasticsearch.monitor.MonitorService;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.node.settings.NodeSettingsService;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.plugins.PluginsModule;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
import org.elasticsearch.search.SearchModule;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPoolModule;
|
||||
import org.elasticsearch.transport.TransportModule;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.netty.NettyTransport;
|
||||
|
||||
|
@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
|||
* The transport client allows to create a client that is not part of the cluster, but simply connects to one
|
||||
* or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
|
||||
* <p>
|
||||
* The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is
|
||||
* The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is
|
||||
* started in client mode (only connects, no bind).
|
||||
*/
|
||||
public class TransportClient extends AbstractClient {
|
||||
|
@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient {
|
|||
}
|
||||
modules.add(new PluginsModule(pluginsService));
|
||||
modules.add(new SettingsModule(this.settings, settingsFilter ));
|
||||
modules.add(new NetworkModule(networkService));
|
||||
modules.add(new NetworkModule(networkService, this.settings, true));
|
||||
modules.add(new ClusterNameModule(this.settings));
|
||||
modules.add(new ThreadPoolModule(threadPool));
|
||||
modules.add(new TransportModule(this.settings));
|
||||
modules.add(new SearchModule() {
|
||||
@Override
|
||||
protected void configure() {
|
||||
|
@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient {
|
|||
}
|
||||
});
|
||||
modules.add(new ActionModule(true));
|
||||
modules.add(new ClientTransportModule());
|
||||
modules.add(new CircuitBreakerModule(this.settings));
|
||||
|
||||
pluginsService.processModules(modules);
|
||||
|
|
|
@@ -37,6 +37,13 @@ public interface ClusterStateTaskExecutor<T> {
        return true;
    }

    /**
     * Callback invoked after new cluster state is published. Note that
     * this method is not invoked if the cluster state was not updated.
     */
    default void clusterStatePublished(ClusterState newClusterState) {
    }

    /**
     * Represents the result of a batched execution of cluster state update tasks
     * @param <T> the type of the cluster state update task
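To make the new hook concrete, here is a minimal sketch of an executor that acknowledges its tasks and reacts only after publication; the String task type and the println are placeholders, and the BatchResult builder usage mirrors the pattern that appears later in this same diff:

```java
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

// A no-op executor that marks every task successful and logs once the
// updated state has actually been published to the cluster.
public class LoggingExecutor implements ClusterStateTaskExecutor<String> {

    @Override
    public BatchResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
        // No state change; acknowledge all tasks against the unchanged state.
        BatchResult.Builder<String> builder = BatchResult.builder();
        return builder.successes(tasks).build(currentState);
    }

    @Override
    public void clusterStatePublished(ClusterState newClusterState) {
        // Invoked only when the state really changed and was published.
        System.out.println("published cluster state version " + newClusterState.version());
    }
}
```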
@ -20,7 +20,12 @@
|
|||
package org.elasticsearch.cluster.action.shard;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskConfig;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.NotMasterException;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
|
@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
import org.elasticsearch.transport.EmptyTransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class ShardStateAction extends AbstractComponent {
|
||||
|
||||
public class ShardStateAction extends AbstractComponent {
|
||||
public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
|
||||
public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";
|
||||
|
||||
|
@ -97,18 +107,101 @@ public class ShardStateAction extends AbstractComponent {
|
|||
options = TransportRequestOptions.builder().withTimeout(timeout).build();
|
||||
}
|
||||
transportService.sendRequest(masterNode,
|
||||
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
public void handleResponse(TransportResponse.Empty response) {
|
||||
listener.onSuccess();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry);
|
||||
listener.onShardFailedFailure(masterNode, exp);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
@Override
|
||||
public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
|
||||
handleShardFailureOnMaster(request, new ClusterStateTaskListener() {
|
||||
@Override
|
||||
public void handleResponse(TransportResponse.Empty response) {
|
||||
listener.onSuccess();
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting);
|
||||
try {
|
||||
channel.sendResponse(t);
|
||||
} catch (Throwable channelThrowable) {
|
||||
logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.warn("failed to send failed shard to {}", exp, masterNode);
|
||||
listener.onShardFailedFailure(masterNode, exp);
|
||||
public void onNoLongerMaster(String source) {
|
||||
logger.error("no longer master while failing shard [{}]", request.shardRouting);
|
||||
try {
|
||||
channel.sendResponse(new NotMasterException(source));
|
||||
} catch (Throwable channelThrowable) {
|
||||
logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
try {
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
} catch (Throwable channelThrowable) {
|
||||
logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting);
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
|
||||
List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
|
||||
}
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
batchResultBuilder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
batchResultBuilder.failures(tasks, t);
|
||||
}
|
||||
return batchResultBuilder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStatePublished(ClusterState newClusterState) {
|
||||
int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size();
|
||||
if (numberOfUnassignedShards > 0) {
|
||||
String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(reason + ", scheduling a reroute");
|
||||
}
|
||||
routingService.reroute(reason);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
|
||||
|
||||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
shardFailedClusterStateHandler,
|
||||
listener);
|
||||
}
|
||||
|
||||
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
|
||||
|
@ -124,74 +217,20 @@ public class ShardStateAction extends AbstractComponent {
|
|||
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
|
||||
logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
transportService.sendRequest(masterNode,
|
||||
SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.warn("failed to send shard started to [{}]", exp, masterNode);
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
|
||||
|
||||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
shardFailedClusterStateHandler,
|
||||
shardFailedClusterStateHandler);
|
||||
}
|
||||
|
||||
class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
|
||||
}
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.warn("failed to send shard started to [{}]", exp, masterNode);
|
||||
}
|
||||
batchResultBuilder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
batchResultBuilder.failures(tasks, t);
|
||||
}
|
||||
return batchResultBuilder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
|
||||
logger.trace("unassigned shards after shard failures. scheduling a reroute.");
|
||||
routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
|
||||
new ShardStartedClusterStateHandler();
|
||||
|
||||
private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.debug("received shard started for {}", shardRoutingEntry);
|
||||
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.URGENT),
|
||||
shardStartedClusterStateHandler,
|
||||
shardStartedClusterStateHandler);
|
||||
class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
@Override
|
||||
public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
|
||||
handleShardStartedOnMaster(request);
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
||||
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
|
@ -223,26 +262,20 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler();
|
||||
|
||||
@Override
|
||||
public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
|
||||
handleShardFailureOnMaster(request);
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.debug("received shard started for {}", shardRoutingEntry);
|
||||
|
||||
class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
|
||||
@Override
|
||||
public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
|
||||
shardStartedOnMaster(request);
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.URGENT),
|
||||
shardStartedClusterStateHandler,
|
||||
shardStartedClusterStateHandler);
|
||||
}
|
||||
|
||||
public static class ShardRoutingEntry extends TransportRequest {
|
||||
|
||||
ShardRouting shardRouting;
|
||||
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||
String message;
|
||||
|
@@ -283,8 +316,13 @@ public class ShardStateAction extends AbstractComponent {
    }

    public interface Listener {
        default void onSuccess() {}
        default void onShardFailedNoMaster() {}
        default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {}
        default void onSuccess() {
        }

        default void onShardFailedNoMaster() {
        }

        default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
        }
    }
}
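Because every Listener callback now has an empty default body, implementers only override what they need; a sketch with placeholder log messages:

```java
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.transport.TransportException;

public class LoggingShardFailureListener implements ShardStateAction.Listener {

    @Override
    public void onSuccess() {
        System.out.println("master acknowledged the shard failure");
    }

    @Override
    public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
        // onShardFailedNoMaster() keeps its default empty body.
        System.err.println("could not notify master " + master + ": " + e.getMessage());
    }
}
```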
@@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
        public int numberOfReplicas() {
            return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
        }


        public Builder creationDate(long creationDate) {
            settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
            return this;
@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* Service responsible for submitting open/close index requests
|
||||
|
@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
|
||||
IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
|
||||
for (IndexShardRoutingTable shard : indexRoutingTable) {
|
||||
for (ShardRouting shardRouting : shard) {
|
||||
if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) {
|
||||
throw new IndexPrimaryShardNotAllocatedException(new Index(index));
|
||||
}
|
||||
}
|
||||
}
|
||||
indicesToClose.add(index);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
|
|||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
import org.elasticsearch.percolator.PercolatorService;
|
||||
|
@ -237,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
|
||||
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
String mappingType = request.type();
|
||||
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
|
@ -246,16 +245,13 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
|
||||
}
|
||||
// this will just throw exceptions in case of problems
|
||||
existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
|
@ -274,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
}
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
if (mappingType == null) {
|
||||
mappingType = newMapper.type();
|
||||
} else if (mappingType.equals(newMapper.type()) == false) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
}
|
||||
assert mappingType != null;
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
}
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
for (String index : request.indices()) {
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
|
||||
if (existingMapper != null) {
|
||||
existingSource = existingMapper.mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
|
@ -322,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
logger.info("[{}] create_mapping [{}]", index, mappingType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;

@@ -31,7 +30,14 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;

import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

/**

@@ -78,7 +84,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>());
nodesToShards.put(cursor.value.id(), new ArrayList<>());
}

// fill in the inverse of node -> shards allocated

@@ -91,21 +97,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// by the ShardId, as this is common for primary and replicas.
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
if (shard.assignedToNode()) {
List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
if (entries == null) {
entries = new ArrayList<>();
nodesToShards.put(shard.currentNodeId(), entries);
}
List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>());
final ShardRouting sr = getRouting(shard, readOnly);
entries.add(sr);
assignedShardsAdd(sr);
if (shard.relocating()) {
entries = nodesToShards.get(shard.relocatingNodeId());
relocatingShards++;
if (entries == null) {
entries = new ArrayList<>();
nodesToShards.put(shard.relocatingNodeId(), entries);
}
entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>());
// add the counterpart shard with relocatingNodeId reflecting the source from which
// it's relocating from.
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();

@@ -121,7 +119,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
inactiveShardCount++;
}
} else {
final ShardRouting sr = getRouting(shard, readOnly);
final ShardRouting sr = getRouting(shard, readOnly);
assignedShardsAdd(sr);
unassignedShards.add(sr);
}

@@ -449,12 +447,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// no unassigned
return;
}
List<ShardRouting> shards = assignedShards.get(shard.shardId());
if (shards == null) {
shards = new ArrayList<>();
assignedShards.put(shard.shardId(), shards);
}
assert assertInstanceNotInList(shard, shards);
List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
assert assertInstanceNotInList(shard, shards);
shards.add(shard);
}
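The recurring change in this file replaces the get / null-check / put idiom with Map.computeIfAbsent. A minimal, self-contained before/after using plain java.util types rather than the routing classes above:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        Map<String, List<String>> nodesToShards = new HashMap<>();

        // Before: explicit get / null-check / put.
        List<String> entries = nodesToShards.get("node-1");
        if (entries == null) {
            entries = new ArrayList<>();
            nodesToShards.put("node-1", entries);
        }
        entries.add("shard-0");

        // After: computeIfAbsent creates and registers the list in one step,
        // invoking the factory lambda only when the key is missing.
        nodesToShards.computeIfAbsent("node-2", k -> new ArrayList<>()).add("shard-1");

        System.out.println(nodesToShards); // {node-1=[shard-0], node-2=[shard-1]}
    }
}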
@@ -19,6 +19,8 @@
package org.elasticsearch.cluster.routing;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent {
return shardIdentifier;
}

public boolean allocatedPostIndexCreate() {
public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
if (active()) {
return true;
}

@@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent {
return false;
}

if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
// when no shards with this id have ever been active for this index
return false;
}

return true;
}
@@ -22,13 +22,13 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.ObjectLookupContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;

@@ -360,7 +360,8 @@ public class DiskThresholdDecider extends AllocationDecider {
}

// a flag for whether the primary shard has been previously allocated
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate();
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);

// checks for exact byte comparisons
if (freeBytes < freeBytesThresholdLow.bytes()) {
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -82,8 +83,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}

Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE);
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final Allocation enable;
if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue);

@@ -96,7 +97,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
case NEW_PRIMARIES:
if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) {
if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
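The decider above lets an index-level setting override the cluster-wide default before switching on the parsed enum. A standalone sketch of that pattern in plain Java (the enum constants and setting semantics are re-declared here for illustration only and are not the decider's actual types):

import java.util.Locale;

public class EnableSettingDemo {

    enum Allocation {
        NONE, NEW_PRIMARIES, PRIMARIES, ALL;

        static Allocation parse(String value) {
            return valueOf(value.toUpperCase(Locale.ROOT));
        }
    }

    // Index-level value, when present, wins over the cluster-wide default.
    static Allocation effectiveValue(String indexLevelValue, Allocation clusterDefault) {
        return indexLevelValue != null ? Allocation.parse(indexLevelValue) : clusterDefault;
    }

    public static void main(String[] args) {
        Allocation enable = effectiveValue("new_primaries", Allocation.ALL);
        switch (enable) {
            case NONE:
                System.out.println("no allocations are allowed");
                break;
            case NEW_PRIMARIES:
                System.out.println("only primaries of freshly created indices may be allocated");
                break;
            default:
                System.out.println("allocation allowed: " + enable);
        }
    }
}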
@ -20,8 +20,19 @@
|
|||
package org.elasticsearch.cluster.service;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterState.Builder;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskConfig;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.LocalNodeMasterListener;
|
||||
import org.elasticsearch.cluster.TimeoutClusterStateListener;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
|
@ -39,10 +50,16 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.text.StringText;
|
||||
import org.elasticsearch.common.text.Text;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.*;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryService;
|
||||
|
@ -50,8 +67,20 @@ import org.elasticsearch.node.settings.NodeSettingsService;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
@ -292,6 +321,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
if (config.timeout() != null) {
|
||||
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
|
||||
if (updateTask.processed.getAndSet(true) == false) {
|
||||
logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout());
|
||||
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
|
||||
}}));
|
||||
} else {
|
||||
|
@ -327,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
timeInQueue = 0;
|
||||
}
|
||||
|
||||
pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing));
|
||||
pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new Text(source), timeInQueue, pending.executing));
|
||||
}
|
||||
return pendingClusterTasks;
|
||||
}
|
||||
|
@@ -413,6 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}

assert batchResult.executionResults != null;
assert batchResult.executionResults.size() == toExecute.size()
: String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size());
boolean assertsEnabled = false;
assert (assertsEnabled = true);
if (assertsEnabled) {
for (UpdateTask<T> updateTask : toExecute) {
assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]";
}
}

ClusterState newClusterState = batchResult.resultingState;
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
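The assertsEnabled dance above is the standard Java idiom for detecting whether the JVM was started with -ea: the assignment inside the assert only executes when assertions are enabled, so the per-task verification loop is skipped entirely in production. A minimal sketch:

public class AssertionsEnabledDemo {
    public static void main(String[] args) {
        boolean assertsEnabled = false;
        // The assert's condition is an assignment expression; it only runs
        // when the JVM was started with -ea (or -enableassertions).
        assert (assertsEnabled = true);

        if (assertsEnabled) {
            System.out.println("assertions are enabled: run extra verification here");
        } else {
            System.out.println("assertions are disabled: skip the expensive checks");
        }
    }
}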
|
@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
|
||||
final ClusterStateTaskExecutor.TaskResult executionResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
|
||||
executionResult.handle(
|
||||
() -> proccessedListeners.add(updateTask),
|
||||
ex -> {
|
||||
logger.debug("cluster state update task [{}] failed", ex, updateTask.source);
|
||||
updateTask.listener.onFailure(updateTask.source, ex);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
|
@ -560,6 +605,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
|
||||
executor.clusterStatePublished(newClusterState);
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
|
|
|
@@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.text.StringAndBytesText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;

@@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream {
if (length == -1) {
return null;
}
return new StringAndBytesText(readBytesReference(length));
return new Text(readBytesReference(length));
}

public Text readText() throws IOException {
// use StringAndBytes so we can cache the string if its ever converted to it
int length = readInt();
return new StringAndBytesText(readBytesReference(length));
return new Text(readBytesReference(length));
}

@Nullable
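The optional-text read above treats a length of -1 as null and otherwise reads that many UTF-8 bytes. A self-contained sketch of the same framing using plain java.io streams rather than the StreamInput/StreamOutput classes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class OptionalTextFramingDemo {

    // Write -1 for null, otherwise the UTF-8 byte length followed by the bytes.
    static void writeOptionalText(DataOutputStream out, String text) throws IOException {
        if (text == null) {
            out.writeInt(-1);
            return;
        }
        byte[] bytes = text.getBytes(StandardCharsets.UTF_8);
        out.writeInt(bytes.length);
        out.write(bytes);
    }

    static String readOptionalText(DataInputStream in) throws IOException {
        int length = in.readInt();
        if (length == -1) {
            return null;
        }
        byte[] bytes = new byte[length];
        in.readFully(bytes);
        return new String(bytes, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buffer)) {
            writeOptionalText(out, "héllo");
            writeOptionalText(out, null);
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
            System.out.println(readOptionalText(in)); // héllo
            System.out.println(readOptionalText(in)); // null
        }
    }
}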
@@ -149,6 +149,10 @@ public final class AllTermQuery extends Query {
return null;
}
final TermState state = termStates.get(context.ord);
if (state == null) {
// Term does not exist in this segment
return null;
}
termsEnum.seekExact(term.bytes(), state);
PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS);
assert docs != null;
@ -19,21 +19,362 @@
|
|||
|
||||
package org.elasticsearch.common.network;
|
||||
|
||||
import org.elasticsearch.client.support.Headers;
|
||||
import org.elasticsearch.client.transport.TransportClientNodesService;
|
||||
import org.elasticsearch.client.transport.support.TransportProxyClient;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.ExtensionPoint;
|
||||
import org.elasticsearch.http.HttpServer;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
import org.elasticsearch.http.netty.NettyHttpServerTransport;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestHandler;
|
||||
import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
|
||||
import org.elasticsearch.rest.action.bulk.RestBulkAction;
|
||||
import org.elasticsearch.rest.action.cat.AbstractCatAction;
|
||||
import org.elasticsearch.rest.action.cat.RestAliasAction;
|
||||
import org.elasticsearch.rest.action.cat.RestAllocationAction;
|
||||
import org.elasticsearch.rest.action.cat.RestCatAction;
|
||||
import org.elasticsearch.rest.action.cat.RestFielddataAction;
|
||||
import org.elasticsearch.rest.action.cat.RestHealthAction;
|
||||
import org.elasticsearch.rest.action.cat.RestIndicesAction;
|
||||
import org.elasticsearch.rest.action.cat.RestMasterAction;
|
||||
import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
|
||||
import org.elasticsearch.rest.action.cat.RestNodesAction;
|
||||
import org.elasticsearch.rest.action.cat.RestPluginsAction;
|
||||
import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
|
||||
import org.elasticsearch.rest.action.cat.RestSegmentsAction;
|
||||
import org.elasticsearch.rest.action.cat.RestShardsAction;
|
||||
import org.elasticsearch.rest.action.cat.RestSnapshotAction;
|
||||
import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
|
||||
import org.elasticsearch.rest.action.delete.RestDeleteAction;
|
||||
import org.elasticsearch.rest.action.explain.RestExplainAction;
|
||||
import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
|
||||
import org.elasticsearch.rest.action.get.RestGetAction;
|
||||
import org.elasticsearch.rest.action.get.RestGetSourceAction;
|
||||
import org.elasticsearch.rest.action.get.RestHeadAction;
|
||||
import org.elasticsearch.rest.action.get.RestMultiGetAction;
|
||||
import org.elasticsearch.rest.action.index.RestIndexAction;
|
||||
import org.elasticsearch.rest.action.main.RestMainAction;
|
||||
import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
|
||||
import org.elasticsearch.rest.action.percolate.RestPercolateAction;
|
||||
import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
|
||||
import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
|
||||
import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
|
||||
import org.elasticsearch.rest.action.search.RestClearScrollAction;
|
||||
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
|
||||
import org.elasticsearch.rest.action.search.RestSearchAction;
|
||||
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
|
||||
import org.elasticsearch.rest.action.suggest.RestSuggestAction;
|
||||
import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction;
|
||||
import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction;
|
||||
import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction;
|
||||
import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
|
||||
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
|
||||
import org.elasticsearch.rest.action.update.RestUpdateAction;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.local.LocalTransport;
|
||||
import org.elasticsearch.transport.netty.NettyTransport;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* A module to handle registering and binding all network related classes.
|
||||
*/
|
||||
public class NetworkModule extends AbstractModule {
|
||||
|
||||
private final NetworkService networkService;
|
||||
public static final String TRANSPORT_TYPE_KEY = "transport.type";
|
||||
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
|
||||
|
||||
public NetworkModule(NetworkService networkService) {
|
||||
public static final String LOCAL_TRANSPORT = "local";
|
||||
public static final String NETTY_TRANSPORT = "netty";
|
||||
|
||||
public static final String HTTP_TYPE_KEY = "http.type";
|
||||
public static final String HTTP_ENABLED = "http.enabled";
|
||||
|
||||
private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
|
||||
RestMainAction.class,
|
||||
|
||||
RestNodesInfoAction.class,
|
||||
RestNodesStatsAction.class,
|
||||
RestNodesHotThreadsAction.class,
|
||||
RestClusterStatsAction.class,
|
||||
RestClusterStateAction.class,
|
||||
RestClusterHealthAction.class,
|
||||
RestClusterUpdateSettingsAction.class,
|
||||
RestClusterGetSettingsAction.class,
|
||||
RestClusterRerouteAction.class,
|
||||
RestClusterSearchShardsAction.class,
|
||||
RestPendingClusterTasksAction.class,
|
||||
RestPutRepositoryAction.class,
|
||||
RestGetRepositoriesAction.class,
|
||||
RestDeleteRepositoryAction.class,
|
||||
RestVerifyRepositoryAction.class,
|
||||
RestGetSnapshotsAction.class,
|
||||
RestCreateSnapshotAction.class,
|
||||
RestRestoreSnapshotAction.class,
|
||||
RestDeleteSnapshotAction.class,
|
||||
RestSnapshotsStatusAction.class,
|
||||
|
||||
RestIndicesExistsAction.class,
|
||||
RestTypesExistsAction.class,
|
||||
RestGetIndicesAction.class,
|
||||
RestIndicesStatsAction.class,
|
||||
RestIndicesSegmentsAction.class,
|
||||
RestIndicesShardStoresAction.class,
|
||||
RestGetAliasesAction.class,
|
||||
RestAliasesExistAction.class,
|
||||
RestIndexDeleteAliasesAction.class,
|
||||
RestIndexPutAliasAction.class,
|
||||
RestIndicesAliasesAction.class,
|
||||
RestGetIndicesAliasesAction.class,
|
||||
RestCreateIndexAction.class,
|
||||
RestDeleteIndexAction.class,
|
||||
RestCloseIndexAction.class,
|
||||
RestOpenIndexAction.class,
|
||||
|
||||
RestUpdateSettingsAction.class,
|
||||
RestGetSettingsAction.class,
|
||||
|
||||
RestAnalyzeAction.class,
|
||||
RestGetIndexTemplateAction.class,
|
||||
RestPutIndexTemplateAction.class,
|
||||
RestDeleteIndexTemplateAction.class,
|
||||
RestHeadIndexTemplateAction.class,
|
||||
|
||||
RestPutWarmerAction.class,
|
||||
RestDeleteWarmerAction.class,
|
||||
RestGetWarmerAction.class,
|
||||
|
||||
RestPutMappingAction.class,
|
||||
RestGetMappingAction.class,
|
||||
RestGetFieldMappingAction.class,
|
||||
|
||||
RestRefreshAction.class,
|
||||
RestFlushAction.class,
|
||||
RestSyncedFlushAction.class,
|
||||
RestForceMergeAction.class,
|
||||
RestUpgradeAction.class,
|
||||
RestClearIndicesCacheAction.class,
|
||||
|
||||
RestIndexAction.class,
|
||||
RestGetAction.class,
|
||||
RestGetSourceAction.class,
|
||||
RestHeadAction.class,
|
||||
RestMultiGetAction.class,
|
||||
RestDeleteAction.class,
|
||||
org.elasticsearch.rest.action.count.RestCountAction.class,
|
||||
RestSuggestAction.class,
|
||||
RestTermVectorsAction.class,
|
||||
RestMultiTermVectorsAction.class,
|
||||
RestBulkAction.class,
|
||||
RestUpdateAction.class,
|
||||
RestPercolateAction.class,
|
||||
RestMultiPercolateAction.class,
|
||||
|
||||
RestSearchAction.class,
|
||||
RestSearchScrollAction.class,
|
||||
RestClearScrollAction.class,
|
||||
RestMultiSearchAction.class,
|
||||
RestRenderSearchTemplateAction.class,
|
||||
|
||||
RestValidateQueryAction.class,
|
||||
|
||||
RestExplainAction.class,
|
||||
|
||||
RestRecoveryAction.class,
|
||||
|
||||
// Templates API
|
||||
RestGetSearchTemplateAction.class,
|
||||
RestPutSearchTemplateAction.class,
|
||||
RestDeleteSearchTemplateAction.class,
|
||||
|
||||
// Scripts API
|
||||
RestGetIndexedScriptAction.class,
|
||||
RestPutIndexedScriptAction.class,
|
||||
RestDeleteIndexedScriptAction.class,
|
||||
|
||||
RestFieldStatsAction.class,
|
||||
|
||||
// no abstract cat action
|
||||
RestCatAction.class
|
||||
);
|
||||
|
||||
private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList(
|
||||
RestAllocationAction.class,
|
||||
RestShardsAction.class,
|
||||
RestMasterAction.class,
|
||||
RestNodesAction.class,
|
||||
RestIndicesAction.class,
|
||||
RestSegmentsAction.class,
|
||||
// Fully qualified to prevent interference with rest.action.count.RestCountAction
|
||||
org.elasticsearch.rest.action.cat.RestCountAction.class,
|
||||
// Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
|
||||
org.elasticsearch.rest.action.cat.RestRecoveryAction.class,
|
||||
RestHealthAction.class,
|
||||
org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class,
|
||||
RestAliasAction.class,
|
||||
RestThreadPoolAction.class,
|
||||
RestPluginsAction.class,
|
||||
RestFielddataAction.class,
|
||||
RestNodeAttrsAction.class,
|
||||
RestRepositoriesAction.class,
|
||||
RestSnapshotAction.class
|
||||
);
|
||||
|
||||
private final NetworkService networkService;
|
||||
private final Settings settings;
|
||||
private final boolean transportClient;
|
||||
|
||||
private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
|
||||
private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
|
||||
private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
|
||||
private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
|
||||
// we must separate the cat rest handlers so RestCatAction can collect them...
|
||||
private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
|
||||
|
||||
/**
|
||||
* Creates a network module that custom networking classes can be plugged into.
|
||||
*
|
||||
* @param networkService A constructed network service object to bind.
|
||||
* @param settings The settings for the node
|
||||
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
|
||||
*/
|
||||
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
|
||||
this.networkService = networkService;
|
||||
this.settings = settings;
|
||||
this.transportClient = transportClient;
|
||||
registerTransportService(NETTY_TRANSPORT, TransportService.class);
|
||||
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
|
||||
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
|
||||
|
||||
if (transportClient == false) {
|
||||
registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
|
||||
|
||||
for (Class<? extends AbstractCatAction> catAction : builtinCatHandlers) {
|
||||
catHandlers.registerExtension(catAction);
|
||||
}
|
||||
for (Class<? extends RestHandler> restAction : builtinRestHandlers) {
|
||||
restHandlers.registerExtension(restAction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
|
||||
public void registerTransportService(String name, Class<? extends TransportService> clazz) {
|
||||
transportServiceTypes.registerExtension(name, clazz);
|
||||
}
|
||||
|
||||
/** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
|
||||
public void registerTransport(String name, Class<? extends Transport> clazz) {
|
||||
transportTypes.registerExtension(name, clazz);
|
||||
}
|
||||
|
||||
/** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
|
||||
// TODO: we need another name than "http transport"....so confusing with transportClient...
|
||||
public void registerHttpTransport(String name, Class<? extends HttpServerTransport> clazz) {
|
||||
if (transportClient) {
|
||||
throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client");
|
||||
}
|
||||
httpTransportTypes.registerExtension(name, clazz);
|
||||
}
|
||||
|
||||
/** Adds an additional rest action. */
|
||||
// TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here
|
||||
public void registerRestHandler(Class<? extends RestHandler> clazz) {
|
||||
if (transportClient) {
|
||||
throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client");
|
||||
}
|
||||
if (AbstractCatAction.class.isAssignableFrom(clazz)) {
|
||||
catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class));
|
||||
} else {
|
||||
restHandlers.registerExtension(clazz);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void configure() {
|
||||
bind(NetworkService.class).toInstance(networkService);
|
||||
bind(NamedWriteableRegistry.class).asEagerSingleton();
|
||||
|
||||
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
|
||||
String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
|
||||
transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport);
|
||||
|
||||
if (transportClient) {
|
||||
bind(Headers.class).asEagerSingleton();
|
||||
bind(TransportProxyClient.class).asEagerSingleton();
|
||||
bind(TransportClientNodesService.class).asEagerSingleton();
|
||||
} else {
|
||||
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
|
||||
bind(HttpServer.class).asEagerSingleton();
|
||||
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
|
||||
}
|
||||
bind(RestController.class).asEagerSingleton();
|
||||
catHandlers.bind(binder());
|
||||
restHandlers.bind(binder());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,82 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.common.text;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
||||
/**
|
||||
* A {@link BytesReference} representation of the text, will always convert on the fly to a {@link String}.
|
||||
*/
|
||||
public class BytesText implements Text {
|
||||
|
||||
private BytesReference bytes;
|
||||
private int hash;
|
||||
|
||||
public BytesText(BytesReference bytes) {
|
||||
this.bytes = bytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasBytes() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesReference bytes() {
|
||||
return bytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasString() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String string() {
|
||||
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
|
||||
if (!bytes.hasArray()) {
|
||||
bytes = bytes.toBytesArray();
|
||||
}
|
||||
return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return string();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (hash == 0) {
|
||||
hash = bytes.hashCode();
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
return bytes().equals(((Text) obj).bytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Text text) {
|
||||
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
|
||||
}
|
||||
}
|
|
@ -1,111 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.common.text;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
||||
/**
|
||||
* Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
|
||||
* the other is requests, caches the other one in a local reference so no additional conversion will be needed.
|
||||
*/
|
||||
public class StringAndBytesText implements Text {
|
||||
|
||||
public static final Text[] EMPTY_ARRAY = new Text[0];
|
||||
|
||||
public static Text[] convertFromStringArray(String[] strings) {
|
||||
if (strings.length == 0) {
|
||||
return EMPTY_ARRAY;
|
||||
}
|
||||
Text[] texts = new Text[strings.length];
|
||||
for (int i = 0; i < strings.length; i++) {
|
||||
texts[i] = new StringAndBytesText(strings[i]);
|
||||
}
|
||||
return texts;
|
||||
}
|
||||
|
||||
private BytesReference bytes;
|
||||
private String text;
|
||||
private int hash;
|
||||
|
||||
public StringAndBytesText(BytesReference bytes) {
|
||||
this.bytes = bytes;
|
||||
}
|
||||
|
||||
public StringAndBytesText(String text) {
|
||||
this.text = text;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasBytes() {
|
||||
return bytes != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesReference bytes() {
|
||||
if (bytes == null) {
|
||||
bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasString() {
|
||||
return text != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String string() {
|
||||
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
|
||||
if (text == null) {
|
||||
if (!bytes.hasArray()) {
|
||||
bytes = bytes.toBytesArray();
|
||||
}
|
||||
text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return string();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (hash == 0) {
|
||||
hash = bytes().hashCode();
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
return bytes().equals(((Text) obj).bytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Text text) {
|
||||
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
|
||||
}
|
||||
}
|
|
@ -1,94 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.common.text;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
||||
/**
|
||||
* A {@link String} only representation of the text. Will always convert to bytes on the fly.
|
||||
*/
|
||||
public class StringText implements Text {
|
||||
|
||||
public static final Text[] EMPTY_ARRAY = new Text[0];
|
||||
|
||||
public static Text[] convertFromStringArray(String[] strings) {
|
||||
if (strings.length == 0) {
|
||||
return EMPTY_ARRAY;
|
||||
}
|
||||
Text[] texts = new Text[strings.length];
|
||||
for (int i = 0; i < strings.length; i++) {
|
||||
texts[i] = new StringText(strings[i]);
|
||||
}
|
||||
return texts;
|
||||
}
|
||||
|
||||
private final String text;
|
||||
private int hash;
|
||||
|
||||
public StringText(String text) {
|
||||
this.text = text;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasBytes() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesReference bytes() {
|
||||
return new BytesArray(text.getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasString() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String string() {
|
||||
return text;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return string();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// we use bytes here so we can be consistent with other text implementations
|
||||
if (hash == 0) {
|
||||
hash = bytes().hashCode();
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
// we use bytes here so we can be consistent with other text implementations
|
||||
return bytes().equals(((Text) obj).bytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Text text) {
|
||||
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
|
||||
}
|
||||
}
|
|
@ -18,39 +18,101 @@
|
|||
*/
|
||||
package org.elasticsearch.common.text;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
||||
|
||||
/**
|
||||
* Text represents a (usually) long text data. We use this abstraction instead of {@link String}
|
||||
* so we can represent it in a more optimized manner in memory as well as serializing it over the
|
||||
* network as well as converting it to json format.
|
||||
* Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
|
||||
* the other is requests, caches the other one in a local reference so no additional conversion will be needed.
|
||||
*/
|
||||
public interface Text extends Comparable<Text> {
|
||||
public final class Text implements Comparable<Text> {
|
||||
|
||||
public static final Text[] EMPTY_ARRAY = new Text[0];
|
||||
|
||||
public static Text[] convertFromStringArray(String[] strings) {
|
||||
if (strings.length == 0) {
|
||||
return EMPTY_ARRAY;
|
||||
}
|
||||
Text[] texts = new Text[strings.length];
|
||||
for (int i = 0; i < strings.length; i++) {
|
||||
texts[i] = new Text(strings[i]);
|
||||
}
|
||||
return texts;
|
||||
}
|
||||
|
||||
private BytesReference bytes;
|
||||
private String text;
|
||||
private int hash;
|
||||
|
||||
public Text(BytesReference bytes) {
|
||||
this.bytes = bytes;
|
||||
}
|
||||
|
||||
public Text(String text) {
|
||||
this.text = text;
|
||||
}
|
||||
|
||||
/**
|
||||
* Are bytes available without the need to be converted into bytes when calling {@link #bytes()}.
|
||||
* Whether a {@link BytesReference} view of the data is already materialized.
|
||||
*/
|
||||
boolean hasBytes();
|
||||
public boolean hasBytes() {
|
||||
return bytes != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()}
|
||||
* Returns a {@link BytesReference} view of the data.
|
||||
*/
|
||||
BytesReference bytes();
|
||||
public BytesReference bytes() {
|
||||
if (bytes == null) {
|
||||
bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}.
|
||||
* Whether a {@link String} view of the data is already materialized.
|
||||
*/
|
||||
boolean hasString();
|
||||
public boolean hasString() {
|
||||
return text != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the string representation of the text, might be converted to a string on the fly.
|
||||
* Returns a {@link String} view of the data.
|
||||
*/
|
||||
String string();
|
||||
public String string() {
|
||||
if (text == null) {
|
||||
if (!bytes.hasArray()) {
|
||||
bytes = bytes.toBytesArray();
|
||||
}
|
||||
text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the string representation of the text, might be converted to a string on the fly.
|
||||
*/
|
||||
@Override
|
||||
String toString();
|
||||
public String toString() {
|
||||
return string();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (hash == 0) {
|
||||
hash = bytes().hashCode();
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
return bytes().equals(((Text) obj).bytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(Text text) {
|
||||
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -53,7 +53,7 @@ import java.util.Map;
 */
public final class XContentBuilder implements BytesStream, Releasable {

public static enum FieldCaseConversion {
public enum FieldCaseConversion {
/**
 * No conversion will occur.
 */

@@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
}

public XContentBuilder field(XContentBuilderString name) throws IOException {
if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
generator.writeFieldName(name.underscore());
} else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
generator.writeFieldName(name.camelCase());
} else {
generator.writeFieldName(name.underscore());
}
return this;
return field(name, fieldCaseConversion);
}

public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException {

@@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable {
}

public XContentBuilder field(String name) throws IOException {
if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
}
name = Strings.toUnderscoreCase(name, cachedStringBuilder);
} else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
}
name = Strings.toCamelCase(name, cachedStringBuilder);
}
generator.writeFieldName(name);
return this;
return field(name, fieldCaseConversion);
}

public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException {
if (name == null) {
throw new IllegalArgumentException("field name cannot be null");
}
if (conversion == FieldCaseConversion.UNDERSCORE) {
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
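The refactoring above funnels both field(...) overloads through a single field(name, conversion) method instead of duplicating the case-conversion branches. A small standalone sketch of what underscore/camelCase conversion does (illustrative helpers, not the Strings utilities the builder actually calls):

public class FieldCaseConversionDemo {

    enum FieldCaseConversion { NONE, UNDERSCORE, CAMELCASE }

    static String convert(String name, FieldCaseConversion conversion) {
        switch (conversion) {
            case UNDERSCORE:
                return toUnderscoreCase(name);
            case CAMELCASE:
                return toCamelCase(name);
            default:
                return name;
        }
    }

    // numberOfReplicas -> number_of_replicas
    static String toUnderscoreCase(String value) {
        StringBuilder sb = new StringBuilder();
        for (char c : value.toCharArray()) {
            if (Character.isUpperCase(c)) {
                sb.append('_').append(Character.toLowerCase(c));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    // number_of_replicas -> numberOfReplicas
    static String toCamelCase(String value) {
        StringBuilder sb = new StringBuilder();
        boolean upperNext = false;
        for (char c : value.toCharArray()) {
            if (c == '_') {
                upperNext = true;
            } else {
                sb.append(upperNext ? Character.toUpperCase(c) : c);
                upperNext = false;
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(convert("numberOfReplicas", FieldCaseConversion.UNDERSCORE));
        System.out.println(convert("number_of_replicas", FieldCaseConversion.CAMELCASE));
    }
}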
@@ -21,7 +21,12 @@ package org.elasticsearch.env;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -31,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
@@ -38,11 +44,25 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.io.Closeable;
import java.io.IOException;
import java.nio.file.*;
import java.util.*;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -145,7 +165,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
        for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
            Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
            Files.createDirectories(dir);

            try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
                logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
                try {
@@ -187,6 +207,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
        }

        maybeLogPathDetails();
        maybeLogHeapDetails();

        if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) {
            SegmentInfos.setInfoStream(System.out);
@@ -274,6 +295,13 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
        }
    }

    private void maybeLogHeapDetails() {
        JvmInfo jvmInfo = JvmInfo.jvmInfo();
        ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
        String useCompressedOops = jvmInfo.useCompressedOops();
        logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
    }

    private static String toString(Collection<String> items) {
        StringBuilder b = new StringBuilder();
        for(String item : items) {
@@ -811,7 +839,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
        // Sanity check:
        assert Integer.parseInt(shardPath.getName(count-1).toString()) >= 0;
        assert "indices".equals(shardPath.getName(count-3).toString());

        return shardPath.getParent().getParent().getParent();
    }
}

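Note: maybeLogHeapDetails() above reports the maximum heap size and the compressed-oops flag at node startup through Elasticsearch's JvmInfo (not shown in this hunk). For readers who want the same information outside Elasticsearch, a hedged sketch using only standard and HotSpot management APIs:

---------------------------------------------------------------------------
import com.sun.management.HotSpotDiagnosticMXBean;
import java.lang.management.ManagementFactory;

public final class HeapDetails {
    public static void main(String[] args) {
        // Max heap in bytes; may be -1 if the limit is undefined.
        long maxHeapBytes = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();

        String compressedOops = "unknown";
        try {
            // HotSpot-specific diagnostic bean; not available on every JVM.
            HotSpotDiagnosticMXBean hotspot =
                    ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
            compressedOops = hotspot.getVMOption("UseCompressedOops").getValue();
        } catch (RuntimeException e) {
            // leave as "unknown" on JVMs without this option
        }

        System.out.printf("heap size [%d bytes], compressed ordinary object pointers [%s]%n",
                maxHeapBytes, compressedOops);
    }
}
---------------------------------------------------------------------------
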
@@ -20,6 +20,7 @@
package org.elasticsearch.gateway;

import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;

import java.util.*;
import java.util.stream.Collectors;

/**
 * The primary shard allocator allocates primary shard that were not created as
@@ -39,6 +42,7 @@ import java.util.*;
 */
public abstract class PrimaryShardAllocator extends AbstractComponent {

    @Deprecated
    public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards";

    private final String initialShards;
@@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {

        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
        while (unassignedIterator.hasNext()) {
            ShardRouting shard = unassignedIterator.next();
            final ShardRouting shard = unassignedIterator.next();

            if (needToFindPrimaryCopy(shard) == false) {
            if (shard.primary() == false) {
                continue;
            }

            AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
            final IndexMetaData indexMetaData = metaData.index(shard.getIndex());
            final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList());

            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
                // when we create a fresh index
                continue;
            }

            final AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
            if (shardState.hasData() == false) {
                logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
                allocation.setHasPendingAsyncFetch();
@@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                continue;
            }

            IndexMetaData indexMetaData = metaData.index(shard.getIndex());
            Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build();
            final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id());
            final boolean snapshotRestore = shard.restoreSource() != null;
            final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings);

            NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState);
            logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
            final NodesAndVersions nodesAndVersions;
            final boolean enoughAllocationsFound;

            if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) {
                // if we are restoring this shard we still can allocate
                if (shard.restoreSource() == null) {
            if (lastActiveAllocationIds.isEmpty()) {
                assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new";
                // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
                // fall back to old version-based allocation mode
                // Note that once the shard has been active, lastActiveAllocationIds will be non-empty
                nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
                if (snapshotRestore || recoverOnAnyNode) {
                    enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
                } else {
                    enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions);
                }
                logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
            } else {
                assert lastActiveAllocationIds.isEmpty() == false;
                // use allocation ids to select nodes
                nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
                        allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
                enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
                logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds);
            }

            if (enoughAllocationsFound == false){
                if (snapshotRestore) {
                    // let BalancedShardsAllocator take care of allocating this shard
                    logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
                } else if (recoverOnAnyNode) {
                    // let BalancedShardsAllocator take care of allocating this shard
                    logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
                } else {
                    // we can't really allocate, so ignore it and continue
                    unassignedIterator.removeAndIgnore();
                    logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound);
                } else {
                    logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
                }
                continue;
            }

            NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions);
            final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes);
            if (nodesToAllocate.yesNodes.isEmpty() == false) {
                DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
                logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
@@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
    }

    /**
     * Does the shard need to find a primary copy?
     * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching
     * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
     * entries with matching allocation id are always at the front of the list.
     */
    boolean needToFindPrimaryCopy(ShardRouting shard) {
        if (shard.primary() == false) {
            return false;
    protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
                                                           Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
        List<DiscoveryNode> matchingNodes = new ArrayList<>();
        List<DiscoveryNode> nonMatchingNodes = new ArrayList<>();
        long highestVersion = -1;
        for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
            DiscoveryNode node = nodeShardState.getNode();
            String allocationId = nodeShardState.allocationId();

            if (ignoreNodes.contains(node.id())) {
                continue;
            }

            if (nodeShardState.storeException() == null) {
                if (allocationId == null && nodeShardState.version() != -1) {
                    // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard
                    allocationId = "_n/a_";
                }

                logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId);
            } else {
                logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId);
                allocationId = null;
            }

            if (allocationId != null) {
                if (lastActiveAllocationIds.contains(allocationId)) {
                    matchingNodes.add(node);
                    highestVersion = Math.max(highestVersion, nodeShardState.version());
                } else if (matchAnyShard) {
                    nonMatchingNodes.add(node);
                    highestVersion = Math.max(highestVersion, nodeShardState.version());
                }
            }
        }

        // this is an API allocation, ignore since we know there is no data...
        if (shard.allocatedPostIndexCreate() == false) {
            return false;
        }
        List<DiscoveryNode> nodes = new ArrayList<>();
        nodes.addAll(matchingNodes);
        nodes.addAll(nonMatchingNodes);

        return true;
        if (logger.isTraceEnabled()) {
            logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", ")));
        }
        return new NodesAndVersions(nodes, nodes.size(), highestVersion);
    }

    private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
    /**
     * used by old version-based allocation
     */
    private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
        // check if the counts meets the minimum set
        int requiredAllocation = 1;
        // if we restore from a repository one copy is more then enough
        if (shard.restoreSource() == null) {
            try {
                String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
                if ("quorum".equals(initialShards)) {
                    if (indexMetaData.getNumberOfReplicas() > 1) {
                        requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
                    }
                } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
                    if (indexMetaData.getNumberOfReplicas() > 2) {
                        requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
                    }
                } else if ("one".equals(initialShards)) {
                    requiredAllocation = 1;
                } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
                    requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
                } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
                    if (indexMetaData.getNumberOfReplicas() > 1) {
                        requiredAllocation = indexMetaData.getNumberOfReplicas();
                    }
                } else {
                    requiredAllocation = Integer.parseInt(initialShards);
        try {
            String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
            if ("quorum".equals(initialShards)) {
                if (indexMetaData.getNumberOfReplicas() > 1) {
                    requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
                }
            } catch (Exception e) {
                logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard);
            } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
                if (indexMetaData.getNumberOfReplicas() > 2) {
                    requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
                }
            } else if ("one".equals(initialShards)) {
                requiredAllocation = 1;
            } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
                requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
            } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
                if (indexMetaData.getNumberOfReplicas() > 1) {
                    requiredAllocation = indexMetaData.getNumberOfReplicas();
                }
            } else {
                requiredAllocation = Integer.parseInt(initialShards);
            }
        } catch (Exception e) {
            logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard);
        }

        return nodesAndVersions.allocationsFound >= requiredAllocation;
    }

    /**
     * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to.
     * Split the list of nodes to lists of yes/no/throttle nodes based on allocation deciders
     */
    private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) {
    private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<DiscoveryNode> nodes) {
        List<DiscoveryNode> yesNodes = new ArrayList<>();
        List<DiscoveryNode> throttledNodes = new ArrayList<>();
        List<DiscoveryNode> noNodes = new ArrayList<>();
        for (DiscoveryNode discoNode : nodesAndVersions.nodes) {
        for (DiscoveryNode discoNode : nodes) {
            RoutingNode node = allocation.routingNodes().node(discoNode.id());
            if (node == null) {
                continue;
@@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
    }

    /**
     * Builds a list of nodes and version
     * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version
     * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest
     * version are always at the front of the list.
     */
    NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set<String> ignoreNodes,
    NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
                                           AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
        final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>();
        int numberOfAllocationsFound = 0;
@@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                version = -1;
            }

            if (recoveryOnAnyNode) {
                numberOfAllocationsFound++;
                if (version > highestVersion) {
                    highestVersion = version;
                }
                // We always put the node without clearing the map
                nodesWithVersion.put(node, version);
            } else if (version != -1) {
            if (version != -1) {
                numberOfAllocationsFound++;
                // If we've found a new "best" candidate, clear the
                // current candidates and add it
                if (version > highestVersion) {
                    highestVersion = version;
                    nodesWithVersion.clear();
                    if (matchAnyShard == false) {
                        nodesWithVersion.clear();
                    }
                    nodesWithVersion.put(node, version);
                } else if (version == highestVersion) {
                    // If the candidate is the same, add it to the
@@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
     * Return {@code true} if the index is configured to allow shards to be
     * recovered on any node
     */
    private boolean recoverOnAnyNode(Settings idxSettings) {
        return IndexMetaData.isOnSharedFilesystem(idxSettings) &&
                idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
    private boolean recoverOnAnyNode(IndexSettings indexSettings) {
        return indexSettings.isOnSharedFilesystem()
                && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
    }

    protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

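Note: the core of the PrimaryShardAllocator change above is buildAllocationIdBasedNodes: copies whose allocation id is in the last active set are preferred, and other copies are appended only when matchAnyShard is true. A simplified, hypothetical sketch of that ordering rule (NodeCopy is a stand-in for the per-node shard state, not an Elasticsearch type):

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class CandidateOrdering {
    record NodeCopy(String nodeId, String allocationId) {}

    static List<NodeCopy> orderCandidates(List<NodeCopy> copies,
                                          Set<String> lastActiveAllocationIds,
                                          boolean matchAnyShard,
                                          Set<String> ignoreNodes) {
        List<NodeCopy> matching = new ArrayList<>();
        List<NodeCopy> nonMatching = new ArrayList<>();
        for (NodeCopy copy : copies) {
            if (ignoreNodes.contains(copy.nodeId()) || copy.allocationId() == null) {
                continue; // skip ignored nodes and copies without a usable allocation id
            }
            if (lastActiveAllocationIds.contains(copy.allocationId())) {
                matching.add(copy);
            } else if (matchAnyShard) {
                nonMatching.add(copy);
            }
        }
        // matching copies always come first; stale copies only appear when allowed
        List<NodeCopy> ordered = new ArrayList<>(matching);
        ordered.addAll(nonMatching);
        return ordered;
    }

    public static void main(String[] args) {
        List<NodeCopy> copies = List.of(
                new NodeCopy("node1", "a1"), new NodeCopy("node2", "stale"), new NodeCopy("node3", null));
        System.out.println(orderCandidates(copies, Set.of("a1"), true, Set.of()));
        // [NodeCopy[nodeId=node1, allocationId=a1], NodeCopy[nodeId=node2, allocationId=stale]]
    }
}
---------------------------------------------------------------------------
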
@@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
@@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
     */
    public boolean processExistingRecoveries(RoutingAllocation allocation) {
        boolean changed = false;
        MetaData metaData = allocation.metaData();
        for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) {
            nodes.next();
            for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) {
@@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                if (shard.relocatingNodeId() != null) {
                    continue;
                }

                // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
                if (shard.allocatedPostIndexCreate() == false) {
                IndexMetaData indexMetaData = metaData.index(shard.getIndex());
                if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
                    continue;
                }

@@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
        boolean changed = false;
        final RoutingNodes routingNodes = allocation.routingNodes();
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
        MetaData metaData = allocation.metaData();
        while (unassignedIterator.hasNext()) {
            ShardRouting shard = unassignedIterator.next();
            if (shard.primary()) {
@@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
            }

            // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
            if (shard.allocatedPostIndexCreate() == false) {
            IndexMetaData indexMetaData = metaData.index(shard.getIndex());
            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
                continue;
            }

@@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
                Store.tryOpenIndex(shardPath.resolveIndex());
            } catch (Exception exception) {
                logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
                return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception);
                String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
                return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception);
            }
        }
        // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata
@@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
                logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID);
            } else {
                logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
                return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version);
                String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
                return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId);
            }
        }
        logger.trace("{} no local shard info found", shardId);
        return new NodeGatewayStartedShards(clusterService.localNode(), -1);
        return new NodeGatewayStartedShards(clusterService.localNode(), -1, null);
    } catch (Exception e) {
        throw new ElasticsearchException("failed to load started shards", e);
    }
@@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
    public static class NodeGatewayStartedShards extends BaseNodeResponse {

        private long version = -1;
        private String allocationId = null;
        private Throwable storeException = null;

        public NodeGatewayStartedShards() {
        }
        public NodeGatewayStartedShards(DiscoveryNode node, long version) {
            this(node, version, null);
        public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) {
            this(node, version, allocationId, null);
        }

        public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) {
        public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable storeException) {
            super(node);
            this.version = version;
            this.allocationId = allocationId;
            this.storeException = storeException;
        }

@@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
            return this.version;
        }

        public String allocationId() {
            return this.allocationId;
        }

        public Throwable storeException() {
            return this.storeException;
        }
@@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            version = in.readLong();
            allocationId = in.readOptionalString();
            if (in.readBoolean()) {
                storeException = in.readThrowable();
            }

        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeLong(version);
            out.writeOptionalString(allocationId);
            if (storeException != null) {
                out.writeBoolean(true);
                out.writeThrowable(storeException);

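Note: the serialization hunks above add the allocation id to NodeGatewayStartedShards via writeOptionalString/readOptionalString, i.e. a presence flag followed by the value, while the store exception keeps its explicit boolean guard. A sketch of that optional-field convention with plain java.io streams (not the Elasticsearch StreamInput/StreamOutput API):

---------------------------------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class OptionalStringCodec {
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);   // presence flag first
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeLong(-1L);                 // version, as in the "-1, null" case above
            writeOptionalString(out, null);     // allocation id may be absent
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println("version=" + in.readLong() + " allocationId=" + readOptionalString(in));
        }
    }
}
---------------------------------------------------------------------------
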
@@ -1,59 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;

import java.util.Objects;

/**
 *
 */
public class HttpServerModule extends AbstractModule {

    private final Settings settings;
    private final ESLogger logger;

    private Class<? extends HttpServerTransport> httpServerTransportClass;

    public HttpServerModule(Settings settings) {
        this.settings = settings;
        this.logger = Loggers.getLogger(getClass(), settings);
        this.httpServerTransportClass = NettyHttpServerTransport.class;
    }

    @SuppressWarnings({"unchecked"})
    @Override
    protected void configure() {
        bind(HttpServerTransport.class).to(httpServerTransportClass).asEagerSingleton();
        bind(HttpServer.class).asEagerSingleton();
    }

    public void setHttpServerTransport(Class<? extends HttpServerTransport> httpServerTransport, String source) {
        Objects.requireNonNull(httpServerTransport, "Configured http server transport may not be null");
        Objects.requireNonNull(source, "Plugin, that changes transport may not be null");
        logger.info("Using [{}] as http transport, overridden by [{}]", httpServerTransportClass.getName(), source);
        this.httpServerTransportClass = httpServerTransport;
    }
}

@@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShadowIndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
@@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/**
 *
 */
public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard>{
public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {

    private final IndexEventListener eventListener;
    private final AnalysisService analysisService;
@@ -93,7 +98,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
    private final AtomicBoolean deleted = new AtomicBoolean(false);
    private final IndexSettings indexSettings;

    @Inject
    public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
                        SimilarityService similarityService,
                        ShardStoreDeleter shardStoreDeleter,
@@ -146,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     */
    @Nullable
    public IndexShard getShardOrNull(int shardId) {
        return shards.get(shardId);
        return shards.get(shardId);
    }

    /**
@@ -160,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return indexShard;
    }

    public Set<Integer> shardIds() { return shards.keySet(); }
    public Set<Integer> shardIds() {
        return shards.keySet();
    }

    public IndexCache cache() {
        return indexCache;
    }

    public IndexFieldDataService fieldData() { return indexFieldData; }
    public IndexFieldDataService fieldData() {
        return indexFieldData;
    }

    public AnalysisService analysisService() {
        return this.analysisService;
@@ -207,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
    private long getAvgShardSizeInBytes() throws IOException {
        long sum = 0;
        int count = 0;
        for(IndexShard indexShard : this) {
        for (IndexShard indexShard : this) {
            sum += indexShard.store().stats().sizeInBytes();
            count++;
        }
@@ -254,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
            // that's being relocated/replicated we know how large it will become once it's done copying:
            // Count up how many shards are currently on each data path:
            Map<Path,Integer> dataPathToShardCount = new HashMap<>();
            for(IndexShard shard : this) {
            Map<Path, Integer> dataPathToShardCount = new HashMap<>();
            for (IndexShard shard : this) {
                Path dataPath = shard.shardPath().getRootStatePath();
                Integer curCount = dataPathToShardCount.get(dataPath);
                if (curCount == null) {
                    curCount = 0;
                }
                dataPathToShardCount.put(dataPath, curCount+1);
                dataPathToShardCount.put(dataPath, curCount + 1);
            }
            path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
                    dataPathToShardCount);
                    dataPathToShardCount);
            logger.debug("{} creating using a new path [{}]", shardId, path);
        } else {
            logger.debug("{} creating using an existing path [{}]", shardId, path);
@@ -277,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            logger.debug("creating shard_id {}", shardId);
            // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
            final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
                    (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
                    (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
            store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
            if (useShadowEngine(primary, indexSettings)) {
                indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider);
@@ -462,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            }
        }
    }

    /**
     * Returns the filter associated with listed filtering aliases.
     * <p>

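Note: the IndexService hunk above counts how many shards live on each data path with a Map<Path, Integer> and a manual null check on get(). A small sketch of the same bookkeeping; Map.merge is the one-call equivalent of "get, default to 0, add 1, put back" (the paths are hypothetical examples):

---------------------------------------------------------------------------
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ShardCounts {
    public static void main(String[] args) {
        List<Path> shardRootPaths = List.of(
                Path.of("/data/a"), Path.of("/data/b"), Path.of("/data/a"));

        Map<Path, Integer> dataPathToShardCount = new HashMap<>();
        for (Path dataPath : shardRootPaths) {
            dataPathToShardCount.merge(dataPath, 1, Integer::sum);
        }

        System.out.println(dataPathToShardCount); // e.g. {/data/a=2, /data/b=1}
    }
}
---------------------------------------------------------------------------
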
@@ -781,10 +781,14 @@ public class InternalEngine extends Engine {
                // we need to fail the engine. it might have already been failed before
                // but we are double-checking it's failed and closed
                if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
                    failEngine("already closed by tragic event", indexWriter.getTragicException());
                    failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException());
                } else if (translog.isOpen() == false && translog.getTragicException() != null) {
                    failEngine("already closed by tragic event on the translog", translog.getTragicException());
                }
                return true;
            } else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) {
            } else if (t != null &&
                    ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t)
                        || (translog.isOpen() == false && translog.getTragicException() == t))) {
                // this spot on - we are handling the tragic event exception here so we have to fail the engine
                // right away
                failEngine(source, t);

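Note: the InternalEngine hunk above widens the tragic-event handling so that an exception that closed either the index writer or the translog fails the engine. A hedged sketch of just that predicate, with a hypothetical Component interface standing in for "index writer or translog":

---------------------------------------------------------------------------
final class TragicEventCheck {
    interface Component {
        boolean isOpen();
        Throwable tragicException();
    }

    /** True when {@code t} is the tragic exception that closed either component. */
    static boolean isTragicEvent(Throwable t, Component indexWriter, Component translog) {
        return t != null
                && ((indexWriter.isOpen() == false && indexWriter.tragicException() == t)
                    || (translog.isOpen() == false && translog.tragicException() == t));
    }
}
---------------------------------------------------------------------------
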
@@ -19,16 +19,9 @@

package org.elasticsearch.index.mapper;

public class ContentPath {
public final class ContentPath {

    public enum Type {
        JUST_NAME,
        FULL,
    }

    private Type pathType;

    private final char delimiter;
    private static final char DELIMITER = '.';

    private final StringBuilder sb;

@@ -47,7 +40,6 @@ public class ContentPath {
     * number of path elements to not be included in {@link #pathAsText(String)}.
     */
    public ContentPath(int offset) {
        this.delimiter = '.';
        this.sb = new StringBuilder();
        this.offset = offset;
        reset();
@@ -71,26 +63,11 @@ public class ContentPath {
    }

    public String pathAsText(String name) {
        if (pathType == Type.JUST_NAME) {
            return name;
        }
        return fullPathAsText(name);
    }

    public String fullPathAsText(String name) {
        sb.setLength(0);
        for (int i = offset; i < index; i++) {
            sb.append(path[i]).append(delimiter);
            sb.append(path[i]).append(DELIMITER);
        }
        sb.append(name);
        return sb.toString();
    }

    public Type pathType() {
        return pathType;
    }

    public void pathType(Type type) {
        this.pathType = type;
    }
}

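Note: with the path type removed, ContentPath above always builds full dotted paths with a fixed '.' delimiter and a reusable StringBuilder. A minimal sketch of that behaviour (hypothetical class, simplified to a deque instead of the array used by the real implementation):

---------------------------------------------------------------------------
import java.util.ArrayDeque;
import java.util.Deque;

final class DottedPath {
    private static final char DELIMITER = '.';
    private final Deque<String> elements = new ArrayDeque<>();
    private final StringBuilder sb = new StringBuilder();

    void add(String element) {
        elements.addLast(element);
    }

    void remove() {
        elements.removeLast();
    }

    String pathAsText(String name) {
        sb.setLength(0);
        for (String element : elements) {
            sb.append(element).append(DELIMITER);
        }
        return sb.append(name).toString();
    }

    public static void main(String[] args) {
        DottedPath path = new DottedPath();
        path.add("user");
        path.add("address");
        System.out.println(path.pathAsText("zip")); // user.address.zip
    }
}
---------------------------------------------------------------------------
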
@@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringAndBytesText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -113,11 +114,11 @@ public class DocumentMapper implements ToXContent {
    private final MapperService mapperService;

    private final String type;
    private final StringAndBytesText typeText;
    private final Text typeText;

    private volatile CompressedXContent mappingSource;

    private final Mapping mapping;
    private volatile Mapping mapping;

    private final DocumentParser documentParser;

@@ -137,7 +138,7 @@ public class DocumentMapper implements ToXContent {
                          ReentrantReadWriteLock mappingLock) {
        this.mapperService = mapperService;
        this.type = rootObjectMapper.name();
        this.typeText = new StringAndBytesText(this.type);
        this.typeText = new Text(this.type);
        this.mapping = new Mapping(
                Version.indexCreated(indexSettings),
                rootObjectMapper,
@@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent {
        mapperService.addMappers(type, objectMappers, fieldMappers);
    }

    public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
    public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
        try (ReleasableLock lock = mappingWriteLock.acquire()) {
            mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
            final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
            this.mapping.merge(mapping, mergeResult);
            // do the merge even if simulate == false so that we get exceptions
            Mapping merged = this.mapping.merge(mapping, updateAllTypes);
            if (simulate == false) {
                addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes);
                this.mapping = merged;
                Collection<ObjectMapper> objectMappers = new ArrayList<>();
                Collection<FieldMapper> fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers));
                MapperUtils.collect(merged.root, objectMappers, fieldMappers);
                addMappers(objectMappers, fieldMappers, updateAllTypes);
                refreshSource();
            }
            return mergeResult;
        }
    }

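Note: DocumentMapper.merge above now performs the merge unconditionally, so that conflicts surface as exceptions, and publishes the merged mapping only when simulate is false. A sketch of that "merge first, publish only when not simulating" shape, using a hypothetical immutable Mapping stand-in:

---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

final class SimulatedMerge {
    record Mapping(Map<String, String> fields) {
        Mapping merge(Mapping other) {
            Map<String, String> merged = new HashMap<>(fields);
            merged.putAll(other.fields());  // a real implementation would detect conflicts here
            return new Mapping(Map.copyOf(merged));
        }
    }

    private volatile Mapping current = new Mapping(Map.of("title", "text"));

    void merge(Mapping incoming, boolean simulate) {
        // always merge so that incompatibilities are reported even in a dry run
        Mapping merged = current.merge(incoming);
        if (simulate == false) {
            current = merged;               // publish only the validated result
        }
    }

    Mapping current() {
        return current;
    }
}
---------------------------------------------------------------------------
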
@@ -234,9 +234,6 @@ class DocumentParser implements Closeable {
            nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE));
        }

        ContentPath.Type origPathType = context.path().pathType();
        context.path().pathType(mapper.pathType());

        // if we are at the end of the previous object, advance
        if (token == XContentParser.Token.END_OBJECT) {
            token = parser.nextToken();
@@ -267,12 +264,11 @@ class DocumentParser implements Closeable {
                if (update == null) {
                    update = newUpdate;
                } else {
                    MapperUtils.merge(update, newUpdate);
                    update = update.merge(newUpdate, false);
                }
            }
        }
        // restore the enable path flag
        context.path().pathType(origPathType);
        if (nested.isNested()) {
            ParseContext.Document nestedDoc = context.doc();
            ParseContext.Document parentDoc = nestedDoc.getParent();
@@ -341,7 +337,7 @@ class DocumentParser implements Closeable {
                context.path().remove();
                Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
                if (builder == null) {
                    builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType());
                    builder = MapperBuilders.object(currentFieldName).enabled(true);
                    // if this is a non root object, then explicitly set the dynamic behavior if set
                    if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
                        ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic());
@@ -610,7 +606,7 @@ class DocumentParser implements Closeable {
            return null;
        }
        final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
        final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName));
        final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName));
        Mapper.Builder builder = null;
        if (existingFieldType != null) {
            // create a builder of the same type
@@ -695,7 +691,7 @@ class DocumentParser implements Closeable {
        if (paths.length > 1) {
            ObjectMapper parent = context.root();
            for (int i = 0; i < paths.length-1; i++) {
                mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i]));
                mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i]));
                if (mapper == null) {
                    // One mapping is missing, check if we are allowed to create a dynamic one.
                    ObjectMapper.Dynamic dynamic = parent.dynamic();
@@ -713,12 +709,12 @@ class DocumentParser implements Closeable {
                        if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
                            ((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
                        }
                        builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType());
                        builder = MapperBuilders.object(paths[i]).enabled(true);
                    }
                    Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                    mapper = (ObjectMapper) builder.build(builderContext);
                    if (mapper.nested() != ObjectMapper.Nested.NO) {
                        throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`");
                        throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
                    }
                    break;
                case FALSE:
@@ -759,7 +755,7 @@ class DocumentParser implements Closeable {
    private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
        final Mapper update = parseObjectOrField(context, mapper);
        if (update != null) {
            MapperUtils.merge(mapper, update);
            mapper = (M) mapper.merge(update, false);
        }
        return mapper;
    }

@@ -47,7 +47,7 @@ import java.util.List;
import java.util.Locale;
import java.util.stream.StreamSupport;

public abstract class FieldMapper extends Mapper {
public abstract class FieldMapper extends Mapper implements Cloneable {

    public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {

@@ -64,10 +64,10 @@ public abstract class FieldMapper extends Mapper {
        protected final MultiFields.Builder multiFieldsBuilder;
        protected CopyTo copyTo;

        protected Builder(String name, MappedFieldType fieldType) {
        protected Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) {
            super(name);
            this.fieldType = fieldType.clone();
            this.defaultFieldType = fieldType.clone();
            this.defaultFieldType = defaultFieldType.clone();
            this.defaultOptions = fieldType.indexOptions(); // we have to store it the fieldType is mutable
            multiFieldsBuilder = new MultiFields.Builder();
        }
@@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper {
                 * if the fieldType has a non-null option we are all good it might have been set through a different
                 * call.
                 */
                final IndexOptions options = getDefaultIndexOption();
                assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing";
                IndexOptions options = getDefaultIndexOption();
                if (options == IndexOptions.NONE) {
                    // can happen when an existing type on the same index has disabled indexing
                    // since we inherit the default field type from the first mapper that is
                    // created on an index
                    throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index");
                }
                fieldType.setIndexOptions(options);
            }
        } else {
@@ -202,11 +207,6 @@ public abstract class FieldMapper extends Mapper {
            return this;
        }

        public T multiFieldPathType(ContentPath.Type pathType) {
            multiFieldsBuilder.pathType(pathType);
            return builder;
        }

        public T addMultiField(Mapper.Builder mapperBuilder) {
            multiFieldsBuilder.add(mapperBuilder);
            return builder;
@@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper {
        }

        protected String buildFullName(BuilderContext context) {
            return context.path().fullPathAsText(name);
            return context.path().pathAsText(name);
        }

        protected void setupFieldType(BuilderContext context) {
@@ -270,7 +270,7 @@ public abstract class FieldMapper extends Mapper {

    protected MappedFieldTypeReference fieldTypeRef;
    protected final MappedFieldType defaultFieldType;
    protected final MultiFields multiFields;
    protected MultiFields multiFields;
    protected CopyTo copyTo;
    protected final boolean indexCreatedBefore2x;

@@ -359,26 +359,41 @@ public abstract class FieldMapper extends Mapper {
    }

    @Override
    public void merge(Mapper mergeWith, MergeResult mergeResult) {
    protected FieldMapper clone() {
        try {
            return (FieldMapper) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);
        }
    }

    @Override
    public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
        FieldMapper merged = clone();
        merged.doMerge(mergeWith, updateAllTypes);
        return merged;
    }

    /**
     * Merge changes coming from {@code mergeWith} in place.
     * @param updateAllTypes TODO
     */
    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        if (!this.getClass().equals(mergeWith.getClass())) {
            String mergedType = mergeWith.getClass().getSimpleName();
            if (mergeWith instanceof FieldMapper) {
                mergedType = ((FieldMapper) mergeWith).contentType();
            }
            mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
            // different types, return
            return;
            throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
        }
        FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
        multiFields.merge(mergeWith, mergeResult);
        multiFields = multiFields.merge(fieldMergeWith.multiFields);

        if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
            // apply changeable values
            MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
            fieldType.freeze();
            fieldTypeRef.set(fieldType);
            this.copyTo = fieldMergeWith.copyTo;
        }
        // apply changeable values
        MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
        fieldType.freeze();
        fieldTypeRef.set(fieldType);
        this.copyTo = fieldMergeWith.copyTo;
    }

    @Override
@ -520,18 +535,12 @@ public abstract class FieldMapper extends Mapper {
|
|||
public static class MultiFields {
|
||||
|
||||
public static MultiFields empty() {
|
||||
return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.<String, FieldMapper>of());
|
||||
return new MultiFields(ImmutableOpenMap.<String, FieldMapper>of());
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
|
||||
private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder();
|
||||
private ContentPath.Type pathType = ContentPath.Type.FULL;
|
||||
|
||||
public Builder pathType(ContentPath.Type pathType) {
|
||||
this.pathType = pathType;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder add(Mapper.Builder builder) {
|
||||
mapperBuilders.put(builder.name(), builder);
|
||||
|
@ -540,13 +549,9 @@ public abstract class FieldMapper extends Mapper {
|
|||
|
||||
@SuppressWarnings("unchecked")
|
||||
public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) {
|
||||
if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) {
|
||||
if (mapperBuilders.isEmpty()) {
|
||||
return empty();
|
||||
} else if (mapperBuilders.isEmpty()) {
|
||||
return new MultiFields(pathType, ImmutableOpenMap.<String, FieldMapper>of());
|
||||
} else {
|
||||
ContentPath.Type origPathType = context.path().pathType();
|
||||
context.path().pathType(pathType);
|
||||
context.path().add(mainFieldBuilder.name());
|
||||
ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders;
|
||||
for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) {
|
||||
|
@ -557,26 +562,25 @@ public abstract class FieldMapper extends Mapper {
|
|||
mapperBuilders.put(key, mapper);
|
||||
}
|
||||
context.path().remove();
|
||||
context.path().pathType(origPathType);
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> mappers = mapperBuilders.cast();
|
||||
return new MultiFields(pathType, mappers.build());
|
||||
return new MultiFields(mappers.build());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final ContentPath.Type pathType;
|
||||
private volatile ImmutableOpenMap<String, FieldMapper> mappers;
|
||||
private final ImmutableOpenMap<String, FieldMapper> mappers;
|
||||
|
||||
public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) {
|
||||
this.pathType = pathType;
|
||||
this.mappers = mappers;
|
||||
private MultiFields(ImmutableOpenMap<String, FieldMapper> mappers) {
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>();
|
||||
// we disable the all in multi-field mappers
|
||||
for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
|
||||
for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) {
|
||||
FieldMapper mapper = cursor.value;
|
||||
if (mapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
|
||||
mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
|
||||
}
|
||||
builder.put(cursor.key, mapper);
|
||||
}
|
||||
this.mappers = builder.build();
|
||||
}
|
||||
|
||||
public void parse(FieldMapper mainField, ParseContext context) throws IOException {
|
||||
|
@ -587,58 +591,33 @@ public abstract class FieldMapper extends Mapper {
|
|||
|
||||
context = context.createMultiFieldContext();
|
||||
|
||||
ContentPath.Type origPathType = context.path().pathType();
|
||||
context.path().pathType(pathType);
|
||||
|
||||
context.path().add(mainField.simpleName());
|
||||
for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
|
||||
cursor.value.parse(context);
|
||||
}
|
||||
context.path().remove();
|
||||
context.path().pathType(origPathType);
|
||||
}
|
||||
|
||||
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
|
||||
public MultiFields merge(MultiFields mergeWith) {
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = ImmutableOpenMap.builder(mappers);
|
||||
|
||||
List<FieldMapper> newFieldMappers = null;
|
||||
ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = null;
|
||||
|
||||
for (ObjectCursor<FieldMapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
|
||||
for (ObjectCursor<FieldMapper> cursor : mergeWith.mappers.values()) {
|
||||
FieldMapper mergeWithMapper = cursor.value;
|
||||
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
|
||||
FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
|
||||
if (mergeIntoMapper == null) {
|
||||
// no mapping, simply add it if not simulating
|
||||
if (!mergeResult.simulate()) {
|
||||
// we disable the all in multi-field mappers
|
||||
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
|
||||
}
|
||||
if (newMappersBuilder == null) {
|
||||
newMappersBuilder = ImmutableOpenMap.builder(mappers);
|
||||
}
|
||||
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
|
||||
if (mergeWithMapper instanceof FieldMapper) {
|
||||
if (newFieldMappers == null) {
|
||||
newFieldMappers = new ArrayList<>(2);
|
||||
}
|
||||
newFieldMappers.add(mergeWithMapper);
|
||||
}
|
||||
// we disable the all in multi-field mappers
|
||||
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
|
||||
mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
|
||||
}
|
||||
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
|
||||
} else {
|
||||
mergeIntoMapper.merge(mergeWithMapper, mergeResult);
|
||||
FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
|
||||
newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
|
||||
}
|
||||
}
|
||||
|
||||
// first add all field mappers
|
||||
if (newFieldMappers != null) {
|
||||
mergeResult.addFieldMappers(newFieldMappers);
|
||||
}
|
||||
// now publish mappers
|
||||
if (newMappersBuilder != null) {
|
||||
mappers = newMappersBuilder.build();
|
||||
}
|
||||
ImmutableOpenMap<String, FieldMapper> mappers = newMappersBuilder.build();
|
||||
return new MultiFields(mappers);
|
||||
}
|
||||
|
||||
public Iterator<Mapper> iterator() {
|
||||
|
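The rewritten MultiFields#merge above builds a fresh map from both sides and returns a new MultiFields rather than mutating the receiver. A minimal, self-contained sketch of that copy-on-write merge, using plain java.util.Map and String values as stand-ins for ImmutableOpenMap and FieldMapper (all names below are hypothetical):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Both operands stay untouched; only the returned container holds the merged view.
final class MultiFieldsSketch {
    private final Map<String, String> mappers; // sub-field name -> mapper definition

    MultiFieldsSketch(Map<String, String> mappers) {
        this.mappers = Collections.unmodifiableMap(new HashMap<>(mappers));
    }

    MultiFieldsSketch merge(MultiFieldsSketch mergeWith) {
        Map<String, String> merged = new HashMap<>(this.mappers);
        for (Map.Entry<String, String> entry : mergeWith.mappers.entrySet()) {
            String existing = merged.get(entry.getKey());
            if (existing == null) {
                merged.put(entry.getKey(), entry.getValue());                  // new sub-field: just add it
            } else {
                merged.put(entry.getKey(), existing + "+" + entry.getValue()); // override with the merged definition
            }
        }
        return new MultiFieldsSketch(merged);
    }

    @Override
    public String toString() {
        return mappers.toString();
    }

    public static void main(String[] args) {
        MultiFieldsSketch existing = new MultiFieldsSketch(Map.of("raw", "not_analyzed"));
        MultiFieldsSketch incoming = new MultiFieldsSketch(Map.of("raw", "not_analyzed", "en", "english"));
        System.out.println(existing.merge(incoming)); // merged view
        System.out.println(existing);                 // unchanged
    }
}
---------------------------------------------------------------------------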
@ -646,9 +625,6 @@ public abstract class FieldMapper extends Mapper {
|
|||
}
|
||||
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
if (pathType != ContentPath.Type.FULL) {
|
||||
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
if (!mappers.isEmpty()) {
|
||||
// sort the mappers so we get consistent serialization format
|
||||
Mapper[] sortedMappers = mappers.values().toArray(Mapper.class);
@@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
public abstract String name();

public abstract void merge(Mapper mergeWith, MergeResult mergeResult);
/** Return the merge of {@code mergeWith} into this.
* Both {@code this} and {@code mergeWith} will be left unmodified. */
public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
}

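The hunk above replaces the mutating `merge(Mapper, MergeResult)` with a `merge(Mapper, boolean)` that returns the merged mapper and leaves both operands unmodified. A rough sketch of what implementing that contract can look like, with a hypothetical `SimpleMapper` standing in for a real mapper (not part of the Elasticsearch API):

---------------------------------------------------------------------------
import java.util.Objects;

// merge() returns a new instance, leaving both operands untouched,
// mirroring the new Mapper#merge contract.
final class SimpleMapper {
    private final String name;
    private final Boolean includeInAll; // null means "not set"

    SimpleMapper(String name, Boolean includeInAll) {
        this.name = Objects.requireNonNull(name);
        this.includeInAll = includeInAll;
    }

    /** Merge {@code mergeWith} into this mapper and return the result as a new object. */
    SimpleMapper merge(SimpleMapper mergeWith) {
        if (!name.equals(mergeWith.name)) {
            throw new IllegalArgumentException("different names: " + name + " vs " + mergeWith.name);
        }
        // the incoming mapping wins for explicitly set values
        Boolean merged = mergeWith.includeInAll != null ? mergeWith.includeInAll : includeInAll;
        return new SimpleMapper(name, merged);
    }

    @Override
    public String toString() {
        return name + "{includeInAll=" + includeInAll + "}";
    }

    public static void main(String[] args) {
        SimpleMapper existing = new SimpleMapper("title", null);
        SimpleMapper update = new SimpleMapper("title", Boolean.TRUE);
        SimpleMapper merged = existing.merge(update);
        System.out.println(existing); // unchanged
        System.out.println(merged);   // title{includeInAll=true}
    }
}
---------------------------------------------------------------------------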
@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef;
|
|||
import org.elasticsearch.ElasticsearchGenerationException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
|
@@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock());

private volatile FieldTypeLookup fieldTypes;
private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of();
private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
private boolean hasNested = false; // updated dynamically to true when a nested object is added

private final DocumentMapperParser documentParser;

@ -199,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
|
||||
if (DEFAULT_MAPPING.equals(type)) {
|
||||
// verify we can parse it
|
||||
// NOTE: never apply the default here
|
||||
DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
|
||||
// still add it as a document mapper so we have it registered and, for example, persisted back into
|
||||
// the cluster meta data if needed, or checked for existence
|
||||
|
@ -212,75 +212,70 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
}
|
||||
return mapper;
|
||||
} else {
|
||||
return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
|
||||
try (ReleasableLock lock = mappingWriteLock.acquire()) {
|
||||
// only apply the default mapping if we don't have the type yet
|
||||
applyDefault &= mappers.containsKey(type) == false;
|
||||
return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// never expose this to the outside world, we need to reparse the doc mapper so we get fresh
|
||||
// instances of field mappers to properly remove existing doc mapper
|
||||
private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) {
|
||||
try (ReleasableLock lock = mappingWriteLock.acquire()) {
|
||||
if (mapper.type().length() == 0) {
|
||||
throw new InvalidTypeNameException("mapping type name is empty");
|
||||
}
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
|
||||
}
|
||||
if (mapper.type().charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
|
||||
}
|
||||
if (mapper.type().contains("#")) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
|
||||
}
|
||||
if (mapper.type().contains(",")) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
|
||||
}
|
||||
if (mapper.type().equals(mapper.parentFieldMapper().type())) {
|
||||
throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
|
||||
}
|
||||
if (typeNameStartsWithIllegalDot(mapper)) {
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
|
||||
throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
|
||||
} else {
|
||||
logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
|
||||
}
|
||||
}
|
||||
// we can add new field/object mappers while the old ones are there
|
||||
// since we get new instances of those, and when we remove, we remove
|
||||
// by instance equality
|
||||
DocumentMapper oldMapper = mappers.get(mapper.type());
|
||||
|
||||
if (oldMapper != null) {
|
||||
// simulate first
|
||||
MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
|
||||
if (result.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
|
||||
}
|
||||
// then apply for real
|
||||
result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
|
||||
assert result.hasConflicts() == false; // we already simulated
|
||||
return oldMapper;
|
||||
if (mapper.type().length() == 0) {
|
||||
throw new InvalidTypeNameException("mapping type name is empty");
|
||||
}
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
|
||||
}
|
||||
if (mapper.type().charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
|
||||
}
|
||||
if (mapper.type().contains("#")) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
|
||||
}
|
||||
if (mapper.type().contains(",")) {
|
||||
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
|
||||
}
|
||||
if (mapper.type().equals(mapper.parentFieldMapper().type())) {
|
||||
throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
|
||||
}
|
||||
if (typeNameStartsWithIllegalDot(mapper)) {
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
|
||||
throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
|
||||
} else {
|
||||
Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
|
||||
mapper.type(), mapper.mapping(), updateAllTypes);
|
||||
Collection<ObjectMapper> newObjectMappers = newMappers.v1();
|
||||
Collection<FieldMapper> newFieldMappers = newMappers.v2();
|
||||
addMappers(mapper.type(), newObjectMappers, newFieldMappers);
|
||||
|
||||
for (DocumentTypeListener typeListener : typeListeners) {
|
||||
typeListener.beforeCreate(mapper);
|
||||
}
|
||||
mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
|
||||
if (mapper.parentFieldMapper().active()) {
|
||||
Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
|
||||
newParentTypes.addAll(parentTypes);
|
||||
newParentTypes.add(mapper.parentFieldMapper().type());
|
||||
parentTypes = unmodifiableSet(newParentTypes);
|
||||
}
|
||||
assert assertSerialization(mapper);
|
||||
return mapper;
|
||||
logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
|
||||
}
|
||||
}
|
||||
// we can add new field/object mappers while the old ones are there
|
||||
// since we get new instances of those, and when we remove, we remove
|
||||
// by instance equality
|
||||
DocumentMapper oldMapper = mappers.get(mapper.type());
|
||||
|
||||
if (oldMapper != null) {
|
||||
oldMapper.merge(mapper.mapping(), false, updateAllTypes);
|
||||
return oldMapper;
|
||||
} else {
|
||||
Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
|
||||
mapper.type(), mapper.mapping(), updateAllTypes);
|
||||
Collection<ObjectMapper> newObjectMappers = newMappers.v1();
|
||||
Collection<FieldMapper> newFieldMappers = newMappers.v2();
|
||||
addMappers(mapper.type(), newObjectMappers, newFieldMappers);
|
||||
|
||||
for (DocumentTypeListener typeListener : typeListeners) {
|
||||
typeListener.beforeCreate(mapper);
|
||||
}
|
||||
mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
|
||||
if (mapper.parentFieldMapper().active()) {
|
||||
Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
|
||||
newParentTypes.addAll(parentTypes);
|
||||
newParentTypes.add(mapper.parentFieldMapper().type());
|
||||
parentTypes = unmodifiableSet(newParentTypes);
|
||||
}
|
||||
assert assertSerialization(mapper);
|
||||
return mapper;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
|
||||
|
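The merge path above runs a series of type-name checks before anything is registered. A condensed, runnable sketch of those checks, using IllegalArgumentException in place of InvalidTypeNameException and dropping the version-dependent leading-dot rule:

---------------------------------------------------------------------------
// Hypothetical condensation of the type-name validation performed before a
// mapping is merged; the real MapperService also applies version-dependent rules.
final class TypeNameChecks {
    static void validateTypeName(String type) {
        if (type.isEmpty()) {
            throw new IllegalArgumentException("mapping type name is empty");
        }
        if (type.length() > 255) {
            throw new IllegalArgumentException("mapping type name [" + type + "] is too long; limit is length 255 but was [" + type.length() + "]");
        }
        if (type.charAt(0) == '_') {
            throw new IllegalArgumentException("mapping type name [" + type + "] can't start with '_'");
        }
        if (type.contains("#") || type.contains(",")) {
            throw new IllegalArgumentException("mapping type name [" + type + "] should not include '#' or ',' in it");
        }
    }

    public static void main(String[] args) {
        validateTypeName("my_type"); // passes
        try {
            validateTypeName("_bad");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
---------------------------------------------------------------------------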
@ -300,19 +295,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
return true;
|
||||
}
|
||||
|
||||
private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
final Set<String> objectFullNames = new HashSet<>();
|
||||
for (ObjectMapper objectMapper : objectMappers) {
|
||||
final String fullPath = objectMapper.fullPath();
|
||||
if (objectFullNames.add(fullPath) == false) {
|
||||
throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
|
||||
}
|
||||
}
|
||||
|
||||
if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) {
|
||||
// Before 3.0 some metadata mappers are also registered under the root object mapper
|
||||
// So we avoid false positives by deduplicating mappers
|
||||
// given that we check exact equality, this would still catch the case that a mapper
|
||||
// is defined under the root object
|
||||
Collection<FieldMapper> uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>());
|
||||
uniqueFieldMappers.addAll(fieldMappers);
|
||||
fieldMappers = uniqueFieldMappers;
|
||||
}
|
||||
|
||||
final Set<String> fieldNames = new HashSet<>();
|
||||
for (FieldMapper fieldMapper : fieldMappers) {
|
||||
final String name = fieldMapper.name();
|
||||
if (objectFullNames.contains(name)) {
|
||||
throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]");
|
||||
} else if (fieldNames.add(name) == false) {
|
||||
throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
|
||||
assert mappingLock.isWriteLockedByCurrentThread();
|
||||
|
||||
checkFieldUniqueness(type, objectMappers, fieldMappers);
|
||||
|
||||
for (ObjectMapper newObjectMapper : objectMappers) {
|
||||
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
|
||||
if (existingObjectMapper != null) {
|
||||
MergeResult result = new MergeResult(true, updateAllTypes);
|
||||
existingObjectMapper.merge(newObjectMapper, result);
|
||||
if (result.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" +
|
||||
Arrays.toString(result.buildConflicts()));
|
||||
}
|
||||
// simulate a merge and ignore the result, we are just interested
|
||||
// in exceptions here
|
||||
existingObjectMapper.merge(newObjectMapper, updateAllTypes);
|
||||
}
|
||||
}
|
||||
|
||||
for (FieldMapper fieldMapper : fieldMappers) {
|
||||
if (fullPathObjectMappers.containsKey(fieldMapper.name())) {
|
||||
throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types");
|
||||
}
|
||||
}
|
||||
|
||||
fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes);
|
||||
}
|
||||
|
||||
|
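checkFieldUniqueness above relies on Set#add returning false for an element that is already present. A minimal sketch of that duplicate-name detection, with plain strings standing in for mappers:

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Set#add returns false when the element is already present, which is enough
// to detect a field that is defined twice in the same mapping.
final class UniquenessCheckSketch {
    static void checkUnique(List<String> fieldNames) {
        Set<String> seen = new HashSet<>();
        for (String name : fieldNames) {
            if (seen.add(name) == false) {
                throw new IllegalArgumentException("Field [" + name + "] is defined twice");
            }
        }
    }

    public static void main(String[] args) {
        checkUnique(Arrays.asList("title", "body")); // fine
        try {
            checkUnique(Arrays.asList("title", "title"));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Field [title] is defined twice
        }
    }
}
---------------------------------------------------------------------------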
@ -320,9 +352,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
String type, Mapping mapping, boolean updateAllTypes) {
|
||||
List<ObjectMapper> objectMappers = new ArrayList<>();
|
||||
List<FieldMapper> fieldMappers = new ArrayList<>();
|
||||
for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) {
|
||||
fieldMappers.add(metadataMapper);
|
||||
}
|
||||
Collections.addAll(fieldMappers, mapping.metadataMappers);
|
||||
MapperUtils.collect(mapping.root, objectMappers, fieldMappers);
|
||||
checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes);
|
||||
return new Tuple<>(objectMappers, fieldMappers);
|
||||
|
@ -330,14 +360,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
|
||||
protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
assert mappingLock.isWriteLockedByCurrentThread();
|
||||
ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
|
||||
Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
|
||||
for (ObjectMapper objectMapper : objectMappers) {
|
||||
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
|
||||
if (objectMapper.nested().isNested()) {
|
||||
hasNested = true;
|
||||
}
|
||||
}
|
||||
this.fullPathObjectMappers = fullPathObjectMappers.build();
|
||||
this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
|
||||
this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers);
|
||||
}
|
||||
@ -27,52 +27,6 @@ import java.util.Collection;
|
|||
public enum MapperUtils {
|
||||
;
|
||||
|
||||
private static MergeResult newStrictMergeResult() {
|
||||
return new MergeResult(false, false) {
|
||||
|
||||
@Override
|
||||
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<FieldMapper> getNewFieldMappers() {
|
||||
throw new UnsupportedOperationException("Strict merge result does not support new field mappers");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<ObjectMapper> getNewObjectMappers() {
|
||||
throw new UnsupportedOperationException("Strict merge result does not support new object mappers");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addConflict(String mergeFailure) {
|
||||
throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
|
||||
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
|
||||
*/
|
||||
public static void merge(Mapper mergeInto, Mapper mergeWith) {
|
||||
mergeInto.merge(mergeWith, newStrictMergeResult());
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
|
||||
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
|
||||
*/
|
||||
public static void merge(Mapping mergeInto, Mapping mergeWith) {
|
||||
mergeInto.merge(mergeWith, newStrictMergeResult());
|
||||
}
|
||||
|
||||
/** Split mapper and its descendants into object and field mappers. */
|
||||
public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
if (mapper instanceof RootObjectMapper) {
|
||||
|
|
|
@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap;
|
|||
*/
|
||||
public final class Mapping implements ToXContent {
|
||||
|
||||
public static final List<String> LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl");
|
||||
// Set of fields that were included into the root object mapper before 2.0
|
||||
public static final Set<String> LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>(
|
||||
Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl")));
|
||||
|
||||
final Version indexCreated;
|
||||
final RootObjectMapper root;
|
||||
final MetadataFieldMapper[] metadataMappers;
|
||||
final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap;
|
||||
volatile Map<String, Object> meta;
|
||||
final Map<String, Object> meta;
|
||||
|
||||
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
|
||||
this.indexCreated = indexCreated;
|
||||
this.root = rootObjectMapper;
|
||||
this.metadataMappers = metadataMappers;
|
||||
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>();
|
||||
for (MetadataFieldMapper metadataMapper : metadataMappers) {
|
||||
if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) {
|
||||
root.putMapper(metadataMapper);
|
||||
rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper);
|
||||
}
|
||||
metadataMappersMap.put(metadataMapper.getClass(), metadataMapper);
|
||||
}
|
||||
this.root = rootObjectMapper;
|
||||
// keep root mappers sorted for consistent serialization
|
||||
Arrays.sort(metadataMappers, new Comparator<Mapper>() {
|
||||
@Override
|
||||
|
@ -90,21 +94,20 @@ public final class Mapping implements ToXContent {
|
|||
}
|
||||
|
||||
/** @see DocumentMapper#merge(Mapping, boolean, boolean) */
|
||||
public void merge(Mapping mergeWith, MergeResult mergeResult) {
|
||||
assert metadataMappers.length == mergeWith.metadataMappers.length;
|
||||
|
||||
root.merge(mergeWith.root, mergeResult);
|
||||
for (MetadataFieldMapper metadataMapper : metadataMappers) {
|
||||
MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass());
|
||||
if (mergeWithMetadataMapper != null) {
|
||||
metadataMapper.merge(mergeWithMetadataMapper, mergeResult);
|
||||
public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
|
||||
RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
|
||||
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
|
||||
for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
|
||||
MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
|
||||
MetadataFieldMapper merged;
|
||||
if (mergeInto == null) {
|
||||
merged = metaMergeWith;
|
||||
} else {
|
||||
merged = mergeInto.merge(metaMergeWith, updateAllTypes);
|
||||
}
|
||||
mergedMetaDataMappers.put(merged.getClass(), merged);
|
||||
}
|
||||
|
||||
if (mergeResult.simulate() == false) {
|
||||
// let the merge with attributes to override the attributes
|
||||
meta = mergeWith.meta;
|
||||
}
|
||||
return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
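The new Mapping#merge above copies the metadata-mapper map, folds the incoming mappers into it, and returns a new Mapping. A small sketch of that copy-then-fold step over plain maps; the string values and the combining merger are stand-ins for real MetadataFieldMapper merging:

---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.BinaryOperator;

// Copy the existing map, fold in the incoming entries (merging where a key
// already exists), and hand back a fresh map so neither input is mutated.
final class MapMergeSketch {
    static Map<String, String> merge(Map<String, String> into, Map<String, String> mergeWith,
                                     BinaryOperator<String> merger) {
        Map<String, String> merged = new HashMap<>(into);
        for (Map.Entry<String, String> entry : mergeWith.entrySet()) {
            merged.merge(entry.getKey(), entry.getValue(), merger);
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> existing = Map.of("_all", "enabled=true");
        Map<String, String> incoming = Map.of("_all", "enabled=true", "_ttl", "enabled=false");
        // the merger combines the two definitions when both sides declare the same key
        System.out.println(merge(existing, incoming, (a, b) -> a + "&" + b));
    }
}
---------------------------------------------------------------------------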
@ -1,81 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.index.mapper.object.ObjectMapper;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
|
||||
/** A container for tracking results of a mapping merge. */
|
||||
public class MergeResult {
|
||||
|
||||
private final boolean simulate;
|
||||
private final boolean updateAllTypes;
|
||||
|
||||
private final List<String> conflicts = new ArrayList<>();
|
||||
private final List<FieldMapper> newFieldMappers = new ArrayList<>();
|
||||
private final List<ObjectMapper> newObjectMappers = new ArrayList<>();
|
||||
|
||||
public MergeResult(boolean simulate, boolean updateAllTypes) {
|
||||
this.simulate = simulate;
|
||||
this.updateAllTypes = updateAllTypes;
|
||||
}
|
||||
|
||||
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
|
||||
assert simulate() == false;
|
||||
newFieldMappers.addAll(fieldMappers);
|
||||
}
|
||||
|
||||
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
|
||||
assert simulate() == false;
|
||||
newObjectMappers.addAll(objectMappers);
|
||||
}
|
||||
|
||||
public Collection<FieldMapper> getNewFieldMappers() {
|
||||
return newFieldMappers;
|
||||
}
|
||||
|
||||
public Collection<ObjectMapper> getNewObjectMappers() {
|
||||
return newObjectMappers;
|
||||
}
|
||||
|
||||
public boolean simulate() {
|
||||
return simulate;
|
||||
}
|
||||
|
||||
public boolean updateAllTypes() {
|
||||
return updateAllTypes;
|
||||
}
|
||||
|
||||
public void addConflict(String mergeFailure) {
|
||||
conflicts.add(mergeFailure);
|
||||
}
|
||||
|
||||
public boolean hasConflicts() {
|
||||
return conflicts.isEmpty() == false;
|
||||
}
|
||||
|
||||
public String[] buildConflicts() {
|
||||
return conflicts.toArray(Strings.EMPTY_ARRAY);
|
||||
}
|
||||
}
|
|
@@ -51,8 +51,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
}

public abstract static class Builder<T extends Builder, Y extends MetadataFieldMapper> extends FieldMapper.Builder<T, Y> {
public Builder(String name, MappedFieldType fieldType) {
super(name, fieldType);
public Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) {
super(name, fieldType, defaultFieldType);
}
}

@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
|
|||
*/
|
||||
public abstract void postParse(ParseContext context) throws IOException;
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -595,7 +595,7 @@ public abstract class ParseContext {
|
|||
if (dynamicMappingsUpdate == null) {
|
||||
dynamicMappingsUpdate = mapper;
|
||||
} else {
|
||||
MapperUtils.merge(dynamicMappingsUpdate, mapper);
|
||||
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ public class ParsedDocument {
|
|||
if (dynamicMappingsUpdate == null) {
|
||||
dynamicMappingsUpdate = update;
|
||||
} else {
|
||||
MapperUtils.merge(dynamicMappingsUpdate, update);
|
||||
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -72,7 +72,7 @@ public class BinaryFieldMapper extends FieldMapper {
|
|||
public static class Builder extends FieldMapper.Builder<Builder, BinaryFieldMapper> {
|
||||
|
||||
public Builder(String name) {
|
||||
super(name, Defaults.FIELD_TYPE);
|
||||
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
|
||||
builder = this;
|
||||
}
|
||||
|
||||
@ -72,7 +72,7 @@ public class BooleanFieldMapper extends FieldMapper {
|
|||
public static class Builder extends FieldMapper.Builder<Builder, BooleanFieldMapper> {
|
||||
|
||||
public Builder(String name) {
|
||||
super(name, Defaults.FIELD_TYPE);
|
||||
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
|
||||
this.builder = this;
|
||||
}
|
||||
|
||||
|
|
|
@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
|
||||
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -356,7 +356,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
|
|||
* @param name of the completion field to build
|
||||
*/
|
||||
public Builder(String name) {
|
||||
super(name, new CompletionFieldType());
|
||||
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
|
||||
builder = this;
|
||||
}
|
||||
|
||||
|
@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
|
||||
if (!mergeResult.simulate()) {
|
||||
this.maxInputLength = fieldMergeWith.maxInputLength;
|
||||
}
|
||||
this.maxInputLength = fieldMergeWith.maxInputLength;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper {
|
|||
fieldType.setNullValue(nullValue);
|
||||
DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
|
||||
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (DateFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
|
|||
IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper {
|
|||
setupFieldType(context);
|
||||
LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (LongFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -66,7 +66,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
|||
private Boolean coerce;
|
||||
|
||||
public Builder(String name, MappedFieldType fieldType, int defaultPrecisionStep) {
|
||||
super(name, fieldType);
|
||||
super(name, fieldType, fieldType);
|
||||
this.fieldType.setNumericPrecisionStep(defaultPrecisionStep);
|
||||
}
|
||||
|
||||
|
@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
|||
}
|
||||
|
||||
@Override
|
||||
public void includeInAll(Boolean includeInAll) {
|
||||
protected NumberFieldMapper clone() {
|
||||
return (NumberFieldMapper) super.clone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mapper includeInAll(Boolean includeInAll) {
|
||||
if (includeInAll != null) {
|
||||
this.includeInAll = includeInAll;
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void includeInAllIfNotSet(Boolean includeInAll) {
|
||||
public Mapper includeInAllIfNotSet(Boolean includeInAll) {
|
||||
if (includeInAll != null && this.includeInAll == null) {
|
||||
this.includeInAll = includeInAll;
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetIncludeInAll() {
|
||||
includeInAll = null;
|
||||
public Mapper unsetIncludeInAll() {
|
||||
if (includeInAll != null) {
|
||||
NumberFieldMapper clone = clone();
|
||||
clone.includeInAll = null;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
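The includeInAll methods above now clone the mapper and set the flag on the copy rather than mutating `this`. A self-contained sketch of that clone-and-set pattern; CopyOnWriteFlag is a hypothetical stand-in for NumberFieldMapper:

---------------------------------------------------------------------------
// Instead of mutating the field, return an adjusted copy
// (or `this` when nothing changes).
final class CopyOnWriteFlag implements Cloneable {
    private Boolean includeInAll; // null means "unset"

    @Override
    protected CopyOnWriteFlag clone() {
        try {
            return (CopyOnWriteFlag) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // cannot happen: we implement Cloneable
        }
    }

    CopyOnWriteFlag includeInAll(Boolean includeInAll) {
        if (includeInAll == null) {
            return this;                  // nothing to change, share the instance
        }
        CopyOnWriteFlag copy = clone();
        copy.includeInAll = includeInAll; // only the copy is modified
        return copy;
    }

    @Override
    public String toString() {
        return "includeInAll=" + includeInAll;
    }

    public static void main(String[] args) {
        CopyOnWriteFlag original = new CopyOnWriteFlag();
        CopyOnWriteFlag updated = original.includeInAll(Boolean.TRUE);
        System.out.println(original); // includeInAll=null
        System.out.println(updated);  // includeInAll=true
    }
}
---------------------------------------------------------------------------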
@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
|
||||
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
this.includeInAll = nfmMergeWith.includeInAll;
|
||||
if (nfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (nfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = nfmMergeWith.coerce;
|
||||
}
|
||||
this.includeInAll = nfmMergeWith.includeInAll;
|
||||
if (nfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (nfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = nfmMergeWith.coerce;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
|
|||
ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context),
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
|
||||
|
||||
|
@ -99,7 +98,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
protected int ignoreAbove = Defaults.IGNORE_ABOVE;
|
||||
|
||||
public Builder(String name) {
|
||||
super(name, Defaults.FIELD_TYPE);
|
||||
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
|
||||
builder = this;
|
||||
}
|
||||
|
||||
|
@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
StringFieldMapper fieldMapper = new StringFieldMapper(
|
||||
name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove,
|
||||
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
}
|
||||
|
||||
@Override
|
||||
public void includeInAll(Boolean includeInAll) {
|
||||
protected StringFieldMapper clone() {
|
||||
return (StringFieldMapper) super.clone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public StringFieldMapper includeInAll(Boolean includeInAll) {
|
||||
if (includeInAll != null) {
|
||||
this.includeInAll = includeInAll;
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void includeInAllIfNotSet(Boolean includeInAll) {
|
||||
public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
|
||||
if (includeInAll != null && this.includeInAll == null) {
|
||||
this.includeInAll = includeInAll;
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = includeInAll;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetIncludeInAll() {
|
||||
includeInAll = null;
|
||||
public StringFieldMapper unsetIncludeInAll() {
|
||||
if (includeInAll != null) {
|
||||
StringFieldMapper clone = clone();
|
||||
clone.includeInAll = null;
|
||||
return clone;
|
||||
} else {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
if (!mergeResult.simulate()) {
|
||||
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
|
||||
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
|
||||
this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
|
||||
|
||||
|
@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
|
|||
TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType,
|
||||
ignoreMalformed(context), coerce(context), context.indexSettings(),
|
||||
analyzer, multiFieldsBuilder.build(this, context), copyTo);
|
||||
fieldMapper.includeInAll(includeInAll);
|
||||
return fieldMapper;
|
||||
return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
if (!mergeResult.simulate()) {
|
||||
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -61,7 +61,6 @@ public class TypeParsers {
|
|||
|
||||
@Override
|
||||
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
ContentPath.Type pathType = null;
|
||||
FieldMapper.Builder mainFieldBuilder = null;
|
||||
List<FieldMapper.Builder> fields = null;
|
||||
String firstType = null;
|
||||
|
@ -70,10 +69,7 @@ public class TypeParsers {
|
|||
Map.Entry<String, Object> entry = iterator.next();
|
||||
String fieldName = Strings.toUnderscoreCase(entry.getKey());
|
||||
Object fieldNode = entry.getValue();
|
||||
if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
|
||||
pathType = parsePathType(name, fieldNode.toString());
|
||||
iterator.remove();
|
||||
} else if (fieldName.equals("fields")) {
|
||||
if (fieldName.equals("fields")) {
|
||||
Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
|
||||
for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry1 = fieldsIterator.next();
|
||||
|
@ -132,17 +128,10 @@ public class TypeParsers {
|
|||
}
|
||||
}
|
||||
|
||||
if (fields != null && pathType != null) {
|
||||
if (fields != null) {
|
||||
for (Mapper.Builder field : fields) {
|
||||
mainFieldBuilder.addMultiField(field);
|
||||
}
|
||||
mainFieldBuilder.multiFieldPathType(pathType);
|
||||
} else if (fields != null) {
|
||||
for (Mapper.Builder field : fields) {
|
||||
mainFieldBuilder.addMultiField(field);
|
||||
}
|
||||
} else if (pathType != null) {
|
||||
mainFieldBuilder.multiFieldPathType(pathType);
|
||||
}
|
||||
return mainFieldBuilder;
|
||||
}
|
||||
|
@ -337,10 +326,7 @@ public class TypeParsers {
|
|||
|
||||
public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
|
||||
parserContext = parserContext.createMultiFieldContext(parserContext);
|
||||
if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
|
||||
builder.multiFieldPathType(parsePathType(name, propNode.toString()));
|
||||
return true;
|
||||
} else if (propName.equals("fields")) {
|
||||
if (propName.equals("fields")) {
|
||||
|
||||
final Map<String, Object> multiFieldsPropNodes;
|
||||
|
||||
|
@ -457,17 +443,6 @@ public class TypeParsers {
|
|||
}
|
||||
}
|
||||
|
||||
public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException {
|
||||
path = Strings.toUnderscoreCase(path);
|
||||
if ("just_name".equals(path)) {
|
||||
return ContentPath.Type.JUST_NAME;
|
||||
} else if ("full".equals(path)) {
|
||||
return ContentPath.Type.FULL;
|
||||
} else {
|
||||
throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) {
|
||||
FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder();
|
||||
|
|
|
@ -33,12 +33,10 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
|
||||
|
@ -74,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
public static class Defaults {
|
||||
public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
|
||||
public static final boolean ENABLE_LATLON = false;
|
||||
public static final boolean ENABLE_GEOHASH = false;
|
||||
public static final boolean ENABLE_GEOHASH_PREFIX = false;
|
||||
|
@ -83,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> {
|
||||
protected ContentPath.Type pathType = Defaults.PATH_TYPE;
|
||||
|
||||
protected boolean enableLatLon = Defaults.ENABLE_LATLON;
|
||||
|
||||
|
@ -98,7 +94,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
protected Boolean ignoreMalformed;
|
||||
|
||||
public Builder(String name, GeoPointFieldType fieldType) {
|
||||
super(name, fieldType);
|
||||
super(name, fieldType, fieldType);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -106,12 +102,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
return (GeoPointFieldType)fieldType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T multiFieldPathType(ContentPath.Type pathType) {
|
||||
this.pathType = pathType;
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T fieldDataSettings(Settings settings) {
|
||||
this.fieldDataSettings = settings;
|
||||
|
@ -159,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
|
||||
Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
|
||||
|
||||
public Y build(Mapper.BuilderContext context) {
|
||||
ContentPath.Type origPathType = context.path().pathType();
|
||||
context.path().pathType(pathType);
|
||||
|
||||
GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
|
||||
|
||||
DoubleFieldMapper latMapper = null;
|
||||
|
@ -191,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
|
||||
}
|
||||
context.path().remove();
|
||||
context.path().pathType(origPathType);
|
||||
|
||||
return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType,
|
||||
return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
|
||||
latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
|
||||
}
|
||||
}
|
||||
|
@ -365,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
|
||||
protected final DoubleFieldMapper lonMapper;
|
||||
|
||||
protected final ContentPath.Type pathType;
|
||||
|
||||
protected final StringFieldMapper geoHashMapper;
|
||||
|
||||
protected Explicit<Boolean> ignoreMalformed;
|
||||
|
||||
protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
|
||||
ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
|
||||
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
|
||||
MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
|
||||
this.pathType = pathType;
|
||||
this.latMapper = latMapper;
|
||||
this.lonMapper = lonMapper;
|
||||
this.geoHashMapper = geoHashMapper;
|
||||
|
@ -388,17 +371,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith;
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
if (gpfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
if (gpfmMergeWith.ignoreMalformed.explicit()) {
|
||||
this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -441,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
|
||||
@Override
|
||||
public Mapper parse(ParseContext context) throws IOException {
|
||||
ContentPath.Type origPathType = context.path().pathType();
|
||||
context.path().pathType(pathType);
|
||||
context.path().add(simpleName());
|
||||
|
||||
GeoPoint sparse = context.parseExternalValue(GeoPoint.class);
|
||||
|
@ -487,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
}
|
||||
|
||||
context.path().remove();
|
||||
context.path().pathType(origPathType);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -512,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
|
|||
@Override
|
||||
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
|
||||
super.doXContentBody(builder, includeDefaults, params);
|
||||
if (includeDefaults || pathType != Defaults.PATH_TYPE) {
|
||||
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
|
||||
builder.field("lat_lon", fieldType().isLatLonEnabled());
|
||||
}
|
||||
@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit;
|
|||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
|
@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
|
|||
|
||||
@Override
|
||||
public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
|
||||
MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
|
||||
MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
|
||||
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
|
||||
CopyTo copyTo) {
|
||||
fieldType.setTokenized(false);
|
||||
setupFieldType(context);
|
||||
return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
|
||||
return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
|
||||
geoHashMapper, multiFields, ignoreMalformed, copyTo);
|
||||
}
|
||||
|
||||
|
@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
|
|||
}
|
||||
|
||||
public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
|
||||
ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
|
||||
ignoreMalformed, copyTo);
|
||||
}
|
||||
|
||||
|
|
|
@ -35,11 +35,9 @@ import org.elasticsearch.common.unit.DistanceUnit;
|
|||
import org.elasticsearch.common.util.ByteUtils;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
|
||||
|
@ -111,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
|
|||
|
||||
@Override
|
||||
public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
|
||||
MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
|
||||
MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
|
||||
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
|
||||
CopyTo copyTo) {
|
||||
fieldType.setTokenized(false);
|
||||
setupFieldType(context);
|
||||
fieldType.setHasDocValues(false);
|
||||
defaultFieldType.setHasDocValues(false);
|
||||
return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
|
||||
return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
|
||||
geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo);
|
||||
}
|
||||
|
||||
|
@ -288,32 +286,27 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
|
|||
protected Explicit<Boolean> coerce;
|
||||
|
||||
public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
|
||||
ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
|
||||
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
|
||||
Explicit<Boolean> coerce, CopyTo copyTo) {
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
|
||||
ignoreMalformed, copyTo);
|
||||
this.coerce = coerce;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(Mapper mergeWith, MergeResult mergeResult) {
|
||||
super.merge(mergeWith, mergeResult);
|
||||
if (!this.getClass().equals(mergeWith.getClass())) {
|
||||
return;
|
||||
}
|
||||
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
|
||||
GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith;
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) {
|
||||
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
|
||||
throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
|
||||
}
|
||||
}
|
||||
|
||||
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = gpfmMergeWith.coerce;
|
||||
}
|
||||
if (gpfmMergeWith.coerce.explicit()) {
|
||||
this.coerce = gpfmMergeWith.coerce;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
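doMerge above now throws immediately when both sides set [coerce] explicitly to different values, instead of recording the conflict on a MergeResult. A small sketch of that check, with ExplicitBool as a hypothetical stand-in for Explicit<Boolean>:

---------------------------------------------------------------------------
// Fail fast when two explicitly-set values disagree; defaults never conflict.
final class ExplicitConflictSketch {
    static final class ExplicitBool {
        final boolean value;
        final boolean explicit; // true when the user set it, false for a default

        ExplicitBool(boolean value, boolean explicit) {
            this.value = value;
            this.explicit = explicit;
        }
    }

    static void checkCoerce(String field, ExplicitBool current, ExplicitBool incoming) {
        if (incoming.explicit && current.explicit && current.value != incoming.value) {
            throw new IllegalArgumentException("mapper [" + field + "] has different [coerce]");
        }
    }

    public static void main(String[] args) {
        checkCoerce("location", new ExplicitBool(true, false), new ExplicitBool(false, true)); // default vs explicit: ok
        try {
            checkCoerce("location", new ExplicitBool(true, true), new ExplicitBool(false, true));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
---------------------------------------------------------------------------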
@@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;

@@ -121,7 +120,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
private Boolean coerce;
public Builder(String name) {
super(name, Defaults.FIELD_TYPE);
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
@Override

@@ -475,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
super.merge(mergeWith, mergeResult);
if (!this.getClass().equals(mergeWith.getClass())) {
return;
}
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
if (gsfm.coerce.explicit()) {
this.coerce = gsfm.coerce;
}
if (gsfm.coerce.explicit()) {
this.coerce = gsfm.coerce;
}
}
@@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;

@@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper {
public interface IncludeInAll {
void includeInAll(Boolean includeInAll);
/**
* If {@code includeInAll} is not null then return a copy of this mapper
* that will include values in the _all field according to {@code includeInAll}.
*/
Mapper includeInAll(Boolean includeInAll);
void includeInAllIfNotSet(Boolean includeInAll);
/**
* If {@code includeInAll} is not null and not set on this mapper yet, then
* return a copy of this mapper that will include values in the _all field
* according to {@code includeInAll}.
*/
Mapper includeInAllIfNotSet(Boolean includeInAll);
void unsetIncludeInAll();
/**
* If {@code includeInAll} was already set on this mapper then return a copy
* of this mapper that has {@code includeInAll} not set.
*/
Mapper unsetIncludeInAll();
}
public static final String NAME = "_all";
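The IncludeInAll hunk above also changes the interface's style: the three void, mutating methods become methods that return a Mapper, and the new Javadoc states that each call returns a copy of the mapper with the setting applied. A rough sketch of what an implementer of that copy-on-write contract could look like; SketchTextMapper below is illustrative and not part of this diff:

// Hypothetical implementer: each call returns a copy instead of mutating in place.
public final class SketchTextMapper {
    private final String name;
    private final Boolean includeInAll; // null means "not set"

    public SketchTextMapper(String name, Boolean includeInAll) {
        this.name = name;
        this.includeInAll = includeInAll;
    }

    public SketchTextMapper includeInAll(Boolean includeInAll) {
        // if includeInAll is not null, return a copy carrying the new setting
        return includeInAll == null ? this : new SketchTextMapper(name, includeInAll);
    }

    public SketchTextMapper includeInAllIfNotSet(Boolean includeInAll) {
        // only honor the new setting if none was set on this mapper yet
        return this.includeInAll == null ? includeInAll(includeInAll) : this;
    }

    public SketchTextMapper unsetIncludeInAll() {
        // return a copy with includeInAll cleared, if it was set
        return includeInAll == null ? this : new SketchTextMapper(name, null);
    }
}
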
@@ -89,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
private EnabledAttributeMapper enabled = Defaults.ENABLED;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
builder = this;
indexName = Defaults.INDEX_NAME;
}

@@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
}
super.merge(mergeWith, mergeResult);
super.doMerge(mergeWith, updateAllTypes);
}
@Override
@@ -78,7 +78,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
private boolean enabled = Defaults.ENABLED;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
@@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@@ -90,7 +89,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
private String path = Defaults.PATH;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}

@@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;

@@ -80,7 +79,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}

@@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
if (!mergeResult.simulate()) {
if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = indexFieldMapperMergeWith.enabledState;
}
if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = indexFieldMapperMergeWith.enabledState;
}
}
@@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@@ -98,7 +97,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
private final MappedFieldType childJoinFieldType = Defaults.JOIN_FIELD_TYPE.clone();
public Builder(String documentType) {
super(Defaults.NAME, Defaults.FIELD_TYPE);
super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
this.indexName = name;
this.documentType = documentType;
builder = this;

@@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
super.merge(mergeWith, mergeResult);
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {
mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
}
List<String> conflicts = new ArrayList<>();

@@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper {
parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here
if (childJoinFieldType != null) {
// TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type.
childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false);
childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false);
}
for (String conflict : conflicts) {
mergeResult.addConflict(conflict);
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Merge conflicts: " + conflicts);
}
if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
if (active()) {
childJoinFieldType = fieldMergeWith.childJoinFieldType.clone();
}
}
@@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;

@@ -78,7 +77,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
private String path = Defaults.PATH;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
}
public Builder required(boolean required) {

@@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
@@ -41,11 +41,11 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

@@ -88,7 +88,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
private String[] excludes = null;
public Builder() {
super(Defaults.NAME, Defaults.FIELD_TYPE);
super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
public Builder enabled(boolean enabled) {

@@ -310,18 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
if (mergeResult.simulate()) {
if (this.enabled != sourceMergeWith.enabled) {
mergeResult.addConflict("Cannot update enabled setting for [_source]");
}
if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
mergeResult.addConflict("Cannot update includes setting for [_source]");
}
if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
mergeResult.addConflict("Cannot update excludes setting for [_source]");
}
List<String> conflicts = new ArrayList<>();
if (this.enabled != sourceMergeWith.enabled) {
conflicts.add("Cannot update enabled setting for [_source]");
}
if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
conflicts.add("Cannot update includes setting for [_source]");
}
if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
conflicts.add("Cannot update excludes setting for [_source]");
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
}
}
}
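The _source hunk above is representative of the mappers that previously special-cased the simulate pass: the mergeResult.addConflict(...) calls that only ran during simulation become a local conflicts list that is checked once and turned into an exception. A compact, self-contained sketch of that pattern, using a hypothetical stand-in class rather than the real SourceFieldMapper:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical stand-in for a metadata mapper; only the merge logic matters here.
class SourceMergeSketch {
    private final boolean enabled;
    private final String[] includes;
    private final String[] excludes;

    SourceMergeSketch(boolean enabled, String[] includes, String[] excludes) {
        this.enabled = enabled;
        this.includes = includes;
        this.excludes = excludes;
    }

    void doMerge(SourceMergeSketch mergeWith) {
        // collect every conflict first, then fail once with all of them
        List<String> conflicts = new ArrayList<>();
        if (enabled != mergeWith.enabled) {
            conflicts.add("Cannot update enabled setting for [_source]");
        }
        if (Arrays.equals(includes, mergeWith.includes) == false) {
            conflicts.add("Cannot update includes setting for [_source]");
        }
        if (Arrays.equals(excludes, mergeWith.excludes) == false) {
            conflicts.add("Cannot update excludes setting for [_source]");
        }
        if (conflicts.isEmpty() == false) {
            throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
        }
    }
}
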
@@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.SourceToParse;

@@ -79,7 +78,7 @@ public class TTLFieldMapper extends MetadataFieldMapper {
private long defaultTTL = Defaults.DEFAULT;
public Builder() {
super(Defaults.NAME, Defaults.TTL_FIELD_TYPE);
super(Defaults.NAME, Defaults.TTL_FIELD_TYPE, Defaults.FIELD_TYPE);
}
public Builder enabled(EnabledAttributeMapper enabled) {

@@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {
mergeResult.addConflict("_ttl cannot be disabled once it was enabled.");
if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) {
throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled.");
} else {
if (!mergeResult.simulate()) {
this.enabledState = ttlMergeWith.enabledState;
}
this.enabledState = ttlMergeWith.enabledState;
}
}
if (ttlMergeWith.defaultTTL != -1) {
// we never build the default when the field is disabled so we should also not set it
// (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster)
if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) {
if (enabledState == EnabledAttributeMapper.ENABLED) {
this.defaultTTL = ttlMergeWith.defaultTTL;
}
}
@@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

@@ -96,7 +96,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
private Boolean ignoreMissing = null;
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
if (existing != null) {
// if there is an existing type, always use that store value (only matters for < 2.0)
explicitStore = true;

@@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
super.merge(mergeWith, mergeResult);
if (!mergeResult.simulate()) {
if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = timestampFieldMapperMergeWith.enabledState;
}
} else {
if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
return;
}
if (defaultTimestamp == null) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
} else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
} else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
}
if (this.path != null) {
if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
}
} else if (timestampFieldMapperMergeWith.path() != null) {
mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
super.doMerge(mergeWith, updateAllTypes);
if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = timestampFieldMapperMergeWith.enabledState;
}
if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
return;
}
List<String> conflicts = new ArrayList<>();
if (defaultTimestamp == null) {
conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
} else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
} else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
}
if (this.path != null) {
if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
}
} else if (timestampFieldMapperMergeWith.path() != null) {
conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Conflicts: " + conflicts);
}
}
}
@@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;

@@ -81,7 +80,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, TypeFieldMapper> {
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}

@@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
@@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;

@@ -79,7 +78,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, UidFieldMapper> {
public Builder(MappedFieldType existing) {
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}

@@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
@@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;

@@ -62,7 +61,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, VersionFieldMapper> {
public Builder() {
super(Defaults.NAME, Defaults.FIELD_TYPE);
super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
@Override

@@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
}
@Override
public void merge(Mapper mergeWith, MergeResult mergeResult) {
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}
Some files were not shown because too many files have changed in this diff.