Merge branch 'master' into feature/reindex

This commit is contained in:
Nik Everett 2016-02-26 16:59:54 -05:00
commit c38119bae9
1013 changed files with 28958 additions and 19681 deletions


@@ -54,7 +54,7 @@ Once your changes and tests are ready to submit for review:
1. Test your changes
Run the test suite to make sure that nothing is broken. See the
[TESTING](TESTING.asciidoc) file for help running tests.
[TESTING](../TESTING.asciidoc) file for help running tests.
2. Sign the Contributor License Agreement

.github/ISSUE_TEMPLATE.md (new file)

@@ -0,0 +1,34 @@
<!--
GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
-->
<!--
If you are filing a bug report, please remove the below feature
request block and provide responses for all of the below items.
-->
**Elasticsearch version**:
**JVM version**:
**OS version**:
**Description of the problem including expected versus actual behavior**:
**Steps to reproduce**:
1.
2.
3.
**Provide logs (if relevant)**:
<!--
If you are filing a feature request, please remove the above bug
report block and provide responses for all of the below items.
-->
**Describe the feature**:


@ -1,3 +1,5 @@
import java.nio.file.Files
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -40,6 +42,15 @@ archivesBaseName = 'build-tools'
Properties props = new Properties()
props.load(project.file('version.properties').newDataInputStream())
version = props.getProperty('elasticsearch')
boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true"));
if (snapshot) {
// we update the version property to reflect if we are building a snapshot or a release build
// we write this back out below to load it in the Build.java which will be shown in rest main action
// to indicate this being a snapshot build or a release build.
version += "-SNAPSHOT"
props.put("elasticsearch", version);
}
repositories {
mavenCentral()
@@ -66,9 +77,22 @@ dependencies {
compile 'org.apache.rat:apache-rat:0.11'
}
File tempPropertiesFile = new File(project.buildDir, "version.properties")
task writeVersionProperties {
inputs.properties(props)
doLast {
OutputStream stream = Files.newOutputStream(tempPropertiesFile.toPath());
try {
props.store(stream, "UTF-8");
} finally {
stream.close();
}
}
}
processResources {
inputs.file('version.properties')
from 'version.properties'
dependsOn writeVersionProperties
from tempPropertiesFile
}
extraArchive {

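A note on the buildSrc change above: it appends "-SNAPSHOT" to the base version unless -Dbuild.snapshot=false is passed, then writes the rewritten properties to a temp file that processResources packs instead of the source version.properties. A minimal Java sketch of those two steps (file path hypothetical); note that the second argument of Properties.store(OutputStream, String) is a header comment, not a charset — this overload always writes ISO-8859-1, so the "UTF-8" string in the diff is only a comment line:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;

class SnapshotVersionSketch {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        props.setProperty("elasticsearch", "3.0.0");

        // same default as the build: snapshot unless -Dbuild.snapshot=false
        boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true"));
        if (snapshot) {
            props.setProperty("elasticsearch", props.getProperty("elasticsearch") + "-SNAPSHOT");
        }

        // mirror writeVersionProperties: store the rewritten props in the build dir
        Path out = Paths.get("build", "version.properties"); // hypothetical location
        Files.createDirectories(out.getParent());
        try (OutputStream stream = Files.newOutputStream(out)) {
            props.store(stream, "rewritten by the build"); // 2nd arg is a comment line
        }
    }
}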

@@ -354,11 +354,17 @@ class BuildPlugin implements Plugin<Project> {
static void configureJarManifest(Project project) {
project.tasks.withType(Jar) { Jar jarTask ->
jarTask.doFirst {
boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT");
String version = VersionProperties.elasticsearch;
if (isSnapshot) {
version = version.substring(0, version.length() - 9)
}
// this doFirst is added before the info plugin, therefore it will run
// after the doFirst added by the info plugin, and we can override attributes
jarTask.manifest.attributes(
'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
'X-Compile-Elasticsearch-Version': version,
'X-Compile-Lucene-Version': VersionProperties.lucene,
'X-Compile-Elasticsearch-Snapshot': isSnapshot,
'Build-Date': ZonedDateTime.now(ZoneOffset.UTC),
'Build-Java-Version': project.javaVersion)
if (jarTask.manifest.attributes.containsKey('Change') == false) {
@@ -394,7 +400,7 @@ class BuildPlugin implements Plugin<Project> {
// we use './temp' since this is per JVM and tests are forbidden from writing to CWD
systemProperty 'java.io.tmpdir', './temp'
systemProperty 'java.awt.headless', 'true'
systemProperty 'tests.maven', 'true' // TODO: rename this once we've switched to gradle!
systemProperty 'tests.gradle', 'true'
systemProperty 'tests.artifact', project.name
systemProperty 'tests.task', path
systemProperty 'tests.security.manager', 'true'

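The 9 in version.substring(0, version.length() - 9) above is "-SNAPSHOT".length(); the jar manifest then carries the bare version in X-Compile-Elasticsearch-Version plus a separate X-Compile-Elasticsearch-Snapshot boolean. A standalone sketch of the stripping:

class SnapshotSuffixSketch {
    static final String SUFFIX = "-SNAPSHOT"; // SUFFIX.length() == 9, the literal in the diff

    public static void main(String[] args) {
        String version = "3.0.0-SNAPSHOT";
        boolean isSnapshot = version.endsWith(SUFFIX);
        String bare = isSnapshot ? version.substring(0, version.length() - SUFFIX.length()) : version;
        System.out.println(bare + " (snapshot=" + isSnapshot + ")"); // prints: 3.0.0 (snapshot=true)
    }
}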

@@ -67,7 +67,6 @@ public class PluginBuildPlugin extends BuildPlugin {
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
provided "org.slf4j:slf4j-api:${project.versions.slf4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}
@@ -101,11 +100,6 @@ public class PluginBuildPlugin extends BuildPlugin {
from pluginMetadata // metadata (eg custom security policy)
from project.jar // this plugin's jar
from project.configurations.runtime - project.configurations.provided // the dep jars
// hack just for slf4j, in case it is "upgrade" from provided to compile,
// since it is not actually provided in distributions
from project.configurations.runtime.fileCollection { Dependency dep ->
return dep.name == 'slf4j-api' && project.configurations.compile.dependencies.contains(dep)
}
// extra files for the plugin to go into the zip
from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
from('src/main') {


@@ -245,7 +245,8 @@ class ClusterFormationTasks {
return setup
}
Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
File configDir = new File(node.homeDir, 'config')
copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
@@ -258,9 +259,12 @@
}
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
copyConfig.into(destConfigFile.canonicalFile.parentFile)
.from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
.rename { destConfigFile.name }
// wrap source file in closure to delay resolution to execution time
copyConfig.from({ extraConfigFile.getValue() }) {
// this must be in a closure so it is only applied to the single file specified in from above
into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
rename { destConfigFile.name }
}
}
return copyConfig
}

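The ClusterFormationTasks fix above stops calling copyConfig.into() per extra file (which would repoint the whole Copy task) and instead computes each file's destination relative to the config dir. The key step is Path.relativize; a small sketch with hypothetical paths:

import java.nio.file.Path;
import java.nio.file.Paths;

class RelativizeSketch {
    public static void main(String[] args) {
        Path configDir = Paths.get("/work/node0/home/config");
        Path destConfigFile = Paths.get("/work/node0/home/config/scripts/my_script.groovy");
        // relativize yields "scripts": the subdirectory to copy into,
        // expressed relative to the Copy task's destination dir
        Path rel = configDir.relativize(destConfigFile.getParent());
        System.out.println(rel); // prints: scripts
    }
}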

@@ -374,9 +374,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]NamedWriteableRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]ESLoggerFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]Loggers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]logging[/\\]log4j[/\\]LogConfigurator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]ElasticsearchDirectoryReader.java" checks="LineLength" />
@@ -649,8 +646,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltCacheFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltTokenFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]breaker[/\\]HierarchyCircuitBreakerService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]query[/\\]terms[/\\]TermsLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]request[/\\]IndicesRequestCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cluster[/\\]IndicesClusterStateService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCacheListener.java" checks="LineLength" />
@@ -1306,7 +1301,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltAnalyzerIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analyze[/\\]AnalyzeActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analyze[/\\]HunspellServiceIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]cache[/\\]query[/\\]IndicesRequestCacheIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]exists[/\\]indices[/\\]IndicesExistsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]exists[/\\]types[/\\]TypesExistsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]flush[/\\]FlushIT.java" checks="LineLength" />
@@ -1575,13 +1569,6 @@
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptMultiThreadedTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptSecurityTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]SimpleBench.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]Definition.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]PainlessPlugin.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ConditionalTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]FieldTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]FloatOverflowEnabledTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]IntegerOverflowEnabledTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]python[/\\]PythonPlugin.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />


@@ -1,10 +1,10 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-850c6c2
elasticsearch = 3.0.0
lucene = 5.5.0
# optional dependencies
spatial4j = 0.5
jts = 1.13
jackson = 2.6.2
jackson = 2.7.1
log4j = 1.2.17
slf4j = 1.6.2
jna = 4.1.0
@@ -13,6 +13,8 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
commonslogging = 1.1.3


@@ -77,7 +77,6 @@ dependencies {
// logging
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
compile "org.slf4j:slf4j-api:${versions.slf4j}", optional
compile "net.java.dev.jna:jna:${versions.jna}", optional
@@ -224,8 +223,9 @@ thirdPartyAudit.excludes = [
'org.osgi.util.tracker.ServiceTracker',
'org.osgi.util.tracker.ServiceTrackerCustomizer',
'org.slf4j.impl.StaticMDCBinder',
'org.slf4j.impl.StaticMarkerBinder',
// from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
'org.slf4j.LoggerFactory',
]
// dependency license are currently checked in distribution


@@ -45,6 +45,7 @@ public class Build {
static {
final String shortHash;
final String date;
final boolean isSnapshot;
Path path = getElasticsearchCodebase();
if (path.toString().endsWith(".jar")) {
@@ -52,6 +53,7 @@
Manifest manifest = jar.getManifest();
shortHash = manifest.getMainAttributes().getValue("Change");
date = manifest.getMainAttributes().getValue("Build-Date");
isSnapshot = "true".equals(manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Snapshot"));
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -59,6 +61,7 @@
// not running from a jar (unit tests, IDE)
shortHash = "Unknown";
date = "Unknown";
isSnapshot = true;
}
if (shortHash == null) {
throw new IllegalStateException("Error finding the build shortHash. " +
@@ -69,9 +72,11 @@
"Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug.");
}
CURRENT = new Build(shortHash, date);
CURRENT = new Build(shortHash, date, isSnapshot);
}
private final boolean isSnapshot;
/**
* Returns path to elasticsearch codebase path
*/
@@ -88,9 +93,10 @@
private String shortHash;
private String date;
Build(String shortHash, String date) {
Build(String shortHash, String date, boolean isSnapshot) {
this.shortHash = shortHash;
this.date = date;
this.isSnapshot = isSnapshot;
}
public String shortHash() {
@@ -104,12 +110,18 @@
public static Build readBuild(StreamInput in) throws IOException {
String hash = in.readString();
String date = in.readString();
return new Build(hash, date);
boolean snapshot = in.readBoolean();
return new Build(hash, date, snapshot);
}
public static void writeBuild(Build build, StreamOutput out) throws IOException {
out.writeString(build.shortHash());
out.writeString(build.date());
out.writeBoolean(build.isSnapshot());
}
public boolean isSnapshot() {
return isSnapshot;
}
@Override

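Build.java above threads the snapshot flag from the jar manifest into the Build singleton and appends it to the wire format: writeBuild writes a boolean after the two strings, and readBuild consumes the fields in the same order. A sketch of that round trip using plain java.io data streams in place of Elasticsearch's StreamInput/StreamOutput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class BuildWireSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF("c38119b");    // shortHash
            out.writeUTF("2016-02-26"); // date
            out.writeBoolean(true);     // isSnapshot, the field this commit appends
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            String hash = in.readUTF();
            String date = in.readUTF();
            boolean snapshot = in.readBoolean(); // read last, mirroring the write order
            System.out.println(hash + " " + date + " snapshot=" + snapshot);
        }
    }
}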

@@ -23,7 +23,7 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;


@@ -43,253 +43,229 @@ public class Version {
public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;
public static final int V_0_18_0_ID = /*00*/180099;
public static final Version V_0_18_0 = new Version(V_0_18_0_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_1_ID = /*00*/180199;
public static final Version V_0_18_1 = new Version(V_0_18_1_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_2_ID = /*00*/180299;
public static final Version V_0_18_2 = new Version(V_0_18_2_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_3_ID = /*00*/180399;
public static final Version V_0_18_3 = new Version(V_0_18_3_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_4_ID = /*00*/180499;
public static final Version V_0_18_4 = new Version(V_0_18_4_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_5_ID = /*00*/180599;
public static final Version V_0_18_5 = new Version(V_0_18_5_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_6_ID = /*00*/180699;
public static final Version V_0_18_6 = new Version(V_0_18_6_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_7_ID = /*00*/180799;
public static final Version V_0_18_7 = new Version(V_0_18_7_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_8_ID = /*00*/180899;
public static final Version V_0_18_8 = new Version(V_0_18_8_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC1_ID = /*00*/190051;
public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC2_ID = /*00*/190052;
public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC3_ID = /*00*/190053;
public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_ID = /*00*/190099;
public static final Version V_0_19_0 = new Version(V_0_19_0_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_1_ID = /*00*/190199;
public static final Version V_0_19_1 = new Version(V_0_19_1_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_2_ID = /*00*/190299;
public static final Version V_0_19_2 = new Version(V_0_19_2_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_3_ID = /*00*/190399;
public static final Version V_0_19_3 = new Version(V_0_19_3_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_4_ID = /*00*/190499;
public static final Version V_0_19_4 = new Version(V_0_19_4_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_5_ID = /*00*/190599;
public static final Version V_0_19_5 = new Version(V_0_19_5_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_6_ID = /*00*/190699;
public static final Version V_0_19_6 = new Version(V_0_19_6_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_7_ID = /*00*/190799;
public static final Version V_0_19_7 = new Version(V_0_19_7_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_8_ID = /*00*/190899;
public static final Version V_0_19_8 = new Version(V_0_19_8_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_9_ID = /*00*/190999;
public static final Version V_0_19_9 = new Version(V_0_19_9_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_10_ID = /*00*/191099;
public static final Version V_0_19_10 = new Version(V_0_19_10_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_11_ID = /*00*/191199;
public static final Version V_0_19_11 = new Version(V_0_19_11_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_12_ID = /*00*/191299;
public static final Version V_0_19_12 = new Version(V_0_19_12_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_13_ID = /*00*/191399;
public static final Version V_0_19_13 = new Version(V_0_19_13_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_0_RC1_ID = /*00*/200051;
public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_0_ID = /*00*/200099;
public static final Version V_0_20_0 = new Version(V_0_20_0_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_1_ID = /*00*/200199;
public static final Version V_0_20_1 = new Version(V_0_20_1_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_2_ID = /*00*/200299;
public static final Version V_0_20_2 = new Version(V_0_20_2_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_3_ID = /*00*/200399;
public static final Version V_0_20_3 = new Version(V_0_20_3_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_4_ID = /*00*/200499;
public static final Version V_0_20_4 = new Version(V_0_20_4_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_5_ID = /*00*/200599;
public static final Version V_0_20_5 = new Version(V_0_20_5_ID, false, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_6_ID = /*00*/200699;
public static final Version V_0_20_6 = new Version(V_0_20_6_ID, false, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_7_ID = /*00*/200799;
public static final Version V_0_20_7 = new Version(V_0_20_7_ID, true, LUCENE_3_EMULATION_VERSION);
public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_90_0_Beta1_ID = /*00*/900001;
public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
public static final int V_0_90_0_RC1_ID = /*00*/900051;
public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_1);
public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
public static final int V_0_90_0_RC2_ID = /*00*/900052;
public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
public static final int V_0_90_0_ID = /*00*/900099;
public static final Version V_0_90_0 = new Version(V_0_90_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_2);
public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
public static final int V_0_90_1_ID = /*00*/900199;
public static final Version V_0_90_1 = new Version(V_0_90_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
public static final int V_0_90_2_ID = /*00*/900299;
public static final Version V_0_90_2 = new Version(V_0_90_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_3);
public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
public static final int V_0_90_3_ID = /*00*/900399;
public static final Version V_0_90_3 = new Version(V_0_90_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_4_ID = /*00*/900499;
public static final Version V_0_90_4 = new Version(V_0_90_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_5_ID = /*00*/900599;
public static final Version V_0_90_5 = new Version(V_0_90_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_4);
public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_6_ID = /*00*/900699;
public static final Version V_0_90_6 = new Version(V_0_90_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_0_90_7_ID = /*00*/900799;
public static final Version V_0_90_7 = new Version(V_0_90_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_0_90_8_ID = /*00*/900899;
public static final Version V_0_90_8 = new Version(V_0_90_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_9_ID = /*00*/900999;
public static final Version V_0_90_9 = new Version(V_0_90_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_10_ID = /*00*/901099;
public static final Version V_0_90_10 = new Version(V_0_90_10_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_11_ID = /*00*/901199;
public static final Version V_0_90_11 = new Version(V_0_90_11_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_12_ID = /*00*/901299;
public static final Version V_0_90_12 = new Version(V_0_90_12_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_13_ID = /*00*/901399;
public static final Version V_0_90_13 = new Version(V_0_90_13_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_14_ID = /*00*/901499;
public static final Version V_0_90_14 = new Version(V_0_90_14_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_Beta1_ID = 1000001;
public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_5);
public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_1_0_0_Beta2_ID = 1000002;
public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_RC1_ID = 1000051;
public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_RC2_ID = 1000052;
public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_ID = 1000099;
public static final Version V_1_0_0 = new Version(V_1_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_1_ID = 1000199;
public static final Version V_1_0_1 = new Version(V_1_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_2_ID = 1000299;
public static final Version V_1_0_2 = new Version(V_1_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_3_ID = 1000399;
public static final Version V_1_0_3 = new Version(V_1_0_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_4_ID = 1000499;
public static final Version V_1_0_4 = new Version(V_1_0_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_6);
public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_1_0_ID = 1010099;
public static final Version V_1_1_0 = new Version(V_1_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_1_1_ID = 1010199;
public static final Version V_1_1_1 = new Version(V_1_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_1_2_ID = 1010299;
public static final Version V_1_1_2 = new Version(V_1_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_7);
public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_2_0_ID = 1020099;
public static final Version V_1_2_0 = new Version(V_1_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_1_ID = 1020199;
public static final Version V_1_2_1 = new Version(V_1_2_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_2_ID = 1020299;
public static final Version V_1_2_2 = new Version(V_1_2_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_3_ID = 1020399;
public static final Version V_1_2_3 = new Version(V_1_2_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_4_ID = 1020499;
public static final Version V_1_2_4 = new Version(V_1_2_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_5_ID = 1020599;
public static final Version V_1_2_5 = new Version(V_1_2_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_8);
public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_3_0_ID = 1030099;
public static final Version V_1_3_0 = new Version(V_1_3_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_1_ID = 1030199;
public static final Version V_1_3_1 = new Version(V_1_3_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_2_ID = 1030299;
public static final Version V_1_3_2 = new Version(V_1_3_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_3_ID = 1030399;
public static final Version V_1_3_3 = new Version(V_1_3_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_4_ID = 1030499;
public static final Version V_1_3_4 = new Version(V_1_3_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_5_ID = 1030599;
public static final Version V_1_3_5 = new Version(V_1_3_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_6_ID = 1030699;
public static final Version V_1_3_6 = new Version(V_1_3_6_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_7_ID = 1030799;
public static final Version V_1_3_7 = new Version(V_1_3_7_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_8_ID = 1030899;
public static final Version V_1_3_8 = new Version(V_1_3_8_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_9_ID = 1030999;
public static final Version V_1_3_9 = new Version(V_1_3_9_ID, false, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_10_ID = /*00*/1031099;
public static final Version V_1_3_10 = new Version(V_1_3_10_ID, true, org.apache.lucene.util.Version.LUCENE_4_9);
public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_4_0_Beta1_ID = 1040001;
public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_1);
public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
public static final int V_1_4_0_ID = 1040099;
public static final Version V_1_4_0 = new Version(V_1_4_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_1_ID = 1040199;
public static final Version V_1_4_1 = new Version(V_1_4_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_2_ID = 1040299;
public static final Version V_1_4_2 = new Version(V_1_4_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_3_ID = 1040399;
public static final Version V_1_4_3 = new Version(V_1_4_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final int V_1_4_4_ID = 1040499;
public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final int V_1_4_5_ID = 1040599;
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_4_6_ID = 1040699;
public static final Version V_1_4_6 = new Version(V_1_4_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_0_ID = 1050099;
public static final Version V_1_5_0 = new Version(V_1_5_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_1_ID = 1050199;
public static final Version V_1_5_1 = new Version(V_1_5_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_2_ID = 1050299;
public static final Version V_1_5_2 = new Version(V_1_5_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_3_ID = 1050399;
public static final Version V_1_5_3 = new Version(V_1_5_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_0_ID = 1060099;
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_1_ID = 1060199;
public static final Version V_1_6_1 = new Version(V_1_6_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_2_ID = 1060299;
public static final Version V_1_6_2 = new Version(V_1_6_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_3_ID = 1060399;
public static final Version V_1_6_3 = new Version(V_1_6_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_0_ID = 1070099;
public static final Version V_1_7_0 = new Version(V_1_7_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_1_ID = 1070199;
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_2_ID = 1070299;
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_4_ID = 1070499;
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_5_ID = 1070599;
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_6_ID = 1070699;
public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_beta2_ID = 2000002;
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_rc1_ID = 2000051;
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_ID = 2000099;
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_1_ID = 2000199;
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_2_ID = 2000299;
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_3_ID = 2000399;
public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_1_0_ID = 2010099;
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_2_ID = 2010299;
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_3_ID = 2010399;
public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_3_0_0_ID = 3000099;
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version CURRENT = V_3_0_0;
static {
@@ -307,20 +283,14 @@ public class Version {
return V_3_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_3_ID:
return V_2_1_3;
case V_2_1_2_ID:
return V_2_1_2;
case V_2_1_1_ID:
return V_2_1_1;
case V_2_1_0_ID:
return V_2_1_0;
case V_2_0_3_ID:
return V_2_0_3;
case V_2_0_2_ID:
return V_2_0_2;
case V_2_0_1_ID:
@@ -333,8 +303,6 @@ public class Version {
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_6_ID:
return V_1_7_6;
case V_1_7_5_ID:
return V_1_7_5;
case V_1_7_4_ID:
@@ -347,24 +315,18 @@ public class Version {
return V_1_7_1;
case V_1_7_0_ID:
return V_1_7_0;
case V_1_6_3_ID:
return V_1_6_3;
case V_1_6_2_ID:
return V_1_6_2;
case V_1_6_1_ID:
return V_1_6_1;
case V_1_6_0_ID:
return V_1_6_0;
case V_1_5_3_ID:
return V_1_5_3;
case V_1_5_2_ID:
return V_1_5_2;
case V_1_5_1_ID:
return V_1_5_1;
case V_1_5_0_ID:
return V_1_5_0;
case V_1_4_6_ID:
return V_1_4_6;
case V_1_4_5_ID:
return V_1_4_5;
case V_1_4_4_ID:
@@ -379,8 +341,6 @@ public class Version {
return V_1_4_0;
case V_1_4_0_Beta1_ID:
return V_1_4_0_Beta1;
case V_1_3_10_ID:
return V_1_3_10;
case V_1_3_9_ID:
return V_1_3_9;
case V_1_3_8_ID:
@@ -401,8 +361,6 @@ public class Version {
return V_1_3_1;
case V_1_3_0_ID:
return V_1_3_0;
case V_1_2_5_ID:
return V_1_2_5;
case V_1_2_4_ID:
return V_1_2_4;
case V_1_2_3_ID:
@@ -419,8 +377,6 @@ public class Version {
return V_1_1_1;
case V_1_1_0_ID:
return V_1_1_0;
case V_1_0_4_ID:
return V_1_0_4;
case V_1_0_3_ID:
return V_1_0_3;
case V_1_0_2_ID:
@@ -437,8 +393,6 @@ public class Version {
return V_1_0_0_Beta2;
case V_1_0_0_Beta1_ID:
return V_1_0_0_Beta1;
case V_0_90_14_ID:
return V_0_90_14;
case V_0_90_13_ID:
return V_0_90_13;
case V_0_90_12_ID:
@@ -473,8 +427,6 @@ public class Version {
return V_0_90_0_RC1;
case V_0_90_0_Beta1_ID:
return V_0_90_0_Beta1;
case V_0_20_7_ID:
return V_0_20_7;
case V_0_20_6_ID:
return V_0_20_6;
case V_0_20_5_ID:
@@ -544,7 +496,7 @@ public class Version {
case V_0_18_8_ID:
return V_0_18_8;
default:
return new Version(id, false, org.apache.lucene.util.Version.LATEST);
return new Version(id, org.apache.lucene.util.Version.LATEST);
}
}
@@ -579,10 +531,6 @@ public class Version {
if (!Strings.hasLength(version)) {
return Version.CURRENT;
}
final boolean snapshot;
if (snapshot = version.endsWith("-SNAPSHOT")) {
version = version.substring(0, version.length() - 9);
}
String[] parts = version.split("\\.|\\-");
if (parts.length < 3 || parts.length > 4) {
throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
@@ -607,11 +555,7 @@
}
}
final Version versionFromId = fromId(major + minor + revision + build);
if (snapshot != versionFromId.snapshot()) {
return new Version(versionFromId.id, snapshot, versionFromId.luceneVersion);
}
return versionFromId;
return fromId(major + minor + revision + build);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("unable to parse version " + version, e);
@@ -623,23 +567,17 @@
public final byte minor;
public final byte revision;
public final byte build;
public final Boolean snapshot;
public final org.apache.lucene.util.Version luceneVersion;
Version(int id, boolean snapshot, org.apache.lucene.util.Version luceneVersion) {
Version(int id, org.apache.lucene.util.Version luceneVersion) {
this.id = id;
this.major = (byte) ((id / 1000000) % 100);
this.minor = (byte) ((id / 10000) % 100);
this.revision = (byte) ((id / 100) % 100);
this.build = (byte) (id % 100);
this.snapshot = snapshot;
this.luceneVersion = luceneVersion;
}
public boolean snapshot() {
return snapshot;
}
public boolean after(Version version) {
return version.id < id;
}
@@ -667,10 +605,13 @@
return Version.smallest(this, fromId(major * 1000000 + 99));
}
/**
* Just the version number (without -SNAPSHOT if snapshot).
*/
public String number() {
@SuppressForbidden(reason = "System.out.*")
public static void main(String[] args) {
System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: " + JvmInfo.jvmInfo().version());
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(major).append('.').append(minor).append('.').append(revision);
if (isBeta()) {
@@ -691,21 +632,6 @@
return sb.toString();
}
@SuppressForbidden(reason = "System.out.*")
public static void main(String[] args) {
System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: " + JvmInfo.jvmInfo().version());
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(number());
if (snapshot()) {
sb.append("-SNAPSHOT");
}
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {

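Each Version constant above packs major/minor/revision/build into a single integer id, and the constructor (shown in the last hunk) decodes it with division and modulo; the diff's own constants show build 99 for GA releases, 01+ for betas, and 51+ for RCs. A sketch of the decoding arithmetic:

class VersionIdSketch {
    public static void main(String[] args) {
        int id = 1070599; // V_1_7_5_ID from the diff
        int major = (id / 1000000) % 100; // 1
        int minor = (id / 10000) % 100;   // 7
        int revision = (id / 100) % 100;  // 5
        int build = id % 100;             // 99 = GA release
        System.out.println(major + "." + minor + "." + revision + " build=" + build);
    }
}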

@@ -36,14 +36,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
private String reason = DEFAULT_REASON;
/**
* Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
* all nodes will be cancelled.
*/
public CancelTasksRequest(String... nodesIds) {
super(nodesIds);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -54,7 +46,6 @@ public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(reason);
}
@Override


@@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
@@ -36,6 +35,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
@@ -84,9 +84,9 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
if (request.taskId().isSet() == false) {
// we are only checking one task, we can optimize it
CancellableTask task = taskManager.getCancellableTask(request.taskId());
CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept(task);
@@ -94,7 +94,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
}
} else {
if (taskManager.getTask(request.taskId()) != null) {
if (taskManager.getTask(request.taskId().getId()) != null) {
// The task exists, but doesn't support cancellation
throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
} else {
@@ -135,11 +135,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
sendSetBanRequest(nodes,
BanParentTaskRequest.createSetBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId()), reason),
banLock);
}
private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
sendRemoveBanRequest(nodes,
BanParentTaskRequest.createRemoveBanParentTaskRequest(new TaskId(clusterService.localNode().getId(), task.getId())));
}
private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
@@ -148,8 +151,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if node still in the cluster
logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]", request.parentNodeId, request
.parentTaskId, node, request.ban);
logger.debug("Sending ban for tasks with the parent [{}] to the node [{}], ban [{}]", request.parentTaskId, node,
request.ban);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
@@ -164,8 +167,8 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
});
} else {
banLock.onBanSet();
logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node no longer in the cluster",
request.parentNodeId, request.parentTaskId, node);
logger.debug("Cannot send ban for tasks with the parent [{}] to the node [{}] - the node no longer in the cluster",
request.parentTaskId, node);
}
}
}
@@ -176,13 +179,12 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
if (discoveryNode != null) {
// Check if node still in the cluster
logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
request.parentTaskId, node);
logger.debug("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node);
transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler
.INSTANCE_SAME);
} else {
logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node no longer in " +
"the cluster", request.parentNodeId, request.parentTaskId, node);
logger.debug("Cannot send remove ban request for tasks with the parent [{}] to the node [{}] - the node no longer in " +
"the cluster", request.parentTaskId, node);
}
}
}
@ -218,23 +220,27 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
private static class BanParentTaskRequest extends TransportRequest {
private String parentNodeId;
private long parentTaskId;
private TaskId parentTaskId;
private boolean ban;
private String reason;
BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
this.parentNodeId = parentNodeId;
static BanParentTaskRequest createSetBanParentTaskRequest(TaskId parentTaskId, String reason) {
return new BanParentTaskRequest(parentTaskId, reason);
}
static BanParentTaskRequest createRemoveBanParentTaskRequest(TaskId parentTaskId) {
return new BanParentTaskRequest(parentTaskId);
}
private BanParentTaskRequest(TaskId parentTaskId, String reason) {
this.parentTaskId = parentTaskId;
this.ban = true;
this.reason = reason;
}
BanParentTaskRequest(String parentNodeId, long parentTaskId) {
this.parentNodeId = parentNodeId;
private BanParentTaskRequest(TaskId parentTaskId) {
this.parentTaskId = parentTaskId;
this.ban = false;
}
@ -245,8 +251,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentNodeId = in.readString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
ban = in.readBoolean();
if (ban) {
reason = in.readString();
@ -256,8 +261,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(parentNodeId);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
out.writeBoolean(ban);
if (ban) {
out.writeString(reason);
@ -269,13 +273,13 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
@Override
public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
if (request.ban) {
logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId, request
.parentTaskId, clusterService.localNode().getId(), request.reason);
taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
logger.debug("Received ban for the parent [{}] on the node [{}], reason: [{}]", request.parentTaskId,
clusterService.localNode().getId(), request.reason);
taskManager.setBan(request.parentTaskId, request.reason);
} else {
logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId,
clusterService.localNode().getId());
taskManager.removeBan(request.parentNodeId, request.parentTaskId);
taskManager.removeBan(request.parentTaskId);
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
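The hunks above replace the ambiguous (parentNodeId, long parentTaskId) constructor pair with private constructors behind named static factories. A minimal, self-contained sketch of that pattern, assuming nothing beyond what the diff shows (BanRequestSketch and the String parent id are illustrative stand-ins, not the Elasticsearch types):

final class BanRequestSketch {
    private final String parentTaskId; // stand-in for the real TaskId
    private final boolean ban;
    private final String reason;       // only meaningful when ban == true

    // named factories make the two request shapes unambiguous at the call site
    static BanRequestSketch createSetBan(String parentTaskId, String reason) {
        return new BanRequestSketch(parentTaskId, true, reason);
    }

    static BanRequestSketch createRemoveBan(String parentTaskId) {
        return new BanRequestSketch(parentTaskId, false, null);
    }

    private BanRequestSketch(String parentTaskId, boolean ban, String reason) {
        this.parentTaskId = parentTaskId;
        this.ban = ban;
        this.reason = reason;
    }
}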
View File
@ -32,14 +32,6 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
private boolean detailed = false;
/**
* Get information from nodes based on the node ids specified. If none are passed, information
* for all nodes will be returned.
*/
public ListTasksRequest(String... nodesIds) {
super(nodesIds);
}
/**
* Should the detailed task information be returned.
*/
@ -48,7 +40,7 @@ public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {
}
/**
* Should the node settings be returned.
* Should the detailed task information be returned.
*/
public ListTasksRequest detailed(boolean detailed) {
this.detailed = detailed;
View File
@ -138,11 +138,13 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
}
builder.endObject();
}
builder.startArray("tasks");
builder.startObject("tasks");
for(TaskInfo task : entry.getValue()) {
builder.startObject(task.getTaskId().toString(), XContentBuilder.FieldCaseConversion.NONE);
task.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
builder.endObject();
builder.endObject();
}
builder.endObject();
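For reference, the serialization change above turns the per-node task list from a JSON array into an object keyed by the string form of each task id; a sketch of the two shapes, with invented node and id values:

Before (array):            "tasks": [ { "node": "n1", "id": 42, "type": "transport", ... }, ... ]
After (object by task id): "tasks": { "n1:42": { "node": "n1", "id": 42, "type": "transport", ... }, ... }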
View File
@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -41,7 +42,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
private final DiscoveryNode node;
private final long id;
private final TaskId taskId;
private final String type;
@ -51,28 +52,21 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
private final Task.Status status;
private final String parentNode;
private final TaskId parentTaskId;
private final long parentId;
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status) {
this(node, id, type, action, description, status, null, -1L);
}
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, String parentNode, long parentId) {
public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, Task.Status status, TaskId parentTaskId) {
this.node = node;
this.id = id;
this.taskId = new TaskId(node.getId(), id);
this.type = type;
this.action = action;
this.description = description;
this.status = status;
this.parentNode = parentNode;
this.parentId = parentId;
this.parentTaskId = parentTaskId;
}
public TaskInfo(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
id = in.readLong();
taskId = new TaskId(node.getId(), in.readLong());
type = in.readString();
action = in.readString();
description = in.readOptionalString();
@ -81,8 +75,11 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
} else {
status = null;
}
parentNode = in.readOptionalString();
parentId = in.readLong();
parentTaskId = new TaskId(in);
}
public TaskId getTaskId() {
return taskId;
}
public DiscoveryNode getNode() {
@ -90,7 +87,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
}
public long getId() {
return id;
return taskId.getId();
}
public String getType() {
@ -113,12 +110,8 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
return status;
}
public String getParentNode() {
return parentNode;
}
public long getParentId() {
return parentId;
public TaskId getParentTaskId() {
return parentTaskId;
}
@Override
@ -129,7 +122,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(id);
out.writeLong(taskId.getId());
out.writeString(type);
out.writeString(action);
out.writeOptionalString(description);
@ -139,15 +132,13 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
} else {
out.writeBoolean(false);
}
out.writeOptionalString(parentNode);
out.writeLong(parentId);
parentTaskId.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("node", node.getId());
builder.field("id", id);
builder.field("id", taskId.getId());
builder.field("type", type);
builder.field("action", action);
if (status != null) {
@ -156,11 +147,9 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
if (description != null) {
builder.field("description", description);
}
if (parentNode != null) {
builder.field("parent_node", parentNode);
builder.field("parent_id", parentId);
if (parentTaskId.isSet() == false) {
builder.field("parent_task_id", parentTaskId.toString());
}
builder.endObject();
return builder;
}
}
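TaskInfo now derives both its id and its wire form from a single TaskId built as new TaskId(node.getId(), id). A minimal plain-Java sketch of what such a value type looks like, inferred only from its use above (the real class lives in org.elasticsearch.tasks.TaskId):

final class TaskIdSketch {
    final String nodeId; // id of the node the task runs on
    final long id;       // node-local task id

    TaskIdSketch(String nodeId, long id) {
        this.nodeId = nodeId;
        this.id = id;
    }

    // matches the "nodeId:taskId" keys used for the "tasks" object above
    @Override
    public String toString() {
        return nodeId + ":" + id;
    }
}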
View File
@ -27,11 +27,14 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
/**
*/
public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction<PendingClusterTasksRequest, PendingClusterTasksResponse> {
@ -63,6 +66,9 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadA
@Override
protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener<PendingClusterTasksResponse> listener) {
listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks()));
logger.trace("fetching pending tasks from cluster service");
final List<PendingClusterTask> pendingTasks = clusterService.pendingTasks();
logger.trace("done fetching pending tasks from cluster service");
listener.onResponse(new PendingClusterTasksResponse(pendingTasks));
}
}
View File
@ -35,7 +35,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -98,7 +97,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
}
if (request.requestCache()) {
clearedAtLeastOne = true;
indicesService.getIndicesRequestCache().clear(shard);
indicesService.clearRequestCache(shard);
}
if (request.recycler()) {
logger.debug("Clear CacheRecycler on index [{}]", service.index());
@ -114,7 +113,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
} else {
service.cache().clear("api");
service.fieldData().clear();
indicesService.getIndicesRequestCache().clear(shard);
indicesService.clearRequestCache(shard);
}
}
}
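The cache-clearing change above is an encapsulation fix: callers stop reaching through getIndicesRequestCache() and instead call a facade method on the service. A sketch of the shape, with hypothetical names standing in for the real classes:

final class IndicesServiceSketch {
    private final RequestCacheSketch requestCache = new RequestCacheSketch();

    // facade: the internal cache object never escapes the service
    void clearRequestCache(String shard) {
        requestCache.clear(shard);
    }
}

final class RequestCacheSketch {
    void clear(String shard) {
        // drop all cached entries for the given shard (elided)
    }
}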
View File
@ -26,7 +26,6 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
@ -41,13 +40,11 @@ import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.search.suggest.completion.CompletionStats;
import java.io.IOException;
View File
@ -49,6 +49,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
@ -76,17 +77,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
private final BigArrays bigArrays;
private final FetchPhase fetchPhase;
@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
TransportService transportService, IndicesService indicesService, ScriptService scriptService,
PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
this.indicesService = indicesService;
this.scriptService = scriptService;
this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays;
this.fetchPhase = fetchPhase;
}
@Override
@ -173,11 +177,9 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");
DefaultSearchContext searchContext = new DefaultSearchContext(0,
new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()),
null, searcher, indexService, indexShard,
scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
SearchService.NO_TIMEOUT
);
new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(),
parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
SearchContext.setCurrent(searchContext);
try {
searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
View File
@ -272,7 +272,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
list = new ArrayList<>();
requestsByShard.put(shardIt.shardId(), list);
}
list.add(new BulkItemRequest(i, new DeleteRequest(deleteRequest)));
list.add(new BulkItemRequest(i, deleteRequest));
}
} else {
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
View File
@ -30,6 +30,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
@ -111,185 +112,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
Translog.Location location = null;
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
BulkItemRequest item = request.items()[requestIndex];
if (item.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) item.request();
preVersions[requestIndex] = indexRequest.version();
preVersionTypes[requestIndex] = indexRequest.versionType();
try {
WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
location = locationToSync(location, result.location);
// add the response
IndexResponse indexResponse = result.response();
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
} catch (Throwable e) {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
} else {
logger.debug("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
}
}
} else if (item.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) item.request();
preVersions[requestIndex] = deleteRequest.version();
preVersionTypes[requestIndex] = deleteRequest.versionType();
try {
// add the response
final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
DeleteResponse deleteResponse = writeResult.response();
location = locationToSync(location, writeResult.location);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
} catch (Throwable e) {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
} else {
logger.debug("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
}
}
} else if (item.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) item.request();
preVersions[requestIndex] = updateRequest.version();
preVersionTypes[requestIndex] = updateRequest.versionType();
// We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
UpdateResult updateResult;
try {
updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
} catch (Throwable t) {
updateResult = new UpdateResult(null, null, false, t, null);
}
if (updateResult.success()) {
if (updateResult.writeResult != null) {
location = locationToSync(location, updateResult.writeResult.location);
}
switch (updateResult.result.operation()) {
case UPSERT:
case INDEX:
WriteResult<IndexResponse> result = updateResult.writeResult;
IndexRequest indexRequest = updateResult.request();
BytesReference indexSourceAsBytes = indexRequest.source();
// add the response
IndexResponse indexResponse = result.response();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case DELETE:
WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
DeleteResponse response = writeResult.response();
DeleteRequest deleteRequest = updateResult.request();
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
// Replace the update request with the translated delete request to execute on the replica.
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case NONE:
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
item.setIgnoreOnReplica(); // no need to go to the replica
break;
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
} else if (updateResult.failure()) {
Throwable t = updateResult.error;
if (updateResult.retry) {
// updateAttemptsCount is 0-based and marks the current attempt; once it equals retryOnConflict we exit the loop
if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
}
} else {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(t)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) t;
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(t)) {
setResponse(item, item.getPrimaryResponse());
} else if (updateResult.result == null) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
} else {
switch (updateResult.result.operation()) {
case UPSERT:
case INDEX:
IndexRequest indexRequest = updateResult.request();
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
} else {
logger.debug("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
}
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
break;
case DELETE:
DeleteRequest deleteRequest = updateResult.request();
if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
} else {
logger.debug("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
}
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
break;
}
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
}
}
}
} else {
throw new IllegalStateException("Unexpected index operation: " + item.request());
}
assert item.getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
location = handleItem(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
}
processAfterWrite(request.refresh(), indexShard, location);
@ -301,6 +124,198 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
return new Tuple<>(new BulkShardResponse(request.shardId(), responses), request);
}
private Translog.Location handleItem(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
if (item.request() instanceof IndexRequest) {
location = index(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
} else if (item.request() instanceof DeleteRequest) {
location = delete(request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
} else if (item.request() instanceof UpdateRequest) {
Tuple<Translog.Location, BulkItemRequest> tuple = update(metaData, request, indexShard, preVersions, preVersionTypes, location, requestIndex, item);
location = tuple.v1();
item = tuple.v2();
} else {
throw new IllegalStateException("Unexpected index operation: " + item.request());
}
assert item.getPrimaryResponse() != null;
assert preVersionTypes[requestIndex] != null;
return location;
}
private Translog.Location index(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
IndexRequest indexRequest = (IndexRequest) item.request();
preVersions[requestIndex] = indexRequest.version();
preVersionTypes[requestIndex] = indexRequest.versionType();
try {
WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
location = locationToSync(location, result.location);
// add the response
IndexResponse indexResponse = result.response();
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
} catch (Throwable e) {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
logFailure(e, "index", request.shardId(), indexRequest);
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
}
}
return location;
}
private <ReplicationRequestT extends ReplicationRequest<ReplicationRequestT>> void logFailure(Throwable e, String operation, ShardId shardId, ReplicationRequest<ReplicationRequestT> request) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
logger.trace("{} failed to execute bulk item ({}) {}", e, shardId, operation, request);
} else {
logger.debug("{} failed to execute bulk item ({}) {}", e, shardId, operation, request);
}
}
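The logFailure helper above consolidates the duplicated level selection: version conflicts are expected under retry_on_conflict, so they stay at trace, while other failures log at debug. A standalone sketch of the same idea using java.util.logging; the exception type here is a hypothetical stand-in for the RestStatus.CONFLICT check, not an Elasticsearch class:

import java.util.logging.Level;
import java.util.logging.Logger;

final class FailureLogSketch {
    private static final Logger LOGGER = Logger.getLogger("bulk");

    /** Hypothetical stand-in for a version-conflict exception. */
    static final class ConflictSketchException extends RuntimeException {}

    static void logFailure(Throwable e, String operation, String shardId, Object request) {
        // conflicts are routine when retrying, so keep them at the quieter level
        Level level = e instanceof ConflictSketchException ? Level.FINEST : Level.FINE;
        LOGGER.log(level, shardId + " failed to execute bulk item (" + operation + ") " + request, e);
    }
}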
private Translog.Location delete(BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
DeleteRequest deleteRequest = (DeleteRequest) item.request();
preVersions[requestIndex] = deleteRequest.version();
preVersionTypes[requestIndex] = deleteRequest.versionType();
try {
// add the response
final WriteResult<DeleteResponse> writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
DeleteResponse deleteResponse = writeResult.response();
location = locationToSync(location, writeResult.location);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
} catch (Throwable e) {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(e)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) e;
}
logFailure(e, "delete", request.shardId(), deleteRequest);
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(e)) {
setResponse(item, item.getPrimaryResponse());
} else {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
}
}
return location;
}
private Tuple<Translog.Location, BulkItemRequest> update(MetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) {
UpdateRequest updateRequest = (UpdateRequest) item.request();
preVersions[requestIndex] = updateRequest.version();
preVersionTypes[requestIndex] = updateRequest.versionType();
// We need to do the requested retries plus the initial attempt. We don't do < 1+retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE
for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
UpdateResult updateResult;
try {
updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
} catch (Throwable t) {
updateResult = new UpdateResult(null, null, false, t, null);
}
if (updateResult.success()) {
if (updateResult.writeResult != null) {
location = locationToSync(location, updateResult.writeResult.location);
}
switch (updateResult.result.operation()) {
case UPSERT:
case INDEX:
WriteResult<IndexResponse> result = updateResult.writeResult;
IndexRequest indexRequest = updateResult.request();
BytesReference indexSourceAsBytes = indexRequest.source();
// add the response
IndexResponse indexResponse = result.response();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case DELETE:
WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
DeleteResponse response = writeResult.response();
DeleteRequest deleteRequest = updateResult.request();
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
// Replace the update request with the translated delete request to execute on the replica.
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
break;
case NONE:
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
item.setIgnoreOnReplica(); // no need to go to the replica
break;
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
} else if (updateResult.failure()) {
Throwable t = updateResult.error;
if (updateResult.retry) {
// updateAttemptsCount is 0-based and marks the current attempt; once it equals retryOnConflict we exit the loop
if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
}
} else {
// rethrow the failure if we are going to retry on the primary and let the parent failure handling deal with it
if (retryPrimaryException(t)) {
// restore updated versions...
for (int j = 0; j < requestIndex; j++) {
applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
}
throw (ElasticsearchException) t;
}
// if it's a conflict failure, and we already executed the request on a primary (and we execute it
// again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
// then just use the response we got from the successful execution
if (item.getPrimaryResponse() != null && isConflictException(t)) {
setResponse(item, item.getPrimaryResponse());
} else if (updateResult.result == null) {
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
} else {
switch (updateResult.result.operation()) {
case UPSERT:
case INDEX:
IndexRequest indexRequest = updateResult.request();
logFailure(t, "index", request.shardId(), indexRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
break;
case DELETE:
DeleteRequest deleteRequest = updateResult.request();
logFailure(t, "delete", request.shardId(), deleteRequest);
setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
break;
}
}
// NOTE: Breaking out of the retry_on_conflict loop!
break;
}
}
}
return Tuple.tuple(location, item);
}
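The loop bound in update() above is written as updateAttemptsCount <= retryOnConflict rather than updateAttemptsCount < 1 + retryOnConflict precisely because retry_on_conflict may be Integer.MAX_VALUE, and adding one to it overflows. A quick standalone demonstration:

public class RetryBoundSketch {
    public static void main(String[] args) {
        int retryOnConflict = Integer.MAX_VALUE;
        System.out.println(1 + retryOnConflict);     // -2147483648: the addition overflows
        System.out.println(0 < 1 + retryOnConflict); // false: the loop would never run
        System.out.println(0 <= retryOnConflict);    // true: the initial attempt still runs
    }
}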
private void setResponse(BulkItemRequest request, BulkItemResponse response) {
request.setPrimaryResponse(response);
if (response.isFailed()) {
View File
@ -19,7 +19,6 @@
package org.elasticsearch.action.delete;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
@ -80,28 +79,6 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
this.id = id;
}
/**
* Copy constructor that creates a new delete request that is a copy of the one provided as an argument.
*/
public DeleteRequest(DeleteRequest request) {
this(request, request);
}
/**
* Copy constructor that creates a new delete request that is a copy of the one provided as an argument.
* The new request will inherit the headers and context from the original request that caused it.
*/
public DeleteRequest(DeleteRequest request, ActionRequest originalRequest) {
super(request);
this.type = request.type();
this.id = request.id();
this.routing = request.routing();
this.parent = request.parent();
this.refresh = request.refresh();
this.version = request.version();
this.versionType = request.versionType();
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
View File
@ -44,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
@ -68,17 +69,20 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
private final BigArrays bigArrays;
private final FetchPhase fetchPhase;
@Inject
public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
TransportService transportService, IndicesService indicesService, ScriptService scriptService,
PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ExplainRequest::new, ThreadPool.Names.GET);
this.indicesService = indicesService;
this.scriptService = scriptService;
this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays;
this.fetchPhase = fetchPhase;
}
@Override
@ -111,13 +115,10 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
}
SearchContext context = new DefaultSearchContext(
0, new ShardSearchLocalRequest(new String[]{request.type()}, request.nowInMillis, request.filteringAlias()),
null, result.searcher(), indexService, indexShard,
scriptService, pageCacheRecycler,
bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
SearchService.NO_TIMEOUT
);
SearchContext context = new DefaultSearchContext(0,
new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
result.searcher(), indexService, indexShard, scriptService, pageCacheRecycler, bigArrays,
threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
SearchContext.setCurrent(context);
try {
View File
@ -62,7 +62,7 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
public void setIndexConstraints(IndexConstraint[] indexConstraints) {
if (indexConstraints == null) {
throw new NullPointerException("specified index_contraints can't be null");
throw new NullPointerException("specified index_constraints can't be null");
}
this.indexConstraints = indexConstraints;
}
View File
@ -67,26 +67,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
type = "_all";
}
/**
* Copy constructor that creates a new get request that is a copy of the one provided as an argument.
* The new request will inherit the headers and context from the original request that caused it.
*/
public GetRequest(GetRequest getRequest) {
this.index = getRequest.index;
this.type = getRequest.type;
this.id = getRequest.id;
this.routing = getRequest.routing;
this.parent = getRequest.parent;
this.preference = getRequest.preference;
this.fields = getRequest.fields;
this.fetchSourceContext = getRequest.fetchSourceContext;
this.refresh = getRequest.refresh;
this.realtime = getRequest.realtime;
this.version = getRequest.version;
this.versionType = getRequest.versionType;
this.ignoreErrorsOnGeneratedFields = getRequest.ignoreErrorsOnGeneratedFields;
}
/**
* Constructs a new get request against the specified index. The {@link #type(String)} and {@link #id(String)}
* must be set.
View File
@ -159,26 +159,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
public IndexRequest() {
}
/**
* Copy constructor that creates a new index request that is a copy of the one provided as an argument.
* The new request will inherit the headers and context from the original request that caused it.
*/
public IndexRequest(IndexRequest indexRequest) {
super(indexRequest);
this.type = indexRequest.type;
this.id = indexRequest.id;
this.routing = indexRequest.routing;
this.parent = indexRequest.parent;
this.timestamp = indexRequest.timestamp;
this.ttl = indexRequest.ttl;
this.source = indexRequest.source;
this.opType = indexRequest.opType;
this.refresh = indexRequest.refresh;
this.version = indexRequest.version;
this.versionType = indexRequest.versionType;
this.contentType = indexRequest.contentType;
}
/**
* Constructs a new index request against the specified index. The {@link #type(String)} and
* {@link #source(byte[])} must be set.
@ -235,6 +215,11 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
validationException = addValidationError("ttl must not be negative", validationException);
}
}
if (id != null && id.getBytes(StandardCharsets.UTF_8).length > 512) {
validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " +
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}
return validationException;
}
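Note that the new id validation above counts UTF-8 bytes, not characters; the two diverge as soon as the id contains a multi-byte character. A standalone illustration of the same check:

import java.nio.charset.StandardCharsets;

public class IdLengthSketch {
    public static void main(String[] args) {
        String id = "caf\u00e9"; // 4 characters, but 'é' encodes as 2 bytes
        int byteLength = id.getBytes(StandardCharsets.UTF_8).length;
        System.out.println(id.length() + " chars, " + byteLength + " bytes"); // 4 chars, 5 bytes
        if (byteLength > 512) {
            throw new IllegalArgumentException(
                "id is too long, must be no longer than 512 bytes but was: " + byteLength);
        }
    }
}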
@ -647,6 +632,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
if (defaultTimestamp.equals(TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP)) {
timestamp = Long.toString(System.currentTimeMillis());
} else {
// if we are here, the defaultTimestamp is not
// TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP but
// this can only happen if defaultTimestamp was
// assigned again because mappingMd and
// mappingMd#timestamp() are not null
assert mappingMd != null;
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
}
}
View File
@ -103,7 +103,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> {
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id(), throwable);
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", throwable, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id());
bulkRequestModifier.markCurrentItemAsFailed(throwable);
}, (throwable) -> {
if (throwable != null) {
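The one-line fix above moves the throwable from the tail of the argument list to directly after the format string. With a logger whose shape is (message, throwable, params...), a trailing throwable is silently consumed as just another format parameter and its stack trace is lost. A self-contained sketch of that pitfall; the logger here is hypothetical, not the Elasticsearch one:

final class LoggerOrderSketch {
    static void debug(String format, Throwable t, Object... params) {
        String msg = format;
        for (Object p : params) {
            msg = msg.replaceFirst("\\{\\}", String.valueOf(p));
        }
        System.out.println(msg);
        t.printStackTrace(); // only reached when the throwable is passed explicitly
    }

    public static void main(String[] args) {
        debug("failed to execute pipeline [{}] for document [{}/{}/{}]",
                new RuntimeException("boom"), "my-pipeline", "my-index", "my-type", "1");
    }
}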
View File
@ -26,7 +26,8 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortBuilder;
@ -152,16 +153,26 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder<Pe
}
/**
* Delegates to {@link PercolateSourceBuilder#addAggregation(AbstractAggregationBuilder)}
* Delegates to
* {@link PercolateSourceBuilder#addAggregation(AggregatorBuilder)}
*/
public PercolateRequestBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) {
public PercolateRequestBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
sourceBuilder().addAggregation(aggregationBuilder);
return this;
}
/**
* Sets the percolate request definition directly on the request.
* This will overwrite any definitions set by any of the delegate methods.
* Delegates to
* {@link PercolateSourceBuilder#addAggregation(PipelineAggregatorBuilder)}
*/
public PercolateRequestBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) {
sourceBuilder().addAggregation(aggregationBuilder);
return this;
}
/**
* Sets the percolate request definition directly on the request. This will
* overwrite any definitions set by any of the delegate methods.
*/
public PercolateRequestBuilder setSource(PercolateSourceBuilder source) {
sourceBuilder = source;
View File
@ -29,7 +29,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortBuilder;
@ -51,7 +53,8 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
private List<SortBuilder> sorts;
private Boolean trackScores;
private HighlightBuilder highlightBuilder;
private List<AbstractAggregationBuilder> aggregations;
private List<AggregatorBuilder<?>> aggregationBuilders;
private List<PipelineAggregatorBuilder> pipelineAggregationBuilders;
/**
* Sets the document to run the percolate queries against.
@ -123,11 +126,22 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
/**
* Add an aggregation definition.
*/
public PercolateSourceBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) {
if (aggregations == null) {
aggregations = new ArrayList<>();
public PercolateSourceBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
if (aggregationBuilders == null) {
aggregationBuilders = new ArrayList<>();
}
aggregations.add(aggregationBuilder);
aggregationBuilders.add(aggregationBuilder);
return this;
}
/**
* Add an aggregation definition.
*/
public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) {
if (pipelineAggregationBuilders == null) {
pipelineAggregationBuilders = new ArrayList<>();
}
pipelineAggregationBuilders.add(aggregationBuilder);
return this;
}
@ -157,13 +171,20 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
builder.field("track_scores", trackScores);
}
if (highlightBuilder != null) {
highlightBuilder.toXContent(builder, params);
builder.field(SearchSourceBuilder.HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder);
}
if (aggregations != null) {
if (aggregationBuilders != null || pipelineAggregationBuilders != null) {
builder.field("aggregations");
builder.startObject();
for (AbstractAggregationBuilder aggregation : aggregations) {
aggregation.toXContent(builder, params);
if (aggregationBuilders != null) {
for (AggregatorBuilder<?> aggregation : aggregationBuilders) {
aggregation.toXContent(builder, params);
}
}
if (pipelineAggregationBuilders != null) {
for (PipelineAggregatorBuilder aggregation : pipelineAggregationBuilders) {
aggregation.toXContent(builder, params);
}
}
builder.endObject();
}
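The builder above now tracks regular and pipeline aggregations in two lazily created lists but still emits both under a single "aggregations" object. A plain-Java sketch of that merge, with String standing in for the two real builder interfaces:

import java.util.ArrayList;
import java.util.List;

final class AggregationsSketch {
    private List<String> aggregationBuilders;
    private List<String> pipelineAggregationBuilders;

    AggregationsSketch addAggregation(String agg) {
        if (aggregationBuilders == null) {
            aggregationBuilders = new ArrayList<>();
        }
        aggregationBuilders.add(agg);
        return this;
    }

    AggregationsSketch addPipelineAggregation(String agg) {
        if (pipelineAggregationBuilders == null) {
            pipelineAggregationBuilders = new ArrayList<>();
        }
        pipelineAggregationBuilders.add(agg);
        return this;
    }

    // both lists feed one "aggregations" section, mirroring toXContent above
    String render() {
        List<String> all = new ArrayList<>();
        if (aggregationBuilders != null) {
            all.addAll(aggregationBuilders);
        }
        if (pipelineAggregationBuilders != null) {
            all.addAll(pipelineAggregationBuilders);
        }
        return all.isEmpty() ? "{}" : "{\"aggregations\":{" + String.join(",", all) + "}}";
    }
}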
View File
@ -21,7 +21,6 @@ package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.TransportGetAction;
import org.elasticsearch.action.support.ActionFilters;
@ -74,9 +73,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
protected void doExecute(Task task, final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
request.startTime = System.currentTimeMillis();
if (request.getRequest() != null) {
//create a new get request to make sure it has the same headers and context as the original percolate request
GetRequest getRequest = new GetRequest(request.getRequest());
getAction.execute(getRequest, new ActionListener<GetResponse>() {
getAction.execute(request.getRequest(), new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (!getResponse.isExists()) {
View File
@ -76,23 +76,6 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
public SearchRequest() {
}
/**
* Copy constructor that creates a new search request that is a copy of the one provided as an argument.
* The new request will inherit the headers and context from the original request that caused it.
*/
public SearchRequest(SearchRequest searchRequest) {
this.searchType = searchRequest.searchType;
this.indices = searchRequest.indices;
this.routing = searchRequest.routing;
this.preference = searchRequest.preference;
this.template = searchRequest.template;
this.source = searchRequest.source;
this.requestCache = searchRequest.requestCache;
this.scroll = searchRequest.scroll;
this.types = searchRequest.types;
this.indicesOptions = searchRequest.indicesOptions;
}
/**
* Constructs a new search request against the indices. No indices provided here means that search
* will run against all indices.
View File
@ -28,7 +28,8 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
@ -371,9 +372,17 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}
/**
* Adds an get to the search operation.
* Adds an aggregation to the search operation.
*/
public SearchRequestBuilder addAggregation(AbstractAggregationBuilder aggregation) {
public SearchRequestBuilder addAggregation(AggregatorBuilder<?> aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public SearchRequestBuilder addAggregation(PipelineAggregatorBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
View File
@ -59,8 +59,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
final AtomicInteger counter = new AtomicInteger(responses.length());
for (int i = 0; i < responses.length(); i++) {
final int index = i;
SearchRequest searchRequest = new SearchRequest(request.requests().get(i));
searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
searchAction.execute(request.requests().get(i), new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
responses.set(index, new MultiSearchResponse.Item(searchResponse, null));
View File
@ -52,7 +52,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final TransportSearchQueryThenFetchAction queryThenFetchAction;
private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
private final TransportSearchQueryAndFetchAction queryAndFetchAction;
private final boolean optimizeSingleShard;
@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool,
@ -68,29 +67,25 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
this.queryThenFetchAction = queryThenFetchAction;
this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
this.queryAndFetchAction = queryAndFetchAction;
this.optimizeSingleShard = this.settings.getAsBoolean("action.search.optimize_single_shard", true);
}
@Override
protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// optimize search type for cases where there is only one shard group to search on
if (optimizeSingleShard) {
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
if (shardCount == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures; if they are real, the actual search action will report them in the response
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
if (shardCount == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures; if they are real, the actual search action will report them in the response
} catch (Exception e) {
logger.debug("failed to optimize search type, continue as normal", e);
}
if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
dfsQueryThenFetchAction.execute(searchRequest, listener);
} else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
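With the action.search.optimize_single_shard setting removed above, the single-shard rewrite is now unconditional: when only one shard group will be searched, there is no cross-shard merge step, so query-then-fetch buys nothing over fetching in the same round trip. A sketch of the decision, with hypothetical types:

final class SearchTypeSketch {
    enum SearchType { QUERY_THEN_FETCH, QUERY_AND_FETCH, DFS_QUERY_THEN_FETCH }

    static SearchType choose(SearchType requested, int shardCount) {
        if (shardCount == 1) {
            // one shard group: no DFS and no separate fetch phase needed
            return SearchType.QUERY_AND_FETCH;
        }
        return requested;
    }
}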
View File
@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -31,40 +32,35 @@ import java.io.IOException;
*/
public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> extends ActionRequest<Request> {
private String parentTaskNode;
private long parentTaskId;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
protected ChildTaskActionRequest() {
}
public void setParentTask(String parentTaskNode, long parentTaskId) {
this.parentTaskNode = parentTaskNode;
this.parentTaskId = parentTaskId;
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentTaskNode = in.readOptionalString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(parentTaskNode);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
}
@Override
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
return createTask(id, type, action, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskId);
}
}
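The hunk above (and the ChildTaskRequest hunk that follows) replaces the parentTaskNode String plus parentTaskId long with a single TaskId that serializes as one unit. A minimal, self-contained sketch of such a node-scoped id; the field layout is an assumption, and the real class additionally implements the stream read/write calls visible in the hunk:

final class TaskIdSketch {
    // Mirrors the idea of TaskId.EMPTY_TASK_ID: a sentinel for "no parent".
    static final TaskIdSketch EMPTY = new TaskIdSketch(null, -1L);

    final String nodeId; // null when unset
    final long id;       // -1 when unset

    TaskIdSketch(String nodeId, long id) {
        this.nodeId = nodeId;
        this.id = id;
    }

    boolean isUnset() {
        return nodeId == null;
    }

    @Override
    public String toString() {
        return isUnset() ? "unset" : nodeId + ":" + id;
    }
}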

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.support;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.transport.TransportRequest;
import java.io.IOException;
@ -31,38 +32,33 @@ import java.io.IOException;
*/
public class ChildTaskRequest extends TransportRequest {
private String parentTaskNode;
private long parentTaskId;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
protected ChildTaskRequest() {
}
public void setParentTask(String parentTaskNode, long parentTaskId) {
this.parentTaskNode = parentTaskNode;
this.parentTaskId = parentTaskId;
this.parentTaskId = new TaskId(parentTaskNode, parentTaskId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
parentTaskNode = in.readOptionalString();
parentTaskId = in.readLong();
parentTaskId = new TaskId(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(parentTaskNode);
out.writeLong(parentTaskId);
parentTaskId.writeTo(out);
}
@Override
public final Task createTask(long id, String type, String action) {
return createTask(id, type, action, parentTaskNode, parentTaskId);
return createTask(id, type, action, parentTaskId);
}
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new Task(id, type, action, getDescription(), parentTaskId);
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.index.shard.ShardId;
/**
@ -38,13 +37,4 @@ public class BasicReplicationRequest extends ReplicationRequest<BasicReplication
public BasicReplicationRequest(ShardId shardId) {
super(shardId);
}
/**
* Copy constructor that creates a new request that is a copy of the one
* provided as an argument.
*/
protected BasicReplicationRequest(BasicReplicationRequest request) {
super(request);
}
}

View File

@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@ -70,16 +71,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
this.shardId = shardId;
}
/**
* Copy constructor that creates a new request that is a copy of the one provided as an argument.
* The new request will inherit both headers and context from the original request that caused it.
*/
protected ReplicationRequest(Request request) {
this.timeout = request.timeout();
this.index = request.index();
this.consistencyLevel = request.consistencyLevel();
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@ -196,8 +187,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
}
@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new ReplicationTask(id, type, action, getDescription(), parentTaskId);
}
/**

View File

@ -19,11 +19,11 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
@ -35,8 +35,8 @@ import static java.util.Objects.requireNonNull;
public class ReplicationTask extends Task {
private volatile String phase = "starting";
public ReplicationTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
public ReplicationTask(long id, String type, String action, String description, TaskId parentTaskId) {
super(id, type, action, description, parentTaskId);
}
/**

View File

@ -83,6 +83,7 @@ import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
@ -461,60 +462,90 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected void doRun() {
setPhase(task, "routing");
final ClusterState state = observer.observedState();
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
if (blockException != null) {
handleBlockException(blockException);
return;
}
final String concreteIndex = resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex);
if (blockException != null) {
handleBlockException(blockException);
if (handleBlockExceptions(state)) {
return;
}
// request does not have a shardId yet, we need to pass the concrete index to resolve shardId
final String concreteIndex = concreteIndex(state);
resolveRequest(state.metaData(), concreteIndex, request);
assert request.shardId() != null : "request shardId must be set in resolveRequest";
IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId());
final ShardRouting primary = indexShard.primaryShard();
if (primary == null || primary.active() == false) {
logger.trace("primary shard [{}] is not yet active, scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), actionName, request, state.version());
retryBecauseUnavailable(request.shardId(), "primary shard is not active");
return;
}
if (state.nodes().nodeExists(primary.currentNodeId()) == false) {
logger.trace("primary shard [{}] is assigned to an unknown node [{}], scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), primary.currentNodeId(), actionName, request, state.version());
retryBecauseUnavailable(request.shardId(), "primary shard isn't assigned to a known node.");
final ShardRouting primary = primary(state);
if (retryIfUnavailable(state, primary)) {
return;
}
final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
taskManager.registerChildTask(task, node.getId());
if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
performAction(node, transportPrimaryAction, true);
performLocalAction(state, primary, node);
} else {
if (state.version() < request.routedBasedOnClusterVersion()) {
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
return;
} else {
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
request.routedBasedOnClusterVersion(state.version());
}
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
performAction(node, actionName, false);
performRemoteAction(state, primary, node);
}
}
private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ", transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
performAction(node, transportPrimaryAction, true);
}
private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
if (state.version() < request.routedBasedOnClusterVersion()) {
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
return;
} else {
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
request.routedBasedOnClusterVersion(state.version());
}
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
setPhase(task, "rerouted");
performAction(node, actionName, false);
}
private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) {
if (primary == null || primary.active() == false) {
logger.trace("primary shard [{}] is not yet active, scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), actionName, request, state.version());
retryBecauseUnavailable(request.shardId(), "primary shard is not active");
return true;
}
if (state.nodes().nodeExists(primary.currentNodeId()) == false) {
logger.trace("primary shard [{}] is assigned to an unknown node [{}], scheduling a retry: action [{}], request [{}], cluster state version [{}]", request.shardId(), primary.currentNodeId(), actionName, request, state.version());
retryBecauseUnavailable(request.shardId(), "primary shard isn't assigned to a known node.");
return true;
}
return false;
}
private String concreteIndex(ClusterState state) {
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
}
private ShardRouting primary(ClusterState state) {
IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId());
return indexShard.primaryShard();
}
private boolean handleBlockExceptions(ClusterState state) {
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
if (blockException != null) {
handleBlockException(blockException);
return true;
}
blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex(state));
if (blockException != null) {
handleBlockException(blockException);
return true;
}
return false;
}
private void handleBlockException(ClusterBlockException blockException) {
if (blockException.retryable()) {
logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
@ -677,27 +708,36 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
// closed in finishAsFailed(e) in the case of error
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
if (indexShardReference.isRelocated() == false) {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
executeLocally();
} else {
// delegate primary phase to relocation target
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
final ShardRouting primary = indexShardReference.routingEntry();
indexShardReference.close();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
"rerouting indexing to target primary " + primary));
executeRemotely();
}
}
private void executeLocally() throws Exception {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
ReplicationPhase replicationPhase = new ReplicationPhase(task, primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
}
private void executeRemotely() {
// delegate primary phase to relocation target
// it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary
// phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
final ShardRouting primary = indexShardReference.routingEntry();
indexShardReference.close();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
"rerouting indexing to target primary " + primary));
}
/**
* checks whether we can perform a write based on the write consistency setting
* returns *null* if OK to proceed, or a string describing the reason to stop
@ -835,23 +875,48 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
// If the index gets deleted after primary operation, we skip replication
final ClusterState state = clusterService.state();
final IndexRoutingTable index = state.getRoutingTable().index(shardId.getIndex());
final IndexShardRoutingTable shardRoutingTable = (index != null) ? index.shard(shardId.id()) : null;
final IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTableOrNull(shardId);
final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
this.shards = (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
this.executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
this.nodes = state.getNodes();
List<ShardRouting> shards = shards(shardRoutingTable);
boolean executeOnReplica = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
DiscoveryNodes nodes = state.getNodes();
if (shards.isEmpty()) {
logger.debug("replication phase for request [{}] on [{}] is skipped due to index deletion after primary operation", replicaRequest, shardId);
}
// we calculate number of target nodes to send replication operations, including nodes with relocating shards
AtomicInteger numberOfPendingShardInstances = new AtomicInteger();
this.totalShards = countTotalAndPending(shards, executeOnReplica, nodes, numberOfPendingShardInstances);
this.pending = numberOfPendingShardInstances;
this.shards = shards;
this.executeOnReplica = executeOnReplica;
this.nodes = nodes;
if (logger.isTraceEnabled()) {
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
transportReplicaAction, replicaRequest, state.version());
}
}
private int countTotalAndPending(List<ShardRouting> shards, boolean executeOnReplica, DiscoveryNodes nodes, AtomicInteger pending) {
assert pending.get() == 0;
int numberOfIgnoredShardInstances = performOnShards(shards, executeOnReplica, nodes, shard -> pending.incrementAndGet(), shard -> pending.incrementAndGet());
// one for the local primary copy
return 1 + numberOfIgnoredShardInstances + pending.get();
}
private int performOnShards(List<ShardRouting> shards, boolean executeOnReplica, DiscoveryNodes nodes, Consumer<ShardRouting> onLocalShard, Consumer<ShardRouting> onRelocatingShard) {
int numberOfIgnoredShardInstances = 0;
int numberOfPendingShardInstances = 0;
for (ShardRouting shard : shards) {
// the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
if (shard.primary() == false && executeOnReplica == false) {
// If the replicas use shadow replicas, there is no reason to
// perform the action on the replica, so skip it and
// immediately return
// this delays mapping updates on replicas because they have
// to wait until they get the new mapping through the cluster
// state, which is why we recommend pre-defined mappings for
// indices using shadow replicas
numberOfIgnoredShardInstances++;
continue;
}
@ -859,20 +924,26 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
numberOfIgnoredShardInstances++;
continue;
}
// we index on a replica that is initializing as well since we might not have got the event
// yet that it was started. We will get an IllegalShardState exception if it's not started
// and that's fine, we will ignore it
// we never execute replication operation locally as primary operation has already completed locally
// hence, we ignore any local shard for replication
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
numberOfPendingShardInstances++;
onLocalShard.accept(shard);
}
// send operation to relocating shard
// local shard can be a relocation target of a primary that is in relocated state
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
numberOfPendingShardInstances++;
onRelocatingShard.accept(shard);
}
}
// one for the local primary copy
this.totalShards = 1 + numberOfPendingShardInstances + numberOfIgnoredShardInstances;
this.pending = new AtomicInteger(numberOfPendingShardInstances);
if (logger.isTraceEnabled()) {
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
transportReplicaAction, replicaRequest, state.version());
}
return numberOfIgnoredShardInstances;
}
private List<ShardRouting> shards(IndexShardRoutingTable shardRoutingTable) {
return (shardRoutingTable != null) ? shardRoutingTable.shards() : Collections.emptyList();
}
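The countTotalAndPending/performOnShards split above walks the shard list once per purpose and injects the behavior as Consumer callbacks: the counting pass increments an AtomicInteger, the dispatch pass sends the replica operations, and the skip logic stays in one place. A self-contained sketch of that pattern with a deliberately simplified shard model:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

class ShardWalkSketch {
    // Hypothetical: empty strings stand in for shard copies that must be
    // skipped (unassigned, or replication disabled). Returns the skip count.
    static int walk(List<String> shards, Consumer<String> onShard) {
        int ignored = 0;
        for (String shard : shards) {
            if (shard.isEmpty()) {
                ignored++;
                continue;
            }
            onShard.accept(shard);
        }
        return ignored;
    }

    public static void main(String[] args) {
        List<String> shards = Arrays.asList("s0", "", "s1");
        AtomicInteger pending = new AtomicInteger();
        int ignored = walk(shards, s -> pending.incrementAndGet()); // counting pass
        System.out.println("pending=" + pending.get() + ", ignored=" + ignored);
        walk(shards, s -> System.out.println("replicating to " + s)); // dispatch pass
    }
}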
/**
@ -912,36 +983,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
doFinish();
return;
}
for (ShardRouting shard : shards) {
if (shard.primary() == false && executeOnReplica == false) {
// If the replicas use shadow replicas, there is no reason to
// perform the action on the replica, so skip it and
// immediately return
// this delays mapping updates on replicas because they have
// to wait until they get the new mapping through the cluster
// state, which is why we recommend pre-defined mappings for
// indices using shadow replicas
continue;
}
if (shard.unassigned()) {
continue;
}
// we index on a replica that is initializing as well since we might not have got the event
// yet that it was started. We will get an IllegalShardState exception if it's not started
// and that's fine, we will ignore it
// we never execute replication operation locally as primary operation has already completed locally
// hence, we ignore any local shard for replication
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
performOnReplica(shard);
}
// send operation to relocating shard
// local shard can be a relocation target of a primary that is in relocated state
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
performOnReplica(shard.buildTargetRelocatingShard());
}
}
performOnShards(shards, executeOnReplica, nodes, shard -> performOnReplica(shard), shard -> performOnReplica(shard.buildTargetRelocatingShard()));
}
/**

View File

@ -35,7 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.ShardId;

View File

@ -35,7 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;

View File

@ -27,9 +27,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A base class for task requests
*/
@ -47,26 +50,21 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
private String[] actions = ALL_ACTIONS;
private String parentNode;
private TaskId parentTaskId = TaskId.EMPTY_TASK_ID;
private long parentTaskId = ALL_TASKS;
private long taskId = ALL_TASKS;
private TaskId taskId = TaskId.EMPTY_TASK_ID;
public BaseTasksRequest() {
}
@Override
public ActionRequestValidationException validate() {
return null;
}
/**
* Get information about tasks from nodes based on the nodes ids specified.
* If none are passed, information for all nodes will be returned.
*/
public BaseTasksRequest(String... nodesIds) {
this.nodesIds = nodesIds;
ActionRequestValidationException validationException = null;
if (taskId.isSet() == false && nodesIds.length > 0) {
validationException = addValidationError("task id cannot be used together with node ids",
validationException);
}
return validationException;
}
/**
@ -100,39 +98,26 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
*
* By default tasks with any ids are returned.
*/
public long taskId() {
public TaskId taskId() {
return taskId;
}
@SuppressWarnings("unchecked")
public final Request taskId(long taskId) {
public final Request taskId(TaskId taskId) {
this.taskId = taskId;
return (Request) this;
}
/**
* Returns the parent node id that tasks should be filtered by
*/
public String parentNode() {
return parentNode;
}
@SuppressWarnings("unchecked")
public Request parentNode(String parentNode) {
this.parentNode = parentNode;
return (Request) this;
}
/**
* Returns the parent task id that tasks should be filtered by
*/
public long parentTaskId() {
public TaskId parentTaskId() {
return parentTaskId;
}
@SuppressWarnings("unchecked")
public Request parentTaskId(long parentTaskId) {
public Request parentTaskId(TaskId parentTaskId) {
this.parentTaskId = parentTaskId;
return (Request) this;
}
@ -157,11 +142,10 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = new TaskId(in);
parentTaskId = new TaskId(in);
nodesIds = in.readStringArray();
taskId = in.readLong();
actions = in.readStringArray();
parentNode = in.readOptionalString();
parentTaskId = in.readLong();
if (in.readBoolean()) {
timeout = TimeValue.readTimeValue(in);
}
@ -170,11 +154,10 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
taskId.writeTo(out);
parentTaskId.writeTo(out);
out.writeStringArrayNullable(nodesIds);
out.writeLong(taskId);
out.writeStringArrayNullable(actions);
out.writeOptionalString(parentNode);
out.writeLong(parentTaskId);
out.writeOptionalStreamable(timeout);
}
@ -182,18 +165,13 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
return false;
}
if (taskId() != ALL_TASKS) {
if(taskId() != task.getId()) {
if (taskId().isSet() == false) {
if(taskId().getId() != task.getId()) {
return false;
}
}
if (parentNode() != null) {
if (parentNode().equals(task.getParentNode()) == false) {
return false;
}
}
if (parentTaskId() != ALL_TASKS) {
if (parentTaskId() != task.getParentId()) {
if (parentTaskId.isSet() == false) {
if (parentTaskId.equals(task.getParentTaskId()) == false) {
return false;
}
}
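One subtlety in the match() hunk above: in this version of the code, TaskId.isSet() answers "is this the empty sentinel?", so isSet() == false means an id was actually provided and the filter applies. A standalone sketch of the resulting filter logic, with -1 as the hypothetical unset sentinel:

class TaskMatchSketch {
    static final long UNSET = -1L;

    // A task matches when every provided filter agrees; absent filters
    // (sentinel values) are skipped, mirroring the hunk above.
    static boolean matches(long requestedId, long taskId,
                           long requestedParent, long taskParent) {
        if (requestedId != UNSET && requestedId != taskId) {
            return false;
        }
        if (requestedParent != UNSET && requestedParent != taskParent) {
            return false;
        }
        return true;
    }
}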

View File

@ -124,13 +124,17 @@ public abstract class TransportTasksAction<
}
protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
return clusterState.nodes().resolveNodesIds(request.nodesIds());
if (request.taskId().isSet()) {
return clusterState.nodes().resolveNodesIds(request.nodesIds());
} else {
return new String[]{request.taskId().getNodeId()};
}
}
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
if (request.taskId().isSet() == false) {
// we are only checking one task, we can optimize it
Task task = taskManager.getTask(request.taskId());
Task task = taskManager.getTask(request.taskId().getId());
if (task != null) {
if (request.match(task)) {
operation.accept((OperationTask) task);
@ -143,13 +147,14 @@ public abstract class TransportTasksAction<
} else {
for (Task task : taskManager.getTasks().values()) {
if (request.match(task)) {
operation.accept((OperationTask)task);
operation.accept((OperationTask) task);
}
}
}
}
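The resolveNodes change above exploits the same TaskId structure: when a request names a specific task, the id already pins the node, so the usual node-id resolution can be skipped. A sketch under the assumption that a task id renders as "nodeId:id":

class NodeResolveSketch {
    // Hypothetical: a concrete task id identifies its node directly;
    // otherwise fall back to the requested node ids.
    static String[] resolveNodes(String taskId, String[] requestedNodes) {
        if (taskId != null) {
            return new String[] { taskId.split(":", 2)[0] };
        }
        return requestedNodes;
    }
}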
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure>
taskOperationFailures, List<FailedNodeException> failedNodeExceptions);
@SuppressWarnings("unchecked")
protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) {
@ -232,34 +237,36 @@ public abstract class TransportTasksAction<
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we
// need to fix
// those (and they randomize the client node usage, so tricky to find when)
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
} else {
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
taskManager.registerChildTask(task, node.getId());
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
@Override
public NodeTasksResponse newInstance() {
return new NodeTasksResponse();
}
transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
new BaseTransportResponseHandler<NodeTasksResponse>() {
@Override
public NodeTasksResponse newInstance() {
return new NodeTasksResponse();
}
@Override
public void handleResponse(NodeTasksResponse response) {
onOperation(idx, response);
}
@Override
public void handleResponse(NodeTasksResponse response) {
onOperation(idx, response);
}
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
}
@Override
public void handleException(TransportException exp) {
onFailure(idx, node.id(), exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}
} catch (Throwable t) {
onFailure(idx, nodeId, t);

View File

@ -26,7 +26,6 @@ import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.index.IndexRequest;
@ -169,7 +168,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
switch (result.operation()) {
case UPSERT:
IndexRequest upsertRequest = new IndexRequest((IndexRequest)result.action());
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
final BytesReference upsertSourceBytes = upsertRequest.source();
indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
@ -206,7 +205,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
});
break;
case INDEX:
IndexRequest indexRequest = new IndexRequest((IndexRequest)result.action());
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
final BytesReference indexSourceBytes = indexRequest.source();
indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
@ -236,8 +235,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
});
break;
case DELETE:
DeleteRequest deleteRequest = new DeleteRequest(result.action(), request);
deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
deleteAction.execute(result.action(), new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse response) {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);

View File

@ -29,10 +29,10 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -41,12 +41,17 @@ import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.transport.TransportSettings;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@ -57,7 +62,6 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
final class Bootstrap {
private static volatile Bootstrap INSTANCE;
private volatile Node node;
private final CountDownLatch keepAliveLatch = new CountDownLatch(1);
private final Thread keepAliveThread;
@ -184,12 +188,13 @@ final class Bootstrap {
.put(settings)
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
.build();
enforceOrLogLimits(nodeSettings);
node = new Node(nodeSettings);
}
@SuppressForbidden(reason = "Exception#printStackTrace()")
private static void setupLogging(Settings settings, Environment environment) {
private static void setupLogging(Settings settings) {
try {
Class.forName("org.apache.log4j.Logger");
LogConfigurator.configure(settings, true);
@ -249,18 +254,13 @@ final class Bootstrap {
Environment environment = initialSettings(foreground);
Settings settings = environment.settings();
setupLogging(settings, environment);
setupLogging(settings);
checkForCustomConfFile();
if (environment.pidFile() != null) {
PidFile.create(environment.pidFile(), true);
}
if (System.getProperty("es.max-open-files", "false").equals("true")) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
logger.info("max_open_files [{}]", ProcessProbe.getInstance().getMaxFileDescriptorCount());
}
// warn if running using the client VM
if (JvmInfo.jvmInfo().getVmName().toLowerCase(Locale.ROOT).contains("client")) {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
@ -362,4 +362,48 @@ final class Bootstrap {
+ Version.CURRENT.luceneVersion + "] but the current lucene version is [" + org.apache.lucene.util.Version.LATEST + "]");
}
}
static final Set<Setting> ENFORCE_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
TransportSettings.BIND_HOST,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
)));
private static boolean enforceLimits(Settings settings) {
for (Setting setting : ENFORCE_SETTINGS) {
if (setting.exists(settings)) {
return true;
}
}
return false;
}
static void enforceOrLogLimits(Settings settings) { // pkg private for testing
/* We enforce limits once any network host is configured. In this case we assume the node is running in production
* and all production limit checks must pass. This should be extended as we go to settings like:
* - discovery.zen.minimum_master_nodes
* - discovery.zen.ping.unicast.hosts is set if we use zen disco
* - ensure we can write in all data directories
* - fail if mlockall failed and was configured
* - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
* - fail if the default cluster.name is used; if this is set up on a network, a real cluster name should be used? */
final boolean enforceLimits = enforceLimits(settings);
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
final long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
if (maxFileDescriptorCount != -1) {
final int fileDescriptorCountThreshold = (1 << 16);
if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
if (enforceLimits){
throw new IllegalStateException("max file descriptors [" + maxFileDescriptorCount
+ "] for elasticsearch process likely too low, increase it to at least [" + fileDescriptorCountThreshold +"]");
}
logger.warn(
"max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
maxFileDescriptorCount, fileDescriptorCountThreshold);
}
}
}
}
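The enforceOrLogLimits logic above turns the same check into either a hard failure or a warning, keyed off whether any network host setting is present (the production signal). A small sketch of that enforce-or-warn pattern; the limit and message are illustrative only:

class LimitCheckSketch {
    // Hypothetical: -1 means "could not be probed" and is not enforced,
    // matching the maxFileDescriptorCount handling above.
    static void checkLimit(long actual, long threshold, boolean enforce) {
        if (actual != -1 && actual < threshold) {
            String msg = "limit [" + actual + "] likely too low, increase to at least [" + threshold + "]";
            if (enforce) {
                throw new IllegalStateException(msg);
            }
            System.err.println("warn: " + msg);
        }
    }
}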

View File

@ -21,11 +21,13 @@ package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.lang.management.ManagementFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
/** Checks that the JVM is ok and won't cause index corruption */
final class JVMCheck {
@ -42,10 +44,46 @@ final class JVMCheck {
*/
static final String JVM_BYPASS = "es.bypass.vm.check";
/**
* Metadata and messaging for checking and reporting HotSpot
* issues.
*/
interface HotSpotCheck {
/**
* If this HotSpot check should be executed.
*
* @return true if this HotSpot check should be executed
*/
boolean check();
/**
* The error message to display when this HotSpot issue is
* present.
*
* @return the error message for this HotSpot issue
*/
String getErrorMessage();
/**
* The warning message for this HotSpot issue if a workaround
* exists and is used.
*
* @return the warning message for this HotSpot issue
*/
Optional<String> getWarningMessage();
/**
* The workaround for this HotSpot issue, if one exists.
*
* @return the workaround for this HotSpot issue, if one exists
*/
Optional<String> getWorkaround();
}
/**
* Metadata and messaging for hotspot bugs.
*/
static final class HotspotBug {
static class HotspotBug implements HotSpotCheck {
/** OpenJDK bug URL */
final String bugUrl;
@ -59,7 +97,7 @@ final class JVMCheck {
}
/** Returns an error message to the user for a broken version */
String getErrorMessage() {
public String getErrorMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Java version: ").append(fullVersion());
sb.append(" suffers from critical bug ").append(bugUrl);
@ -78,7 +116,7 @@ final class JVMCheck {
}
/** Warns the user when a workaround is being used to dodge the bug */
String getWarningMessage() {
public Optional<String> getWarningMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Workaround flag ").append(workAround);
sb.append(" for bug ").append(bugUrl);
@ -88,15 +126,53 @@ final class JVMCheck {
sb.append(System.lineSeparator());
sb.append("Upgrading is preferred, see ").append(JVM_RECOMMENDATIONS);
sb.append(" for current recommendations.");
return Optional.of(sb.toString());
}
public boolean check() {
return true;
}
@Override
public Optional<String> getWorkaround() {
return Optional.of(workAround);
}
}
static class G1GCCheck implements HotSpotCheck {
@Override
public boolean check() {
return JvmInfo.jvmInfo().useG1GC().equals("true");
}
/** Returns an error message to the user for a broken version */
public String getErrorMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Java version: ").append(fullVersion());
sb.append(" can cause data corruption");
sb.append(" when used with G1GC.");
sb.append(System.lineSeparator());
sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS);
sb.append(" for current recommendations.");
return sb.toString();
}
@Override
public Optional<String> getWarningMessage() {
return Optional.empty();
}
@Override
public Optional<String> getWorkaround() {
return Optional.empty();
}
}
/** mapping of hotspot version to hotspot bug information for the most serious bugs */
static final Map<String,HotspotBug> JVM_BROKEN_HOTSPOT_VERSIONS;
static final Map<String, HotSpotCheck> JVM_BROKEN_HOTSPOT_VERSIONS;
static {
Map<String,HotspotBug> bugs = new HashMap<>();
Map<String, HotSpotCheck> bugs = new HashMap<>();
// 1.7.0: loop optimizer bug
bugs.put("21.0-b17", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-7070134", "-XX:-UseLoopPredicate"));
@ -104,6 +180,12 @@ final class JVMCheck {
bugs.put("24.0-b56", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
bugs.put("24.45-b08", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
bugs.put("24.51-b03", new HotspotBug("https://bugs.openjdk.java.net/browse/JDK-8024830", "-XX:-UseSuperWord"));
G1GCCheck g1GcCheck = new G1GCCheck();
bugs.put("25.0-b70", g1GcCheck);
bugs.put("25.11-b03", g1GcCheck);
bugs.put("25.20-b23", g1GcCheck);
bugs.put("25.25-b02", g1GcCheck);
bugs.put("25.31-b07", g1GcCheck);
JVM_BROKEN_HOTSPOT_VERSIONS = Collections.unmodifiableMap(bugs);
}
@ -115,10 +197,10 @@ final class JVMCheck {
if (Boolean.parseBoolean(System.getProperty(JVM_BYPASS))) {
Loggers.getLogger(JVMCheck.class).warn("bypassing jvm version check for version [{}], this can result in data corruption!", fullVersion());
} else if ("Oracle Corporation".equals(Constants.JVM_VENDOR)) {
HotspotBug bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
if (bug != null) {
if (bug.workAround != null && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.workAround)) {
Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage());
HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
if (bug != null && bug.check()) {
if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) {
Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get());
} else {
throw new RuntimeException(bug.getErrorMessage());
}

View File

@ -98,7 +98,7 @@ import java.util.Map;
* <p>
* When running tests you have to pass it to the test runner like this:
* <pre>
* mvn test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ...
* gradle test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ...
* </pre>
* See <a href="https://docs.oracle.com/javase/7/docs/technotes/guides/security/troubleshooting-security.html">
* Troubleshooting Security</a> for information.

View File

@ -272,7 +272,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @return The result future
* @see org.elasticsearch.client.Requests#listTasksRequest(String...)
* @see org.elasticsearch.client.Requests#listTasksRequest()
*/
ActionFuture<ListTasksResponse> listTasks(ListTasksRequest request);
@ -281,7 +281,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @param listener A listener to be notified with a result
* @see org.elasticsearch.client.Requests#listTasksRequest(String...)
* @see org.elasticsearch.client.Requests#listTasksRequest()
*/
void listTasks(ListTasksRequest request, ActionListener<ListTasksResponse> listener);
@ -295,7 +295,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @return The result future
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
* @see org.elasticsearch.client.Requests#cancelTasksRequest()
*/
ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request);
@ -304,7 +304,7 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*
* @param request The nodes tasks request
* @param listener A listener to be notified with a result
* @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
* @see org.elasticsearch.client.Requests#cancelTasksRequest()
*/
void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener);

View File

@ -419,23 +419,11 @@ public class Requests {
/**
* Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to get the tasks for
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
*/
public static ListTasksRequest listTasksRequest(String... nodesIds) {
return new ListTasksRequest(nodesIds);
}
/**
* Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
*
* @param nodesIds The nodes ids to cancel the tasks on
* @return The nodes tasks request
* @see org.elasticsearch.client.ClusterAdminClient#cancelTasks(CancelTasksRequest)
*/
public static CancelTasksRequest cancelTasksRequest(String... nodesIds) {
return new CancelTasksRequest(nodesIds);
public static CancelTasksRequest cancelTasksRequest() {
return new CancelTasksRequest();
}
/**

View File

@ -39,7 +39,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
@ -155,7 +154,10 @@ public class TransportClient extends AbstractClient {
pluginsService.processModules(modules);
Injector injector = modules.createInjector();
injector.getInstance(TransportService.class).start();
final TransportService transportService = injector.getInstance(TransportService.class);
transportService.start();
transportService.acceptIncomingRequests();
TransportClient transportClient = new TransportClient(injector);
success = true;
return transportClient;

View File

@ -27,6 +27,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.InvalidAliasNameException;
@ -143,7 +145,9 @@ public class AliasValidator extends AbstractComponent {
private void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
try {
queryShardContext.reset(parser);
queryShardContext.parseContext().parseInnerQueryBuilder().toFilter(queryShardContext);
QueryParseContext queryParseContext = queryShardContext.parseContext();
QueryBuilder<?> queryBuilder = QueryBuilder.rewriteQuery(queryParseContext.parseInnerQueryBuilder(), queryShardContext);
queryBuilder.toFilter(queryShardContext);
} finally {
queryShardContext.reset(null);
parser.close();

View File

@ -32,7 +32,6 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.joda.time.DateTimeZone;
@ -527,29 +526,35 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return expressions;
}
if (expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0)))) {
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) {
return Arrays.asList(metaData.concreteAllIndices());
} else if (options.expandWildcardsOpen()) {
return Arrays.asList(metaData.concreteAllOpenIndices());
} else if (options.expandWildcardsClosed()) {
return Arrays.asList(metaData.concreteAllClosedIndices());
} else {
return Collections.emptyList();
}
if (isEmptyOrTrivialWildcard(expressions)) {
return resolveEmptyOrTrivialWildcard(options, metaData, true);
}
Set<String> result = innerResolve(context, expressions, options, metaData);
if (result == null) {
return expressions;
}
if (result.isEmpty() && !options.allowNoIndices()) {
IndexNotFoundException infe = new IndexNotFoundException((String)null);
infe.setResources("index_or_alias", expressions.toArray(new String[0]));
throw infe;
}
return new ArrayList<>(result);
}
private Set<String> innerResolve(Context context, List<String> expressions, IndicesOptions options, MetaData metaData) {
Set<String> result = null;
for (int i = 0; i < expressions.size(); i++) {
String expression = expressions.get(i);
if (metaData.getAliasAndIndexLookup().containsKey(expression)) {
if (aliasOrIndexExists(metaData, expression)) {
if (result != null) {
result.add(expression);
}
continue;
}
if (Strings.isEmpty(expression)) {
throw new IndexNotFoundException(expression);
throw infe(expression);
}
boolean add = true;
if (expression.charAt(0) == '+') {
@ -557,32 +562,19 @@ public class IndexNameExpressionResolver extends AbstractComponent {
if (i == 0) {
result = new HashSet<>();
}
add = true;
expression = expression.substring(1);
} else if (expression.charAt(0) == '-') {
// if its the first, fill it with all the indices...
if (i == 0) {
String[] concreteIndices;
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) {
concreteIndices = metaData.concreteAllIndices();
} else if (options.expandWildcardsOpen()) {
concreteIndices = metaData.concreteAllOpenIndices();
} else if (options.expandWildcardsClosed()) {
concreteIndices = metaData.concreteAllClosedIndices();
} else {
assert false : "Shouldn't end up here";
concreteIndices = Strings.EMPTY_ARRAY;
}
result = new HashSet<>(Arrays.asList(concreteIndices));
List<String> concreteIndices = resolveEmptyOrTrivialWildcard(options, metaData, false);
result = new HashSet<>(concreteIndices);
}
add = false;
expression = expression.substring(1);
}
if (!Regex.isSimpleMatchPattern(expression)) {
if (!options.ignoreUnavailable() && !metaData.getAliasAndIndexLookup().containsKey(expression)) {
IndexNotFoundException infe = new IndexNotFoundException(expression);
infe.setResources("index_or_alias", expression);
throw infe;
if (!unavailableIgnoredOrExists(options, metaData, expression)) {
throw infe(expression);
}
if (result != null) {
if (add) {
@ -595,77 +587,119 @@ public class IndexNameExpressionResolver extends AbstractComponent {
}
if (result == null) {
// add all the previous ones...
result = new HashSet<>();
result.addAll(expressions.subList(0, i));
result = new HashSet<>(expressions.subList(0, i));
}
final IndexMetaData.State excludeState;
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()){
excludeState = null;
} else if (options.expandWildcardsOpen() && options.expandWildcardsClosed() == false) {
excludeState = IndexMetaData.State.CLOSE;
} else if (options.expandWildcardsClosed() && options.expandWildcardsOpen() == false) {
excludeState = IndexMetaData.State.OPEN;
} else {
assert false : "this shouldn't get called if wildcards expand to none";
excludeState = null;
}
final Map<String, AliasOrIndex> matches;
if (Regex.isMatchAllPattern(expression)) {
// Can only happen if the expressions was initially: '-*'
matches = metaData.getAliasAndIndexLookup();
} else if (expression.indexOf("*") == expression.length() - 1) {
// Suffix wildcard:
assert expression.length() >= 2 : "expression [" + expression + "] should have at least a length of 2";
String fromPrefix = expression.substring(0, expression.length() - 1);
char[] toPrefixCharArr = fromPrefix.toCharArray();
toPrefixCharArr[toPrefixCharArr.length - 1]++;
String toPrefix = new String(toPrefixCharArr);
matches = metaData.getAliasAndIndexLookup().subMap(fromPrefix, toPrefix);
} else {
// Other wildcard expressions:
final String pattern = expression;
matches = metaData.getAliasAndIndexLookup()
.entrySet()
.stream()
.filter(e -> Regex.simpleMatch(pattern, e.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
Set<String> expand = new HashSet<>();
for (Map.Entry<String, AliasOrIndex> entry : matches.entrySet()) {
AliasOrIndex aliasOrIndex = entry.getValue();
if (context.isPreserveAliases() && aliasOrIndex.isAlias()) {
expand.add(entry.getKey());
} else {
for (IndexMetaData meta : aliasOrIndex.getIndices()) {
if (excludeState == null || meta.getState() != excludeState) {
expand.add(meta.getIndex().getName());
}
}
}
}
final IndexMetaData.State excludeState = excludeState(options);
final Map<String, AliasOrIndex> matches = matches(metaData, expression);
Set<String> expand = expand(context, excludeState, matches);
if (add) {
result.addAll(expand);
} else {
result.removeAll(expand);
}
if (matches.isEmpty() && options.allowNoIndices() == false) {
IndexNotFoundException infe = new IndexNotFoundException(expression);
infe.setResources("index_or_alias", expression);
throw infe;
if (!noIndicesAllowedOrMatches(options, matches)) {
throw infe(expression);
}
}
if (result == null) {
return expressions;
return result;
}
private boolean noIndicesAllowedOrMatches(IndicesOptions options, Map<String, AliasOrIndex> matches) {
return options.allowNoIndices() || !matches.isEmpty();
}
private boolean unavailableIgnoredOrExists(IndicesOptions options, MetaData metaData, String expression) {
return options.ignoreUnavailable() || aliasOrIndexExists(metaData, expression);
}
private boolean aliasOrIndexExists(MetaData metaData, String expression) {
return metaData.getAliasAndIndexLookup().containsKey(expression);
}
private static IndexNotFoundException infe(String expression) {
IndexNotFoundException infe = new IndexNotFoundException(expression);
infe.setResources("index_or_alias", expression);
return infe;
}
private static IndexMetaData.State excludeState(IndicesOptions options) {
final IndexMetaData.State excludeState;
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) {
excludeState = null;
} else if (options.expandWildcardsOpen() && options.expandWildcardsClosed() == false) {
excludeState = IndexMetaData.State.CLOSE;
} else if (options.expandWildcardsClosed() && options.expandWildcardsOpen() == false) {
excludeState = IndexMetaData.State.OPEN;
} else {
assert false : "this shouldn't get called if wildcards expand to none";
excludeState = null;
}
if (result.isEmpty() && !options.allowNoIndices()) {
IndexNotFoundException infe = new IndexNotFoundException((String)null);
infe.setResources("index_or_alias", expressions.toArray(new String[0]));
throw infe;
return excludeState;
}
private static Map<String, AliasOrIndex> matches(MetaData metaData, String expression) {
if (Regex.isMatchAllPattern(expression)) {
// Can only happen if the expressions was initially: '-*'
return metaData.getAliasAndIndexLookup();
} else if (expression.indexOf("*") == expression.length() - 1) {
return suffixWildcard(metaData, expression);
} else {
return otherWildcard(metaData, expression);
}
}
private static Map<String, AliasOrIndex> suffixWildcard(MetaData metaData, String expression) {
assert expression.length() >= 2 : "expression [" + expression + "] should have at least a length of 2";
String fromPrefix = expression.substring(0, expression.length() - 1);
char[] toPrefixCharArr = fromPrefix.toCharArray();
toPrefixCharArr[toPrefixCharArr.length - 1]++;
String toPrefix = new String(toPrefixCharArr);
return metaData.getAliasAndIndexLookup().subMap(fromPrefix, toPrefix);
}
private static Map<String, AliasOrIndex> otherWildcard(MetaData metaData, String expression) {
final String pattern = expression;
return metaData.getAliasAndIndexLookup()
.entrySet()
.stream()
.filter(e -> Regex.simpleMatch(pattern, e.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
private static Set<String> expand(Context context, IndexMetaData.State excludeState, Map<String, AliasOrIndex> matches) {
Set<String> expand = new HashSet<>();
for (Map.Entry<String, AliasOrIndex> entry : matches.entrySet()) {
AliasOrIndex aliasOrIndex = entry.getValue();
if (context.isPreserveAliases() && aliasOrIndex.isAlias()) {
expand.add(entry.getKey());
} else {
for (IndexMetaData meta : aliasOrIndex.getIndices()) {
if (excludeState == null || meta.getState() != excludeState) {
expand.add(meta.getIndex().getName());
}
}
}
}
return expand;
}
private boolean isEmptyOrTrivialWildcard(List<String> expressions) {
return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0))));
}
private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) {
return Arrays.asList(metaData.concreteAllIndices());
} else if (options.expandWildcardsOpen()) {
return Arrays.asList(metaData.concreteAllOpenIndices());
} else if (options.expandWildcardsClosed()) {
return Arrays.asList(metaData.concreteAllClosedIndices());
} else {
assert assertEmpty : "Shouldn't end up here";
return Collections.emptyList();
}
return new ArrayList<>(result);
}
}

View File

@ -399,27 +399,16 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
// in the index, bulk, update and delete APIs.
public String resolveIndexRouting(@Nullable String parent, @Nullable String routing, String aliasOrIndex) {
if (aliasOrIndex == null) {
if (routing == null) {
return parent;
}
return routing;
return routingOrParent(parent, routing);
}
AliasOrIndex result = getAliasAndIndexLookup().get(aliasOrIndex);
if (result == null || result.isAlias() == false) {
if (routing == null) {
return parent;
}
return routing;
return routingOrParent(parent, routing);
}
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) result;
if (result.getIndices().size() > 1) {
String[] indexNames = new String[result.getIndices().size()];
int i = 0;
for (IndexMetaData indexMetaData : result.getIndices()) {
indexNames[i++] = indexMetaData.getIndex().getName();
}
throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexNames) + "], can't execute a single index op");
rejectSingleIndexOperation(aliasOrIndex, result);
}
AliasMetaData aliasMd = alias.getFirstAliasMetaData();
if (aliasMd.indexRouting() != null) {
@ -434,6 +423,19 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
// Alias routing overrides the parent routing (if any).
return aliasMd.indexRouting();
}
return routingOrParent(parent, routing);
}
private void rejectSingleIndexOperation(String aliasOrIndex, AliasOrIndex result) {
String[] indexNames = new String[result.getIndices().size()];
int i = 0;
for (IndexMetaData indexMetaData : result.getIndices()) {
indexNames[i++] = indexMetaData.getIndex().getName();
}
throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexNames) + "], can't execute a single index op");
}
private String routingOrParent(@Nullable String parent, @Nullable String routing) {
if (routing == null) {
return parent;
}
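Taken together, the refactored resolveIndexRouting applies a fixed precedence: an alias's index routing wins, an explicit routing value comes next, and the parent routing is only the fallback when nothing else is set. A standalone sketch of just that ordering (a hypothetical helper, not the MetaData API):
/** Illustrative only: routing resolution order as described above. */
static String pickRouting(String aliasIndexRouting, String routing, String parent) {
    if (aliasIndexRouting != null) {
        return aliasIndexRouting;    // alias routing overrides explicit and parent routing
    }
    if (routing != null) {
        return routing;              // explicit routing overrides the parent routing
    }
    return parent;                   // parent routing is the last resort
}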

View File

@ -366,7 +366,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}
}
if (ordered.isEmpty()) {
throw new IllegalArgumentException("No data node with critera [" + nodeAttribute + "] found");
throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found");
}
return new PlainShardIterator(shardId, ordered);
}

View File

@ -458,7 +458,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*/
public void started(ShardRouting shard) {
ensureMutable();
assert !shard.active() : "expected an intializing shard " + shard;
assert !shard.active() : "expected an initializing shard " + shard;
if (shard.relocatingNodeId() == null) {
// if this is not a target shard for relocation, we need to update statistics
inactiveShardCount--;

View File

@ -715,7 +715,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
if (logger.isTraceEnabled()) {
logger.trace("No eligable node found to assign shard [{}] decision [{}]", shard, decision.type());
logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type());
}
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -46,8 +47,13 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.primary()) {
if (shardRouting.currentNodeId() == null) {
// fresh primary, we can allocate wherever
return allocation.decision(Decision.YES, NAME, "primary shard can be allocated anywhere");
if (shardRouting.restoreSource() != null) {
// restoring from a snapshot - check that the node can handle the version
return isVersionCompatible(shardRouting.restoreSource(), node, allocation);
} else {
// fresh primary, we can allocate wherever
return allocation.decision(Decision.YES, NAME, "primary shard can be allocated anywhere");
}
} else {
// relocating primary, only migrate to newer host
return isVersionCompatible(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation);
@ -77,4 +83,15 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
target.node().version(), source.node().version());
}
}
private Decision isVersionCompatible(RestoreSource restoreSource, final RoutingNode target, RoutingAllocation allocation) {
if (target.node().version().onOrAfter(restoreSource.version())) {
/* we can allocate if we can restore from a snapshot that is older or on the same version */
return allocation.decision(Decision.YES, NAME, "target node version [%s] is same or newer than snapshot version [%s]",
target.node().version(), restoreSource.version());
} else {
return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than snapshot version [%s]",
target.node().version(), restoreSource.version());
}
}
}
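The new restore branch reduces to a single comparison: a node may be allocated the primary only if its version is on or after the version the snapshot was written with, since data written by a newer version may not be readable on an older node. A self-contained sketch of that gate, assuming a simple comparable version type (not the Elasticsearch Version class):
final class Version implements Comparable<Version> {
    final int major;
    final int minor;
    Version(int major, int minor) {
        this.major = major;
        this.minor = minor;
    }
    @Override
    public int compareTo(Version o) {
        return major != o.major ? Integer.compare(major, o.major) : Integer.compare(minor, o.minor);
    }
    boolean onOrAfter(Version other) {
        return compareTo(other) >= 0;
    }
}
// canRestore(new Version(2, 3), new Version(2, 1)) == true
// canRestore(new Version(1, 7), new Version(2, 1)) == false
static boolean canRestore(Version nodeVersion, Version snapshotVersion) {
    return nodeVersion.onOrAfter(snapshotVersion);
}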

View File

@ -18,8 +18,10 @@
*/
package org.elasticsearch.common;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Locale;
import java.util.Objects;
/**
* <p>Encodes and decodes to and from Base64 notation.</p>
@ -161,7 +163,7 @@ import java.util.Locale;
* @author rob@iharder.net
* @version 2.3.7
*/
public class Base64 {
public final class Base64 {
/* ******** P U B L I C F I E L D S ******** */
@ -791,10 +793,7 @@ public class Base64 {
* @since 2.3.1
*/
public static byte[] encodeBytesToBytes(byte[] source, int off, int len, int options) throws java.io.IOException {
if (source == null) {
throw new NullPointerException("Cannot serialize a null array.");
} // end if: null
Objects.requireNonNull(source, "Cannot serialize a null array.");
if (off < 0) {
throw new IllegalArgumentException("Cannot have negative offset: " + off);
@ -809,103 +808,109 @@ public class Base64 {
String.format(Locale.ROOT, "Cannot have offset of %d and length of %d with array of length %d", off, len, source.length));
} // end if: off < 0
// Compress?
if ((options & GZIP) != 0) {
java.io.ByteArrayOutputStream baos = null;
java.util.zip.GZIPOutputStream gzos = null;
Base64.OutputStream b64os = null;
try {
// GZip -> Base64 -> ByteArray
baos = new java.io.ByteArrayOutputStream();
b64os = new Base64.OutputStream(baos, ENCODE | options);
gzos = new java.util.zip.GZIPOutputStream(b64os);
gzos.write(source, off, len);
gzos.close();
} // end try
catch (java.io.IOException e) {
// Catch it and then throw it immediately so that
// the finally{} block is called for cleanup.
throw e;
} // end catch
finally {
try {
gzos.close();
} catch (Exception e) {
}
try {
b64os.close();
} catch (Exception e) {
}
try {
baos.close();
} catch (Exception e) {
}
} // end finally
return baos.toByteArray();
return encodeCompressedBytes(source, off, len, options);
} // end if: compress
// Else, don't compress. Better not to use streams at all then.
else {
boolean breakLines = (options & DO_BREAK_LINES) != 0;
//int len43 = len * 4 / 3;
//byte[] outBuff = new byte[ ( len43 ) // Main 4:3
// + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding
// + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines
// Try to determine more precisely how big the array needs to be.
// If we get it right, we don't have to do an array copy, and
// we save a bunch of memory.
int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0); // Bytes needed for actual encoding
if (breakLines) {
encLen += encLen / MAX_LINE_LENGTH; // Plus extra newline characters
}
byte[] outBuff = new byte[encLen];
int d = 0;
int e = 0;
int len2 = len - 2;
int lineLength = 0;
for (; d < len2; d += 3, e += 4) {
encode3to4(source, d + off, 3, outBuff, e, options);
lineLength += 4;
if (breakLines && lineLength >= MAX_LINE_LENGTH) {
outBuff[e + 4] = NEW_LINE;
e++;
lineLength = 0;
} // end if: end of line
} // en dfor: each piece of array
if (d < len) {
encode3to4(source, d + off, len - d, outBuff, e, options);
e += 4;
} // end if: some padding needed
// Only resize array if we didn't guess it right.
if (e <= outBuff.length - 1) {
// If breaking lines and the last byte falls right at
// the line length (76 bytes per line), there will be
// one extra byte, and the array will need to be resized.
// Not too bad of an estimate on array size, I'd say.
byte[] finalOut = new byte[e];
System.arraycopy(outBuff, 0, finalOut, 0, e);
//System.err.println("Having to resize array from " + outBuff.length + " to " + e );
return finalOut;
} else {
//System.err.println("No need to resize array.");
return outBuff;
}
return encodeNonCompressedBytes(source, off, len, options);
} // end else: don't compress
} // end encodeBytesToBytes
private static byte[] encodeNonCompressedBytes(byte[] source, int off, int len, int options) {
boolean breakLines = (options & DO_BREAK_LINES) != 0;
//int len43 = len * 4 / 3;
//byte[] outBuff = new byte[ ( len43 ) // Main 4:3
// + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding
// + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines
// Try to determine more precisely how big the array needs to be.
// If we get it right, we don't have to do an array copy, and
// we save a bunch of memory.
int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0); // Bytes needed for actual encoding
if (breakLines) {
encLen += encLen / MAX_LINE_LENGTH; // Plus extra newline characters
}
byte[] outBuff = new byte[encLen];
int d = 0;
int e = 0;
int len2 = len - 2;
int lineLength = 0;
for (; d < len2; d += 3, e += 4) {
encode3to4(source, d + off, 3, outBuff, e, options);
lineLength += 4;
if (breakLines && lineLength >= MAX_LINE_LENGTH) {
outBuff[e + 4] = NEW_LINE;
e++;
lineLength = 0;
} // end if: end of line
} // end for: each piece of array
if (d < len) {
encode3to4(source, d + off, len - d, outBuff, e, options);
e += 4;
} // end if: some padding needed
// Only resize array if we didn't guess it right.
if (e <= outBuff.length - 1) {
// If breaking lines and the last byte falls right at
// the line length (76 bytes per line), there will be
// one extra byte, and the array will need to be resized.
// Not too bad of an estimate on array size, I'd say.
byte[] finalOut = new byte[e];
System.arraycopy(outBuff, 0, finalOut, 0, e);
//System.err.println("Having to resize array from " + outBuff.length + " to " + e );
return finalOut;
} else {
//System.err.println("No need to resize array.");
return outBuff;
}
}
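The sizing formula at the top of encodeNonCompressedBytes is worth a worked example: each complete 3-byte group becomes 4 output characters, a remainder of 1 or 2 bytes adds one more padded 4-character group, and line breaking adds one newline per 76 output characters. A restatement with a few checked values:
static int encodedLength(int len, boolean breakLines) {
    int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0);
    if (breakLines) {
        encLen += encLen / 76;    // 76 == MAX_LINE_LENGTH in the class above
    }
    return encLen;
}
// encodedLength(3, false)  == 4   one full group
// encodedLength(5, false)  == 8   one full group plus one padded group
// encodedLength(57, true)  == 77  exactly 76 characters plus one newline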
private static byte[] encodeCompressedBytes(byte[] source, int off, int len, int options) throws IOException {
java.io.ByteArrayOutputStream baos = null;
java.util.zip.GZIPOutputStream gzos = null;
OutputStream b64os = null;
try {
// GZip -> Base64 -> ByteArray
baos = new java.io.ByteArrayOutputStream();
b64os = new OutputStream(baos, ENCODE | options);
gzos = new java.util.zip.GZIPOutputStream(b64os);
gzos.write(source, off, len);
gzos.close();
} // end try
catch (IOException e) {
// Catch it and then throw it immediately so that
// the finally{} block is called for cleanup.
throw e;
} // end catch
finally {
try {
gzos.close();
} catch (Exception e) {
}
try {
b64os.close();
} catch (Exception e) {
}
try {
baos.close();
} catch (Exception e) {
}
} // end finally
return baos.toByteArray();
}
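The manual finally-block cleanup in encodeCompressedBytes predates try-with-resources; on Java 7+ the same GZip -> Base64 -> ByteArray pipeline can be written so each stream is closed exactly once, in the right order, even on failure. A sketch under the assumption that ENCODE and the class's own OutputStream behave as above:
private static byte[] encodeCompressedBytes(byte[] source, int off, int len, int options) throws IOException {
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    // Resources close in reverse declaration order: the gzip trailer is flushed
    // into the base64 stream, then the base64 padding is flushed into the byte array.
    try (OutputStream b64os = new OutputStream(baos, ENCODE | options);
         java.util.zip.GZIPOutputStream gzos = new java.util.zip.GZIPOutputStream(b64os)) {
        gzos.write(source, off, len);
    }
    return baos.toByteArray();
}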
/* ******** D E C O D I N G M E T H O D S ******** */
@ -937,17 +942,10 @@ public class Base64 {
* or there is not enough room in the array.
* @since 1.3
*/
private static int decode4to3(
byte[] source, int srcOffset,
byte[] destination, int destOffset, int options) {
private static int decode4to3(byte[] source, int srcOffset, byte[] destination, int destOffset, int options) {
// Lots of error checking and exception throwing
if (source == null) {
throw new NullPointerException("Source array was null.");
} // end if
if (destination == null) {
throw new NullPointerException("Destination array was null.");
} // end if
Objects.requireNonNull(source, "Source array was null.");
Objects.requireNonNull(destination, "Destination array was null.");
if (srcOffset < 0 || srcOffset + 3 >= source.length) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Source array with length %d cannot have offset of %d and still process four bytes.", source.length, srcOffset));
@ -957,56 +955,36 @@ public class Base64 {
"Destination array with length %d cannot have offset of %d and still store three bytes.", destination.length, destOffset));
} // end if
byte[] DECODABET = getDecodabet(options);
// Two ways to do the same thing. Don't know which way I like best.
//int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
// | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 );
int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
| ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12);
destination[destOffset] = (byte) (outBuff >>> 16);
// Example: Dk==
if (source[srcOffset + 2] == EQUALS_SIGN) {
// Two ways to do the same thing. Don't know which way I like best.
//int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
// | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 );
int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
| ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12);
destination[destOffset] = (byte) (outBuff >>> 16);
return 1;
}
// Example: DkL=
else if (source[srcOffset + 3] == EQUALS_SIGN) {
// Two ways to do the same thing. Don't know which way I like best.
//int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
// | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
// | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 );
int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
| ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
| ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6);
outBuff |= ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6);
destination[destOffset + 1] = (byte) (outBuff >>> 8);
destination[destOffset] = (byte) (outBuff >>> 16);
destination[destOffset + 1] = (byte) (outBuff >>> 8);
// Example: DkL=
if (source[srcOffset + 3] == EQUALS_SIGN) {
return 2;
}
outBuff |= ((DECODABET[source[srcOffset + 3]] & 0xFF));
destination[destOffset + 2] = (byte) (outBuff);
// Example: DkLE
else {
// Two ways to do the same thing. Don't know which way I like best.
//int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
// | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
// | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 )
// | ( ( DECODABET[ source[ srcOffset + 3 ] ] << 24 ) >>> 24 );
int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
| ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
| ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6)
| ((DECODABET[source[srcOffset + 3]] & 0xFF));
destination[destOffset] = (byte) (outBuff >> 16);
destination[destOffset + 1] = (byte) (outBuff >> 8);
destination[destOffset + 2] = (byte) (outBuff);
return 3;
}
} // end decodeToBytes
return 3;
}
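To see the bit packing in the rewritten decode4to3 concretely, take the quartet "Dk==": the decodabet maps 'D' to 3 and 'k' to 36, the accumulator packs them as 6-bit fields, and the single output byte is the top eight bits:
int outBuff = (3 << 18) | (36 << 12);    // == 933888
byte b = (byte) (outBuff >>> 16);        // == 14 (0x0E), so "Dk==" decodes to one byte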
/**
@ -1051,13 +1029,9 @@ public class Base64 {
* @throws java.io.IOException If bogus characters exist in source data
* @since 1.3
*/
public static byte[] decode(byte[] source, int off, int len, int options)
throws java.io.IOException {
public static byte[] decode(byte[] source, int off, int len, int options) throws java.io.IOException {
// Lots of error checking and exception throwing
if (source == null) {
throw new NullPointerException("Cannot decode null source array.");
} // end if
Objects.requireNonNull(source, "Cannot decode null source array.");
if (off < 0 || off + len > source.length) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Source array with length %d cannot have offset of %d and process %d bytes.", source.length, off, len));
@ -1074,16 +1048,21 @@ public class Base64 {
int len34 = len * 3 / 4; // Estimate on array size
byte[] outBuff = new byte[len34]; // Upper limit on size of output
int outBuffPosn = 0; // Keep track of where we're writing
int outBuffPosn = decode(source, off, len, options, DECODABET, outBuff);
byte[] out = new byte[outBuffPosn];
System.arraycopy(outBuff, 0, out, 0, outBuffPosn);
return out;
} // end decode
private static int decode(byte[] source, int off, int len, int options, byte[] DECODABET, byte[] outBuff) throws IOException {
int outBuffPosn = 0; // Keep track of where we're writing
byte[] b4 = new byte[4]; // Four byte buffer from source, eliminating white space
int b4Posn = 0; // Keep track of four byte input buffer
int i = 0; // Source array counter
byte sbiDecode = 0; // Special value from DECODABET
for (int i = off; i < off + len; i++) { // Loop through source
for (i = off; i < off + len; i++) { // Loop through source
sbiDecode = DECODABET[source[i] & 0xFF];
byte sbiDecode = DECODABET[source[i] & 0xFF];
// White space, Equals sign, or legit Base64 character
// Note the values such as -5 and -9 in the
@ -1099,7 +1078,7 @@ public class Base64 {
if (source[i] == EQUALS_SIGN) {
// check if the equals sign is somewhere in between
if (i+1 < len + off) {
throw new java.io.IOException(String.format(Locale.ROOT,
throw new IOException(String.format(Locale.ROOT,
"Found equals sign at position %d of the base64 string, not at the end", i));
}
break;
@ -1107,7 +1086,7 @@ public class Base64 {
} // end if: quartet built
else {
if (source[i] == EQUALS_SIGN && len + off > i && source[i+1] != EQUALS_SIGN) {
throw new java.io.IOException(String.format(Locale.ROOT,
throw new IOException(String.format(Locale.ROOT,
"Found equals sign at position %d of the base64 string, not at the end", i));
} // end if: equals sign and next character not as well
} // end else:
@ -1115,15 +1094,12 @@ public class Base64 {
} // end if: white space, equals sign or better
else {
// There's a bad input character in the Base64 stream.
throw new java.io.IOException(String.format(Locale.ROOT,
throw new IOException(String.format(Locale.ROOT,
"Bad Base64 input character decimal %d in array position %d", ((int) source[i]) & 0xFF, i));
} // end else:
} // each input character
byte[] out = new byte[outBuffPosn];
System.arraycopy(outBuff, 0, out, 0, outBuffPosn);
return out;
} // end decode
return outBuffPosn;
}
/**

View File

@ -118,37 +118,9 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
// .addAndGet() instead of looping (because we don't have to check a
// limit), which makes the RamAccountingTermsEnum case faster.
if (this.memoryBytesLimit == -1) {
newUsed = this.used.addAndGet(bytes);
if (logger.isTraceEnabled()) {
logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: [-1b]]",
this.name, new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed));
}
newUsed = noLimit(bytes, label);
} else {
// Otherwise, check the addition and commit the addition, looping if
// there are conflicts. May result in additional logging, but it's
// trace logging and shouldn't be counted on for additions.
long currentUsed;
do {
currentUsed = this.used.get();
newUsed = currentUsed + bytes;
long newUsedWithOverhead = (long) (newUsed * overheadConstant);
if (logger.isTraceEnabled()) {
logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]",
this.name,
new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed),
memoryBytesLimit, new ByteSizeValue(memoryBytesLimit),
newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead));
}
if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) {
logger.warn("[{}] New used memory {} [{}] for data of [{}] would be larger than configured breaker: {} [{}], breaking",
this.name,
newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead), label,
memoryBytesLimit, new ByteSizeValue(memoryBytesLimit));
circuitBreak(label, newUsedWithOverhead);
}
// Attempt to set the new used value, but make sure it hasn't changed
// underneath us, if it has, keep trying until we are able to set it
} while (!this.used.compareAndSet(currentUsed, newUsed));
newUsed = limit(bytes, label);
}
// Additionally, we need to check that we haven't exceeded the parent's limit
@ -164,6 +136,45 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker {
return newUsed;
}
private long noLimit(long bytes, String label) {
long newUsed;
newUsed = this.used.addAndGet(bytes);
if (logger.isTraceEnabled()) {
logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: [-1b]]",
this.name, new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed));
}
return newUsed;
}
private long limit(long bytes, String label) {
long newUsed;
// Otherwise, check the addition and commit the addition, looping if
// there are conflicts. May result in additional logging, but it's
// trace logging and shouldn't be counted on for additions.
long currentUsed;
do {
currentUsed = this.used.get();
newUsed = currentUsed + bytes;
long newUsedWithOverhead = (long) (newUsed * overheadConstant);
if (logger.isTraceEnabled()) {
logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]",
this.name,
new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed),
memoryBytesLimit, new ByteSizeValue(memoryBytesLimit),
newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead));
}
if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) {
logger.warn("[{}] New used memory {} [{}] for data of [{}] would be larger than configured breaker: {} [{}], breaking",
this.name,
newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead), label,
memoryBytesLimit, new ByteSizeValue(memoryBytesLimit));
circuitBreak(label, newUsedWithOverhead);
}
// Attempt to set the new used value, but make sure it hasn't changed
// underneath us, if it has, keep trying until we are able to set it
} while (!this.used.compareAndSet(currentUsed, newUsed));
return newUsed;
}
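The extracted limit() method is a textbook compare-and-set accumulation loop: read the current value, compute the candidate, validate it, and commit only if no other thread raced in between. Stripped of logging and the parent-breaker logic, the pattern looks like this generic sketch:
import java.util.concurrent.atomic.AtomicLong;
final class BoundedCounter {
    private final AtomicLong used = new AtomicLong();
    private final long limit;
    BoundedCounter(long limit) {
        this.limit = limit;
    }
    long add(long bytes) {
        long current;
        long next;
        do {
            current = used.get();
            next = current + bytes;
            if (next > limit) {
                throw new IllegalStateException("would exceed limit: " + next + " > " + limit);
            }
            // Retry if another thread changed the counter since we read it.
        } while (used.compareAndSet(current, next) == false);
        return next;
    }
}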
/**
* Add an <b>exact</b> number of bytes, not checking for tripping the
* circuit breaker. This bypasses the overheadConstant multiplication.

View File

@ -39,6 +39,8 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -67,24 +69,22 @@ import java.util.function.Supplier;
import static org.elasticsearch.ElasticsearchException.readException;
import static org.elasticsearch.ElasticsearchException.readStackTrace;
/**
* A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing.
*/
public abstract class StreamInput extends InputStream {
private final NamedWriteableRegistry namedWriteableRegistry;
private Version version = Version.CURRENT;
protected StreamInput() {
this.namedWriteableRegistry = new NamedWriteableRegistry();
}
protected StreamInput(NamedWriteableRegistry namedWriteableRegistry) {
this.namedWriteableRegistry = namedWriteableRegistry;
}
/**
* The version of the node on the other side of this stream.
*/
public Version getVersion() {
return this.version;
}
/**
* Set the version of the node on the other side of this stream.
*/
public void setVersion(Version version) {
this.version = version;
}
@ -663,6 +663,20 @@ public abstract class StreamInput extends InputStream {
throw new UnsupportedOperationException("can't read named writeable from StreamInput");
}
/**
* Reads a {@link AggregatorBuilder} from the current stream
*/
public AggregatorBuilder readAggregatorFactory() throws IOException {
return readNamedWriteable(AggregatorBuilder.class);
}
/**
* Reads a {@link PipelineAggregatorBuilder} from the current stream
*/
public PipelineAggregatorBuilder readPipelineAggregatorFactory() throws IOException {
return readNamedWriteable(PipelineAggregatorBuilder.class);
}
/**
* Reads a {@link QueryBuilder} from the current stream
*/

View File

@ -38,6 +38,8 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.joda.time.ReadableInstant;
import java.io.EOFException;
@ -58,19 +60,24 @@ import java.util.List;
import java.util.Map;
/**
*
* A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing.
*/
public abstract class StreamOutput extends OutputStream {
private Version version = Version.CURRENT;
/**
* The version of the node on the other side of this stream.
*/
public Version getVersion() {
return this.version;
}
public StreamOutput setVersion(Version version) {
/**
* Set the version of the node on the other side of this stream.
*/
public void setVersion(Version version) {
this.version = version;
return this;
}
public long position() throws IOException {
@ -640,6 +647,20 @@ public abstract class StreamOutput extends OutputStream {
namedWriteable.writeTo(this);
}
/**
* Writes a {@link AggregatorBuilder} to the current stream
*/
public void writeAggregatorBuilder(AggregatorBuilder<?> builder) throws IOException {
writeNamedWriteable(builder);
}
/**
* Writes a {@link PipelineAggregatorBuilder} to the current stream
*/
public void writePipelineAggregatorBuilder(PipelineAggregatorBuilder<?> builder) throws IOException {
writeNamedWriteable(builder);
}
/**
* Writes a {@link QueryBuilder} to the current stream
*/

View File

@ -22,11 +22,26 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
*
* Prefer implementing {@link Writeable} over implementing this interface where possible. Lots of code depends on this interface so this
* isn't always possible.
*
* Implementers of this interface almost always declare a no-arg constructor that is exclusively used for creating "empty" objects on which
* you then call {@link #readFrom(StreamInput)}. Because {@linkplain #readFrom(StreamInput)} isn't part of the constructor the fields
* on implementers cannot be final. It is for these reasons that this interface has fallen out of favor compared to {@linkplain Writeable}.
*/
public interface Streamable {
/**
* Set this object's fields from a {@linkplain StreamInput}.
*/
void readFrom(StreamInput in) throws IOException;
/**
* Write this object's fields to a {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
}
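As this javadoc notes, a Streamable implementer pairs a no-arg constructor with readFrom, which forces its fields to be non-final. A hedged sketch of the shape such a class takes (the class and fields are invented for illustration; readString/readVInt and their write counterparts are the stream primitives used throughout this codebase):
public class ShardInfo implements Streamable {    // hypothetical example class
    private String index;    // cannot be final: assigned by readFrom, not a constructor
    private int shardId;
    public ShardInfo() {
        // no-arg constructor, used only to create an "empty" object to read into
    }
    @Override
    public void readFrom(StreamInput in) throws IOException {
        index = in.readString();
        shardId = in.readVInt();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        out.writeVInt(shardId);
    }
}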

View File

@ -20,11 +20,17 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be read from {@linkplain StreamInput} by calling their {@link #readFrom(StreamInput)} method.
*
* It is common for implementers of this interface to declare a <code>public static final</code> instance of themselves named PROTOTYPE so
* users can call {@linkplain #readFrom(StreamInput)} on it. It is also fairly typical for readFrom to be implemented as a method that just
* calls a constructor that takes {@linkplain StreamInput} as a parameter. This allows the fields in the implementer to be
* <code>final</code>.
*/
public interface StreamableReader<T> {
/**
* Reads a copy of an object with the same type form the stream input
*
* The caller object remains unchanged.
* Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
*/
T readFrom(StreamInput in) throws IOException;
}

View File

@ -21,10 +21,20 @@ package org.elasticsearch.common.io.stream;
import java.io.IOException;
/**
* Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
* across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
* serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For
* example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization.
* {@linkplain org.elasticsearch.common.unit.TimeValue} actually implements {@linkplain Streamable} not {@linkplain Writeable} but it has
* the same contract.
*
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
* so this isn't always possible.
*/
public interface Writeable<T> extends StreamableReader<T> {
/**
* Writes the current object into the output stream out
* Write this into the {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
}
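The same hypothetical class written against Writeable shows why it is preferred: deserialization goes through a constructor, so the fields stay final, and a static PROTOTYPE instance carries the readFrom entry point as the StreamableReader javadoc describes:
public class ShardInfo implements Writeable<ShardInfo> {    // hypothetical example class
    public static final ShardInfo PROTOTYPE = new ShardInfo("", 0);
    private final String index;    // final: assigned exactly once, in the constructor
    private final int shardId;
    public ShardInfo(String index, int shardId) {
        this.index = index;
        this.shardId = shardId;
    }
    @Override
    public ShardInfo readFrom(StreamInput in) throws IOException {
        return new ShardInfo(in.readString(), in.readVInt());
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        out.writeVInt(shardId);
    }
}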

View File

@ -0,0 +1,23 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Classes for streaming objects from one Elasticsearch node to another over its binary internode protocol.
*/
package org.elasticsearch.common.io.stream;

View File

@ -17,13 +17,12 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.Layout;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.helpers.LogLog;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.io.OutputStream;

View File

@ -19,104 +19,188 @@
package org.elasticsearch.common.logging;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
/**
* Contract for all elasticsearch loggers.
* Elasticsearch's logger wrapper.
*/
public interface ESLogger {
public class ESLogger {
private static final String FQCN = ESLogger.class.getName();
String getPrefix();
private final String prefix;
private final Logger logger;
String getName();
public ESLogger(String prefix, Logger logger) {
this.prefix = prefix;
this.logger = logger;
}
/**
* Allows to set the logger level
* If the new level is null, the logger will inherit its level
* from its nearest ancestor with a specific (non-null) level value.
* @param level the new level
* The prefix of the log.
*/
void setLevel(String level);
public String getPrefix() {
return this.prefix;
}
/**
* Returns the current logger level
* If the level is null, it means that the logger inherits its level
* from its nearest ancestor with a specific (non-null) level value.
* @return the logger level
* Fetch the underlying logger so we can look at it. Only exists for testing.
*/
String getLevel();
Logger getLogger() {
return logger;
}
/**
* Returns {@code true} if a TRACE level message is logged.
* Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null
* level.
*/
boolean isTraceEnabled();
public void setLevel(String level) {
if (level == null) {
logger.setLevel(null);
} else if ("error".equalsIgnoreCase(level)) {
logger.setLevel(Level.ERROR);
} else if ("warn".equalsIgnoreCase(level)) {
logger.setLevel(Level.WARN);
} else if ("info".equalsIgnoreCase(level)) {
logger.setLevel(Level.INFO);
} else if ("debug".equalsIgnoreCase(level)) {
logger.setLevel(Level.DEBUG);
} else if ("trace".equalsIgnoreCase(level)) {
logger.setLevel(Level.TRACE);
}
}
/**
* Returns {@code true} if a DEBUG level message is logged.
* The level of this logger. If null then the logger is inheriting its level from its nearest ancestor with a non-null level.
*/
boolean isDebugEnabled();
public String getLevel() {
if (logger.getLevel() == null) {
return null;
}
return logger.getLevel().toString();
}
/**
* Returns {@code true} if an INFO level message is logged.
* The name of this logger.
*/
boolean isInfoEnabled();
public String getName() {
return logger.getName();
}
/**
* Returns {@code true} if a WARN level message is logged.
* Returns {@code true} if a TRACE level message should be logged.
*/
boolean isWarnEnabled();
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
/**
* Returns {@code true} if an ERROR level message is logged.
* Returns {@code true} if a DEBUG level message should be logged.
*/
boolean isErrorEnabled();
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
/**
* Returns {@code true} if an INFO level message should be logged.
*/
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
/**
* Returns {@code true} if a WARN level message should be logged.
*/
public boolean isWarnEnabled() {
return logger.isEnabledFor(Level.WARN);
}
/**
* Returns {@code true} if an ERROR level message should be logged.
*/
public boolean isErrorEnabled() {
return logger.isEnabledFor(Level.ERROR);
}
/**
* Logs a TRACE level message.
*/
public void trace(String msg, Object... params) {
trace(msg, null, params);
}
/**
* Logs a TRACE level message with an exception.
*/
public void trace(String msg, Throwable cause, Object... params) {
if (isTraceEnabled()) {
logger.log(FQCN, Level.TRACE, format(prefix, msg, params), cause);
}
}
/**
* Logs a DEBUG level message.
*/
void trace(String msg, Object... params);
public void debug(String msg, Object... params) {
debug(msg, null, params);
}
/**
* Logs a DEBUG level message.
* Logs a DEBUG level message with an exception.
*/
void trace(String msg, Throwable cause, Object... params);
public void debug(String msg, Throwable cause, Object... params) {
if (isDebugEnabled()) {
logger.log(FQCN, Level.DEBUG, format(prefix, msg, params), cause);
}
}
/**
* Logs a DEBUG level message.
* Logs an INFO level message.
*/
void debug(String msg, Object... params);
public void info(String msg, Object... params) {
info(msg, null, params);
}
/**
* Logs a DEBUG level message.
* Logs an INFO level message with an exception.
*/
void debug(String msg, Throwable cause, Object... params);
/**
* Logs an INFO level message.
*/
void info(String msg, Object... params);
/**
* Logs an INFO level message.
*/
void info(String msg, Throwable cause, Object... params);
public void info(String msg, Throwable cause, Object... params) {
if (isInfoEnabled()) {
logger.log(FQCN, Level.INFO, format(prefix, msg, params), cause);
}
}
/**
* Logs a WARN level message.
*/
void warn(String msg, Object... params);
public void warn(String msg, Object... params) {
warn(msg, null, params);
}
/**
* Logs a WARN level message.
* Logs a WARN level message with an exception.
*/
void warn(String msg, Throwable cause, Object... params);
public void warn(String msg, Throwable cause, Object... params) {
if (isWarnEnabled()) {
logger.log(FQCN, Level.WARN, format(prefix, msg, params), cause);
}
}
/**
* Logs an ERROR level message.
* Logs an ERROR level message.
*/
void error(String msg, Object... params);
public void error(String msg, Object... params) {
error(msg, null, params);
}
/**
* Logs an ERROR level message.
* Logs an ERROR level message with an exception.
*/
void error(String msg, Throwable cause, Object... params);
public void error(String msg, Throwable cause, Object... params) {
if (isErrorEnabled()) {
logger.log(FQCN, Level.ERROR, format(prefix, msg, params), cause);
}
}
}
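Call sites are unchanged by this consolidation: messages use "{}" placeholders expanded by LoggerMessageFormat, and because each method checks its own enabled flag, callers need no guards around cheap arguments. A short usage sketch:
ESLogger logger = ESLoggerFactory.getLogger("org.elasticsearch.example");
logger.info("allocated [{}] shards on [{}]", 5, "node-1");
Exception cause = new IllegalStateException("disk full");
logger.warn("recovery failed for shard [{}]", cause, 3);    // the Throwable comes before the format params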

View File

@ -19,62 +19,29 @@
package org.elasticsearch.common.logging;
import org.elasticsearch.common.logging.jdk.JdkESLoggerFactory;
import org.elasticsearch.common.logging.log4j.Log4jESLoggerFactory;
import org.elasticsearch.common.logging.slf4j.Slf4jESLoggerFactory;
import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.apache.log4j.Logger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.regex.Pattern;
/**
* Factory to get {@link ESLogger}s
*/
public abstract class ESLoggerFactory {
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING = new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
public static final Setting<LogLevel> LOG_LEVEL_SETTING = Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
private static volatile ESLoggerFactory defaultFactory = new JdkESLoggerFactory();
static {
try {
Class<?> loggerClazz = Class.forName("org.apache.log4j.Logger");
// below will throw a NoSuchMethod failure with using slf4j log4j bridge
loggerClazz.getMethod("setLevel", Class.forName("org.apache.log4j.Level"));
defaultFactory = new Log4jESLoggerFactory();
} catch (Throwable e) {
// no log4j
try {
Class.forName("org.slf4j.Logger");
defaultFactory = new Slf4jESLoggerFactory();
} catch (Throwable e1) {
// no slf4j
}
}
}
/**
* Changes the default factory.
*/
public static void setDefaultFactory(ESLoggerFactory defaultFactory) {
if (defaultFactory == null) {
throw new NullPointerException("defaultFactory");
}
ESLoggerFactory.defaultFactory = defaultFactory;
}
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
public static final Setting<LogLevel> LOG_LEVEL_SETTING =
Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
public static ESLogger getLogger(String prefix, String name) {
return defaultFactory.newInstance(prefix == null ? null : prefix.intern(), name.intern());
prefix = prefix == null ? null : prefix.intern();
name = name.intern();
return new ESLogger(prefix, Logger.getLogger(name));
}
public static ESLogger getLogger(String name) {
return defaultFactory.newInstance(name.intern());
return getLogger(null, name);
}
public static DeprecationLogger getDeprecationLogger(String name) {
@ -86,17 +53,13 @@ public abstract class ESLoggerFactory {
}
public static ESLogger getRootLogger() {
return defaultFactory.rootLogger();
return new ESLogger(null, Logger.getRootLogger());
}
public ESLogger newInstance(String name) {
return newInstance(null, name);
private ESLoggerFactory() {
// Utility class can't be built.
}
protected abstract ESLogger rootLogger();
protected abstract ESLogger newInstance(String prefix, String name);
public enum LogLevel {
WARN, TRACE, INFO, DEBUG, ERROR;
public static LogLevel parse(String level) {

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.PropertyConfigurator;
import org.elasticsearch.ElasticsearchException;
@ -39,13 +39,14 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.Strings.cleanPath;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
* Configures log4j with a special set of replacements.
*/
public class LogConfigurator {
@ -54,10 +55,12 @@ public class LogConfigurator {
private static final Map<String, String> REPLACEMENTS;
static {
Map<String, String> replacements = new HashMap<>();
replacements.put("console", "org.elasticsearch.common.logging.log4j.ConsoleAppender");
// Appenders
replacements.put("async", "org.apache.log4j.AsyncAppender");
replacements.put("console", ConsoleAppender.class.getName());
replacements.put("dailyRollingFile", "org.apache.log4j.DailyRollingFileAppender");
replacements.put("externallyRolledFile", "org.apache.log4j.ExternallyRolledFileAppender");
replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
replacements.put("file", "org.apache.log4j.FileAppender");
replacements.put("jdbc", "org.apache.log4j.jdbc.JDBCAppender");
replacements.put("jms", "org.apache.log4j.net.JMSAppender");
@ -65,17 +68,18 @@ public class LogConfigurator {
replacements.put("ntevent", "org.apache.log4j.nt.NTEventLogAppender");
replacements.put("null", "org.apache.log4j.NullAppender");
replacements.put("rollingFile", "org.apache.log4j.RollingFileAppender");
replacements.put("extrasRollingFile", "org.apache.log4j.rolling.RollingFileAppender");
replacements.put("smtp", "org.apache.log4j.net.SMTPAppender");
replacements.put("socket", "org.apache.log4j.net.SocketAppender");
replacements.put("socketHub", "org.apache.log4j.net.SocketHubAppender");
replacements.put("syslog", "org.apache.log4j.net.SyslogAppender");
replacements.put("telnet", "org.apache.log4j.net.TelnetAppender");
replacements.put("terminal", "org.elasticsearch.common.logging.log4j.TerminalAppender");
// policies
replacements.put("terminal", TerminalAppender.class.getName());
// Policies
replacements.put("timeBased", "org.apache.log4j.rolling.TimeBasedRollingPolicy");
replacements.put("sizeBased", "org.apache.log4j.rolling.SizeBasedTriggeringPolicy");
// layouts
// Layouts
replacements.put("simple", "org.apache.log4j.SimpleLayout");
replacements.put("html", "org.apache.log4j.HTMLLayout");
replacements.put("pattern", "org.apache.log4j.PatternLayout");
@ -141,7 +145,8 @@ public class LogConfigurator {
static void resolveConfig(Environment env, final Settings.Builder settingsBuilder) {
try {
Files.walkFileTree(env.configFile(), EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
Files.walkFileTree(env.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String fileName = file.getFileName().toString();

View File

@ -17,13 +17,13 @@
* under the License.
*/
package org.elasticsearch.common.logging.support;
package org.elasticsearch.common.logging;
import java.util.HashMap;
import java.util.Map;
import java.util.HashSet;
import java.util.Set;
/**
*
* Format string for Elasticsearch log messages.
*/
public class LoggerMessageFormat {
@ -79,13 +79,13 @@ public class LoggerMessageFormat {
// itself escaped: "abc x:\\{}"
// we have to consume one backward slash
sbuf.append(messagePattern.substring(i, j - 1));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
deeplyAppendParameter(sbuf, argArray[L], new HashSet<Object[]>());
i = j + 2;
}
} else {
// normal case
sbuf.append(messagePattern.substring(i, j));
deeplyAppendParameter(sbuf, argArray[L], new HashMap());
deeplyAppendParameter(sbuf, argArray[L], new HashSet<Object[]>());
i = j + 2;
}
}
@ -117,7 +117,7 @@ public class LoggerMessageFormat {
}
}
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Map seenMap) {
private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Set<Object[]> seen) {
if (o == null) {
sbuf.append("null");
return;
@ -144,7 +144,7 @@ public class LoggerMessageFormat {
} else if (o instanceof double[]) {
doubleArrayAppend(sbuf, (double[]) o);
} else {
objectArrayAppend(sbuf, (Object[]) o, seenMap);
objectArrayAppend(sbuf, (Object[]) o, seen);
}
}
}
@ -159,18 +159,18 @@ public class LoggerMessageFormat {
}
private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Map seenMap) {
private static void objectArrayAppend(StringBuilder sbuf, Object[] a, Set<Object[]> seen) {
sbuf.append('[');
if (!seenMap.containsKey(a)) {
seenMap.put(a, null);
if (!seen.contains(a)) {
seen.add(a);
final int len = a.length;
for (int i = 0; i < len; i++) {
deeplyAppendParameter(sbuf, a[i], seenMap);
deeplyAppendParameter(sbuf, a[i], seen);
if (i != len - 1)
sbuf.append(", ");
}
// allow repeats in siblings
seenMap.remove(a);
seen.remove(a);
} else {
sbuf.append("...");
}
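The switch from a Map to a Set here works because Java arrays use identity-based equals and hashCode, so a HashSet<Object[]> behaves as an identity set and still detects an array that directly or indirectly contains itself. A small self-contained demonstration of the same approach:
import java.util.HashSet;
import java.util.Set;
public class CycleDemo {
    static void append(StringBuilder sb, Object[] a, Set<Object[]> seen) {
        if (seen.add(a) == false) {    // add() returns false if the array was already seen
            sb.append("...");
            return;
        }
        sb.append('[');
        for (int i = 0; i < a.length; i++) {
            if (a[i] instanceof Object[]) {
                append(sb, (Object[]) a[i], seen);
            } else {
                sb.append(a[i]);
            }
            if (i != a.length - 1) {
                sb.append(", ");
            }
        }
        sb.append(']');
        seen.remove(a);                // allow the same array to repeat among siblings
    }
    public static void main(String[] args) {
        Object[] self = new Object[1];
        self[0] = self;                // an array that contains itself
        StringBuilder sb = new StringBuilder();
        append(sb, self, new HashSet<Object[]>());
        System.out.println(sb);        // prints [...] instead of recursing forever
    }
}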

View File

@ -35,8 +35,6 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
/**
* A set of utilities around Logging.
*
*
*/
public class Loggers {
@ -58,20 +56,24 @@ public class Loggers {
return consoleLoggingEnabled;
}
public static ESLogger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
/** Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of Class. */
/**
* Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings,ShardId,String...)} but String loggerName instead of
* Class.
*/
public static ESLogger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(loggerName, settings, asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
return getLogger(loggerName, settings,
asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class clazz, Settings settings, Index index, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
}
public static ESLogger getLogger(Class clazz, Settings settings, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
}
@ -117,11 +119,11 @@ public class Loggers {
return ESLoggerFactory.getLogger(getLoggerName(s));
}
public static ESLogger getLogger(Class clazz) {
public static ESLogger getLogger(Class<?> clazz) {
return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
}
public static ESLogger getLogger(Class clazz, String... prefixes) {
public static ESLogger getLogger(Class<?> clazz, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), prefixes);
}
@ -146,7 +148,7 @@ public class Loggers {
return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
}
private static String buildClassLoggerName(Class clazz) {
private static String buildClassLoggerName(Class<?> clazz) {
String name = clazz.getName();
if (name.startsWith("org.elasticsearch.")) {
name = Classes.getPackageName(clazz);

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.common.logging.log4j;
package org.elasticsearch.common.logging;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

View File

@ -1,108 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import java.util.logging.Level;
import java.util.logging.LogRecord;
/**
* A {@link LogRecord} which is used in conjunction with {@link JdkESLogger}
* with the ability to provide the class name, method name and line number
* information of the code calling the logger
*/
public class ESLogRecord extends LogRecord {
private static final String FQCN = AbstractESLogger.class.getName();
private String sourceClassName;
private String sourceMethodName;
private transient boolean needToInferCaller;
public ESLogRecord(Level level, String msg) {
super(level, msg);
needToInferCaller = true;
}
@Override
public String getSourceClassName() {
if (needToInferCaller) {
inferCaller();
}
return sourceClassName;
}
@Override
public void setSourceClassName(String sourceClassName) {
this.sourceClassName = sourceClassName;
needToInferCaller = false;
}
@Override
public String getSourceMethodName() {
if (needToInferCaller) {
inferCaller();
}
return sourceMethodName;
}
@Override
public void setSourceMethodName(String sourceMethodName) {
this.sourceMethodName = sourceMethodName;
needToInferCaller = false;
}
/**
* Determines the source information for the caller of the logger (class
* name, method name, and line number)
*/
private void inferCaller() {
needToInferCaller = false;
Throwable throwable = new Throwable();
boolean lookingForLogger = true;
for (final StackTraceElement frame : throwable.getStackTrace()) {
String cname = frame.getClassName();
boolean isLoggerImpl = isLoggerImplFrame(cname);
if (lookingForLogger) {
// Skip all frames until we have found the first logger frame.
if (isLoggerImpl) {
lookingForLogger = false;
}
} else {
if (!isLoggerImpl) {
// skip reflection call
if (!cname.startsWith("java.lang.reflect.") && !cname.startsWith("sun.reflect.")) {
// We've found the relevant frame.
setSourceClassName(cname);
setSourceMethodName(frame.getMethodName());
return;
}
}
}
}
// We haven't found a suitable frame, so just punt. This is
// OK as we are only committed to making a "best effort" here.
}
private boolean isLoggerImplFrame(String cname) {
// the log record could be created for a platform logger
return cname.equals(FQCN);
}
}

View File

@ -1,163 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.jdk;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
/**
*
*/
public class JdkESLogger extends AbstractESLogger {
private final Logger logger;
public JdkESLogger(String prefix, Logger logger) {
super(prefix);
this.logger = logger;
}
@Override
public void setLevel(String level) {
if (level == null) {
logger.setLevel(null);
} else if ("error".equalsIgnoreCase(level)) {
logger.setLevel(Level.SEVERE);
} else if ("warn".equalsIgnoreCase(level)) {
logger.setLevel(Level.WARNING);
} else if ("info".equalsIgnoreCase(level)) {
logger.setLevel(Level.INFO);
} else if ("debug".equalsIgnoreCase(level)) {
logger.setLevel(Level.FINE);
} else if ("trace".equalsIgnoreCase(level)) {
logger.setLevel(Level.FINEST);
}
}
@Override
public String getLevel() {
if (logger.getLevel() == null) {
return null;
}
return logger.getLevel().toString();
}
@Override
public String getName() {
return logger.getName();
}
@Override
public boolean isTraceEnabled() {
return logger.isLoggable(Level.FINEST);
}
@Override
public boolean isDebugEnabled() {
return logger.isLoggable(Level.FINE);
}
@Override
public boolean isInfoEnabled() {
return logger.isLoggable(Level.INFO);
}
@Override
public boolean isWarnEnabled() {
return logger.isLoggable(Level.WARNING);
}
@Override
public boolean isErrorEnabled() {
return logger.isLoggable(Level.SEVERE);
}
@Override
protected void internalTrace(String msg) {
LogRecord record = new ESLogRecord(Level.FINEST, msg);
logger.log(record);
}
@Override
protected void internalTrace(String msg, Throwable cause) {
LogRecord record = new ESLogRecord(Level.FINEST, msg);
record.setThrown(cause);
logger.log(record);
}
@Override
protected void internalDebug(String msg) {
LogRecord record = new ESLogRecord(Level.FINE, msg);
logger.log(record);
}
@Override
protected void internalDebug(String msg, Throwable cause) {
LogRecord record = new ESLogRecord(Level.FINE, msg);
record.setThrown(cause);
logger.log(record);
}
@Override
protected void internalInfo(String msg) {
LogRecord record = new ESLogRecord(Level.INFO, msg);
logger.log(record);
}
@Override
protected void internalInfo(String msg, Throwable cause) {
LogRecord record = new ESLogRecord(Level.INFO, msg);
record.setThrown(cause);
logger.log(record);
}
@Override
protected void internalWarn(String msg) {
LogRecord record = new ESLogRecord(Level.WARNING, msg);
logger.log(record);
}
@Override
protected void internalWarn(String msg, Throwable cause) {
LogRecord record = new ESLogRecord(Level.WARNING, msg);
record.setThrown(cause);
logger.log(record);
}
@Override
protected void internalError(String msg) {
LogRecord record = new ESLogRecord(Level.SEVERE, msg);
logger.log(record);
}
@Override
protected void internalError(String msg, Throwable cause) {
LogRecord record = new ESLogRecord(Level.SEVERE, msg);
record.setThrown(cause);
logger.log(record);
}
protected Logger logger() {
return logger;
}
}

View File

@ -1,147 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.log4j;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.common.logging.support.AbstractESLogger;
/**
*
*/
public class Log4jESLogger extends AbstractESLogger {
private final org.apache.log4j.Logger logger;
private final String FQCN = AbstractESLogger.class.getName();
public Log4jESLogger(String prefix, Logger logger) {
super(prefix);
this.logger = logger;
}
public Logger logger() {
return logger;
}
@Override
public void setLevel(String level) {
if (level == null) {
logger.setLevel(null);
} else if ("error".equalsIgnoreCase(level)) {
logger.setLevel(Level.ERROR);
} else if ("warn".equalsIgnoreCase(level)) {
logger.setLevel(Level.WARN);
} else if ("info".equalsIgnoreCase(level)) {
logger.setLevel(Level.INFO);
} else if ("debug".equalsIgnoreCase(level)) {
logger.setLevel(Level.DEBUG);
} else if ("trace".equalsIgnoreCase(level)) {
logger.setLevel(Level.TRACE);
}
}
@Override
public String getLevel() {
if (logger.getLevel() == null) {
return null;
}
return logger.getLevel().toString();
}
@Override
public String getName() {
return logger.getName();
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
@Override
public boolean isWarnEnabled() {
return logger.isEnabledFor(Level.WARN);
}
@Override
public boolean isErrorEnabled() {
return logger.isEnabledFor(Level.ERROR);
}
@Override
protected void internalTrace(String msg) {
logger.log(FQCN, Level.TRACE, msg, null);
}
@Override
protected void internalTrace(String msg, Throwable cause) {
logger.log(FQCN, Level.TRACE, msg, cause);
}
@Override
protected void internalDebug(String msg) {
logger.log(FQCN, Level.DEBUG, msg, null);
}
@Override
protected void internalDebug(String msg, Throwable cause) {
logger.log(FQCN, Level.DEBUG, msg, cause);
}
@Override
protected void internalInfo(String msg) {
logger.log(FQCN, Level.INFO, msg, null);
}
@Override
protected void internalInfo(String msg, Throwable cause) {
logger.log(FQCN, Level.INFO, msg, cause);
}
@Override
protected void internalWarn(String msg) {
logger.log(FQCN, Level.WARN, msg, null);
}
@Override
protected void internalWarn(String msg, Throwable cause) {
logger.log(FQCN, Level.WARN, msg, cause);
}
@Override
protected void internalError(String msg) {
logger.log(FQCN, Level.ERROR, msg, null);
}
@Override
protected void internalError(String msg, Throwable cause) {
logger.log(FQCN, Level.ERROR, msg, cause);
}
}
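
The FQCN passed to Logger.log(...) above is what keeps caller location intact when logging goes through a wrapper: log4j walks the stack until it leaves the named class, so a %l pattern points at application code rather than at the wrapper. A hypothetical stand-alone sketch of the same call shape:

import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class Log4jFqcnDemo {
    private static final String FQCN = Log4jFqcnDemo.class.getName();
    private static final Logger logger = Logger.getLogger(Log4jFqcnDemo.class);

    // Equivalent to logger.info(msg), but location-aware through this wrapper
    static void info(String msg) {
        logger.log(FQCN, Level.INFO, msg, null);
    }

    public static void main(String[] args) {
        BasicConfigurator.configure(); // simple console appender
        info("hello"); // %l in a PatternLayout resolves to this call site
    }
}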

View File

@ -1,179 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.slf4j;
import org.elasticsearch.common.logging.support.AbstractESLogger;
import org.slf4j.Logger;
import org.slf4j.spi.LocationAwareLogger;
/**
*
*/
public class Slf4jESLogger extends AbstractESLogger {
private final Logger logger;
private final LocationAwareLogger lALogger;
private final String FQCN = AbstractESLogger.class.getName();
public Slf4jESLogger(String prefix, Logger logger) {
super(prefix);
this.logger = logger;
if (logger instanceof LocationAwareLogger) {
lALogger = (LocationAwareLogger) logger;
} else {
lALogger = null;
}
}
@Override
public void setLevel(String level) {
// can't set it in slf4j...
}
@Override
public String getLevel() {
// can't get it in slf4j...
return null;
}
@Override
public String getName() {
return logger.getName();
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
@Override
public boolean isWarnEnabled() {
return logger.isWarnEnabled();
}
@Override
public boolean isErrorEnabled() {
return logger.isErrorEnabled();
}
@Override
protected void internalTrace(String msg) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, null);
} else {
logger.trace(msg);
}
}
@Override
protected void internalTrace(String msg, Throwable cause) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, cause);
} else {
logger.trace(msg);
}
}
@Override
protected void internalDebug(String msg) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, null);
} else {
logger.debug(msg);
}
}
@Override
protected void internalDebug(String msg, Throwable cause) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, cause);
} else {
logger.debug(msg);
}
}
@Override
protected void internalInfo(String msg) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, null);
} else {
logger.info(msg);
}
}
@Override
protected void internalInfo(String msg, Throwable cause) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, cause);
} else {
logger.info(msg, cause);
}
}
@Override
protected void internalWarn(String msg) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, null);
} else {
logger.warn(msg);
}
}
@Override
protected void internalWarn(String msg, Throwable cause) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, cause);
} else {
logger.warn(msg);
}
}
@Override
protected void internalError(String msg) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, null);
} else {
logger.error(msg);
}
}
@Override
protected void internalError(String msg, Throwable cause) {
if (lALogger != null) {
lALogger.log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, cause);
} else {
logger.error(msg);
}
}
protected Logger logger() {
return logger;
}
}
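
Slf4jESLogger applies the same FQCN trick through SLF4J's optional LocationAwareLogger SPI, falling back to the plain API when the backend does not implement it. A hypothetical sketch of that dispatch, assuming an SLF4J binding is on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.spi.LocationAwareLogger;

public class Slf4jLocationAwareDemo {
    private static final String FQCN = Slf4jLocationAwareDemo.class.getName();

    static void info(Logger logger, String msg) {
        if (logger instanceof LocationAwareLogger) {
            // location-aware path: the backend skips this class when resolving the caller
            ((LocationAwareLogger) logger).log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, null);
        } else {
            logger.info(msg); // plain fallback; caller info will point here
        }
    }

    public static void main(String[] args) {
        info(LoggerFactory.getLogger(Slf4jLocationAwareDemo.class), "hello");
    }
}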

View File

@ -1,133 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging.support;
import org.elasticsearch.common.logging.ESLogger;
/**
*
*/
public abstract class AbstractESLogger implements ESLogger {
private final String prefix;
protected AbstractESLogger(String prefix) {
this.prefix = prefix;
}
@Override
public String getPrefix() {
return this.prefix;
}
@Override
public void trace(String msg, Object... params) {
if (isTraceEnabled()) {
internalTrace(LoggerMessageFormat.format(prefix, msg, params));
}
}
protected abstract void internalTrace(String msg);
@Override
public void trace(String msg, Throwable cause, Object... params) {
if (isTraceEnabled()) {
internalTrace(LoggerMessageFormat.format(prefix, msg, params), cause);
}
}
protected abstract void internalTrace(String msg, Throwable cause);
@Override
public void debug(String msg, Object... params) {
if (isDebugEnabled()) {
internalDebug(LoggerMessageFormat.format(prefix, msg, params));
}
}
protected abstract void internalDebug(String msg);
@Override
public void debug(String msg, Throwable cause, Object... params) {
if (isDebugEnabled()) {
internalDebug(LoggerMessageFormat.format(prefix, msg, params), cause);
}
}
protected abstract void internalDebug(String msg, Throwable cause);
@Override
public void info(String msg, Object... params) {
if (isInfoEnabled()) {
internalInfo(LoggerMessageFormat.format(prefix, msg, params));
}
}
protected abstract void internalInfo(String msg);
@Override
public void info(String msg, Throwable cause, Object... params) {
if (isInfoEnabled()) {
internalInfo(LoggerMessageFormat.format(prefix, msg, params), cause);
}
}
protected abstract void internalInfo(String msg, Throwable cause);
@Override
public void warn(String msg, Object... params) {
if (isWarnEnabled()) {
internalWarn(LoggerMessageFormat.format(prefix, msg, params));
}
}
protected abstract void internalWarn(String msg);
@Override
public void warn(String msg, Throwable cause, Object... params) {
if (isWarnEnabled()) {
internalWarn(LoggerMessageFormat.format(prefix, msg, params), cause);
}
}
protected abstract void internalWarn(String msg, Throwable cause);
@Override
public void error(String msg, Object... params) {
if (isErrorEnabled()) {
internalError(LoggerMessageFormat.format(prefix, msg, params));
}
}
protected abstract void internalError(String msg);
@Override
public void error(String msg, Throwable cause, Object... params) {
if (isErrorEnabled()) {
internalError(LoggerMessageFormat.format(prefix, msg, params), cause);
}
}
protected abstract void internalError(String msg, Throwable cause);
}
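
Every level in AbstractESLogger follows the same guard-then-format pattern: check isXxxEnabled() first so LoggerMessageFormat.format(...) only runs for levels that will actually be emitted. A tiny self-contained illustration of why the guard matters; the names here are hypothetical, not from the diff:

import java.util.function.Supplier;

public class GuardedLoggingDemo {
    static boolean debugEnabled = false;

    static void debug(Supplier<String> msg) {
        if (debugEnabled) {
            System.out.println(msg.get()); // formatting happens only here
        }
    }

    public static void main(String[] args) {
        debug(() -> "expensive: " + expensiveToString()); // never evaluated
        System.out.println("done without paying the formatting cost");
    }

    static String expensiveToString() {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 1_000_000; i++) sb.append(i);
        return sb.toString();
    }
}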

View File

@ -18,8 +18,6 @@
*/
package org.elasticsearch.common.netty;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.transport.netty.NettyInternalESLoggerFactory;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
@ -74,7 +72,7 @@ public class NettyUtils {
* sized pages, and if it's a single one, makes sure that it gets sliced and wrapped in a composite
* buffer.
*/
public static final boolean DEFAULT_GATHERING;
public static final boolean DEFAULT_GATHERING = true;
private static EsThreadNameDeterminer ES_THREAD_NAME_DETERMINER = new EsThreadNameDeterminer();
@ -95,13 +93,6 @@ public class NettyUtils {
});
ThreadRenamingRunnable.setThreadNameDeterminer(ES_THREAD_NAME_DETERMINER);
/**
* This is here just to give us an option to rollback the change, if its stable, we should remove
* the option to even set it.
*/
DEFAULT_GATHERING = Booleans.parseBoolean(System.getProperty("es.netty.gathering"), true);
Loggers.getLogger(NettyUtils.class).debug("using gathering [{}]", DEFAULT_GATHERING);
}
public static void setup() {

View File

@ -113,4 +113,8 @@ public final class Cidrs {
assert octets.length == 4;
return octetsToString(octets) + "/" + networkMask;
}
public static String createCIDR(long ipAddress, int networkMask) {
return octetsToCIDR(longToOctets(ipAddress), networkMask);
}
}
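
The new createCIDR(long, int) overload simply composes the existing longToOctets and octetsToCIDR helpers. A stand-alone sketch of the conversion it performs; the helper bodies below are illustrative reconstructions, not the actual Cidrs implementations:

public class CidrDemo {
    // Split a 32-bit IPv4 address held in a long into four octets
    static int[] longToOctets(long ip) {
        return new int[] {
            (int) ((ip >> 24) & 0xFF),
            (int) ((ip >> 16) & 0xFF),
            (int) ((ip >> 8) & 0xFF),
            (int) (ip & 0xFF)
        };
    }

    // Render dotted-quad notation plus the "/mask" suffix
    static String octetsToCIDR(int[] octets, int networkMask) {
        return octets[0] + "." + octets[1] + "." + octets[2] + "." + octets[3] + "/" + networkMask;
    }

    public static void main(String[] args) {
        // 0xC0A80000 == 192.168.0.0
        System.out.println(octetsToCIDR(longToOctets(0xC0A80000L), 16)); // 192.168.0.0/16
    }
}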

View File

@ -19,11 +19,13 @@
package org.elasticsearch.common.rounding;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;
import java.util.Objects;
/**
* A strategy for rounding long values.
@ -61,6 +63,12 @@ public abstract class Rounding implements Streamable {
*/
public abstract long nextRoundingValue(long value);
@Override
public abstract boolean equals(Object obj);
@Override
public abstract int hashCode();
/**
* Rounding strategy which is based on an interval
*
@ -70,6 +78,8 @@ public abstract class Rounding implements Streamable {
final static byte ID = 0;
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
private long interval;
public Interval() { // for serialization
@ -126,12 +136,31 @@ public abstract class Rounding implements Streamable {
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
}
@Override
public int hashCode() {
return Objects.hash(interval);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Interval other = (Interval) obj;
return Objects.equals(interval, other.interval);
}
}
public static class FactorRounding extends Rounding {
final static byte ID = 7;
public static final ParseField FACTOR_FIELD = new ParseField("factor");
private Rounding rounding;
private float factor;
@ -166,7 +195,7 @@ public abstract class Rounding implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
rounding = (TimeZoneRounding) Rounding.Streams.read(in);
rounding = Rounding.Streams.read(in);
factor = in.readFloat();
}
@ -175,12 +204,32 @@ public abstract class Rounding implements Streamable {
Rounding.Streams.write(rounding, out);
out.writeFloat(factor);
}
@Override
public int hashCode() {
return Objects.hash(rounding, factor);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FactorRounding other = (FactorRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(factor, other.factor);
}
}
public static class OffsetRounding extends Rounding {
final static byte ID = 8;
public static final ParseField OFFSET_FIELD = new ParseField("offset");
private Rounding rounding;
private long offset;
@ -224,6 +273,24 @@ public abstract class Rounding implements Streamable {
Rounding.Streams.write(rounding, out);
out.writeLong(offset);
}
@Override
public int hashCode() {
return Objects.hash(rounding, offset);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
OffsetRounding other = (OffsetRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(offset, other.offset);
}
}
public static class Streams {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.common.rounding;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
@ -27,10 +28,13 @@ import org.joda.time.DateTimeZone;
import org.joda.time.DurationField;
import java.io.IOException;
import java.util.Objects;
/**
*/
public abstract class TimeZoneRounding extends Rounding {
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone");
public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
@ -157,6 +161,24 @@ public abstract class TimeZoneRounding extends Rounding {
out.writeByte(unit.id());
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(unit, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit)
&& Objects.equals(timeZone, other.timeZone);
}
}
static class TimeIntervalRounding extends TimeZoneRounding {
@ -214,5 +236,23 @@ public abstract class TimeZoneRounding extends Rounding {
out.writeVLong(interval);
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(interval, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval)
&& Objects.equals(timeZone, other.timeZone);
}
}
}
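
All of the equals/hashCode pairs added across Rounding and TimeZoneRounding follow one pattern: null check, exact-class check via getClass(), then field-wise comparison with java.util.Objects, keeping hashCode consistent with equals. A hypothetical miniature of the pattern:

import java.util.Objects;

final class IntervalLike {
    private final long interval;

    IntervalLike(long interval) {
        this.interval = interval;
    }

    @Override
    public int hashCode() {
        return Objects.hash(interval); // must agree with equals below
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false; // exact-class check, so subclasses never compare equal
        }
        return interval == ((IntervalLike) obj).interval;
    }
}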

View File

@ -22,7 +22,6 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.util.set.Sets;
import java.util.ArrayList;
import java.util.Collections;
@ -63,6 +62,11 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
}
if (setting.hasComplexMatcher()) {
Setting<?> overlappingSetting = findOverlappingSetting(setting, complexMatchers);
if (overlappingSetting != null) {
throw new IllegalArgumentException("complex setting key: [" + setting.getKey() + "] overlaps existing setting key: [" +
overlappingSetting.getKey() + "]");
}
complexMatchers.putIfAbsent(setting.getKey(), setting);
} else {
keySettings.putIfAbsent(setting.getKey(), setting);
@ -410,4 +414,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return changed;
}
private static Setting<?> findOverlappingSetting(Setting<?> newSetting, Map<String, Setting<?>> complexMatchers) {
assert newSetting.hasComplexMatcher();
if (complexMatchers.containsKey(newSetting.getKey())) {
// we return null here because we use a putIfAbsent call when inserting into the map, so if it exists then we already checked
// the setting to make sure there are no overlapping settings.
return null;
}
for (Setting<?> existingSetting : complexMatchers.values()) {
if (newSetting.match(existingSetting.getKey()) || existingSetting.match(newSetting.getKey())) {
return existingSetting;
}
}
return null;
}
}
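
findOverlappingSetting guards against two group (wildcard) settings shadowing each other: a new complex key is rejected when either pattern matches the other's key. A simplified, hypothetical model of that check, with Setting#match replaced by a crude prefix test:

import java.util.LinkedHashMap;
import java.util.Map;

public class OverlapCheckDemo {
    // Stand-in for Setting#match; the real matching is richer than a prefix test
    static boolean matches(String pattern, String key) {
        return pattern.endsWith(".") ? key.startsWith(pattern) : pattern.equals(key);
    }

    static String findOverlapping(String newKey, Map<String, String> existing) {
        if (existing.containsKey(newKey)) {
            return null; // exact duplicates are handled by putIfAbsent, not here
        }
        for (String existingKey : existing.keySet()) {
            if (matches(newKey, existingKey) || matches(existingKey, newKey)) {
                return existingKey;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Map<String, String> registered = new LinkedHashMap<>();
        registered.put("index.store.", "group setting");
        System.out.println(findOverlapping("index.", registered)); // index.store.
    }
}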

View File

@ -65,8 +65,8 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.IndicesRequestCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
@ -307,11 +307,10 @@ public final class ClusterSettings extends AbstractScopedSettings {
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,

View File

@ -39,7 +39,7 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.IndicesRequestCache;
import java.util.Arrays;
import java.util.Collections;

View File

@ -102,7 +102,7 @@ public class SettingsModule extends AbstractModule {
}
public void registerSettingsFilterIfMissing(String filter) {
if (settingsFilterPattern.contains(filter)) {
if (settingsFilterPattern.contains(filter) == false) {
registerSettingsFilter(filter);
}
}

View File

@ -265,17 +265,17 @@ public class TimeValue implements Streamable {
long millis;
String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim();
if (lowerSValue.endsWith("ms")) {
millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 2)));
millis = parse(lowerSValue, 2, 1);
} else if (lowerSValue.endsWith("s")) {
millis = (long) Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 1000;
millis = parse(lowerSValue, 1, 1000);
} else if (lowerSValue.endsWith("m")) {
millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 1000);
millis = parse(lowerSValue, 1, 60 * 1000);
} else if (lowerSValue.endsWith("h")) {
millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 60 * 1000);
millis = parse(lowerSValue, 1, 60 * 60 * 1000);
} else if (lowerSValue.endsWith("d")) {
millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 24 * 60 * 60 * 1000);
millis = parse(lowerSValue, 1, 24 * 60 * 60 * 1000);
} else if (lowerSValue.endsWith("w")) {
millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 7 * 24 * 60 * 60 * 1000);
millis = parse(lowerSValue, 1, 7 * 24 * 60 * 60 * 1000);
} else if (lowerSValue.equals("-1")) {
// Allow this special value to be unit-less:
millis = -1;
@ -292,6 +292,10 @@ public class TimeValue implements Streamable {
}
}
private static long parse(String s, int suffixLength, long scale) {
return (long) (Double.parseDouble(s.substring(0, s.length() - suffixLength)) * scale);
}
static final long C0 = 1L;
static final long C1 = C0 * 1000L;
static final long C2 = C1 * 1000L;
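
The new parse helper deduplicates the suffix handling above: strip the unit suffix, parse the remainder as a double, and scale to milliseconds. A stand-alone sketch of the same logic:

public class TimeValueParseDemo {
    static long parse(String s, int suffixLength, long scale) {
        return (long) (Double.parseDouble(s.substring(0, s.length() - suffixLength)) * scale);
    }

    public static void main(String[] args) {
        System.out.println(parse("500ms", 2, 1));            // 500
        System.out.println(parse("1.5s", 1, 1000));          // 1500
        System.out.println(parse("2m", 1, 60 * 1000));       // 120000
        System.out.println(parse("1h", 1, 60 * 60 * 1000));  // 3600000
    }
}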

View File

@ -36,8 +36,6 @@ import java.util.concurrent.LinkedTransferQueue;
*/
public abstract class ConcurrentCollections {
private final static boolean useLinkedTransferQueue = Boolean.parseBoolean(System.getProperty("es.useLinkedTransferQueue", "false"));
static final int aggressiveConcurrencyLevel;
static {
@ -71,9 +69,6 @@ public abstract class ConcurrentCollections {
}
public static <T> Queue<T> newQueue() {
if (useLinkedTransferQueue) {
return new LinkedTransferQueue<>();
}
return new ConcurrentLinkedQueue<>();
}

View File

@ -43,9 +43,12 @@ import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.function.BiConsumer;
/**
*
@ -1227,52 +1230,101 @@ public final class XContentBuilder implements BytesStream, Releasable {
generator.writeEndObject();
}
@FunctionalInterface
interface Writer {
void write(XContentGenerator g, Object v) throws IOException;
}
private final static Map<Class<?>, Writer> MAP;
static {
Map<Class<?>, Writer> map = new HashMap<>();
map.put(String.class, (g, v) -> g.writeString((String) v));
map.put(Integer.class, (g, v) -> g.writeNumber((Integer) v));
map.put(Long.class, (g, v) -> g.writeNumber((Long) v));
map.put(Float.class, (g, v) -> g.writeNumber((Float) v));
map.put(Double.class, (g, v) -> g.writeNumber((Double) v));
map.put(Byte.class, (g, v) -> g.writeNumber((Byte) v));
map.put(Short.class, (g, v) -> g.writeNumber((Short) v));
map.put(Boolean.class, (g, v) -> g.writeBoolean((Boolean) v));
map.put(GeoPoint.class, (g, v) -> {
g.writeStartObject();
g.writeNumberField("lat", ((GeoPoint) v).lat());
g.writeNumberField("lon", ((GeoPoint) v).lon());
g.writeEndObject();
});
map.put(int[].class, (g, v) -> {
g.writeStartArray();
for (int item : (int[]) v) {
g.writeNumber(item);
}
g.writeEndArray();
});
map.put(long[].class, (g, v) -> {
g.writeStartArray();
for (long item : (long[]) v) {
g.writeNumber(item);
}
g.writeEndArray();
});
map.put(float[].class, (g, v) -> {
g.writeStartArray();
for (float item : (float[]) v) {
g.writeNumber(item);
}
g.writeEndArray();
});
map.put(double[].class, (g, v) -> {
g.writeStartArray();
for (double item : (double[])v) {
g.writeNumber(item);
}
g.writeEndArray();
});
map.put(byte[].class, (g, v) -> g.writeBinary((byte[]) v));
map.put(short[].class, (g, v) -> {
g.writeStartArray();
for (short item : (short[])v) {
g.writeNumber(item);
}
g.writeEndArray();
});
map.put(BytesRef.class, (g, v) -> {
BytesRef bytes = (BytesRef) v;
g.writeBinary(bytes.bytes, bytes.offset, bytes.length);
});
map.put(Text.class, (g, v) -> {
Text text = (Text) v;
if (text.hasBytes() && text.bytes().hasArray()) {
g.writeUTF8String(text.bytes().array(), text.bytes().arrayOffset(), text.bytes().length());
} else if (text.hasString()) {
g.writeString(text.string());
} else {
BytesArray bytesArray = text.bytes().toBytesArray();
g.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
}
});
MAP = Collections.unmodifiableMap(map);
}
private void writeValue(Object value) throws IOException {
if (value == null) {
generator.writeNull();
return;
}
Class<?> type = value.getClass();
if (type == String.class) {
generator.writeString((String) value);
} else if (type == Integer.class) {
generator.writeNumber(((Integer) value).intValue());
} else if (type == Long.class) {
generator.writeNumber(((Long) value).longValue());
} else if (type == Float.class) {
generator.writeNumber(((Float) value).floatValue());
} else if (type == Double.class) {
generator.writeNumber(((Double) value).doubleValue());
} else if (type == Byte.class) {
generator.writeNumber(((Byte)value).byteValue());
} else if (type == Short.class) {
generator.writeNumber(((Short) value).shortValue());
} else if (type == Boolean.class) {
generator.writeBoolean(((Boolean) value).booleanValue());
} else if (type == GeoPoint.class) {
generator.writeStartObject();
generator.writeNumberField("lat", ((GeoPoint) value).lat());
generator.writeNumberField("lon", ((GeoPoint) value).lon());
generator.writeEndObject();
Writer writer = MAP.get(type);
if (writer != null) {
writer.write(generator, value);
} else if (value instanceof Map) {
writeMap((Map) value);
} else if (value instanceof Path) {
// Path implements Iterable<Path> and causes endless recursion and a StackOverflowError if treated as an Iterable here
generator.writeString(value.toString());
} else if (value instanceof Iterable) {
generator.writeStartArray();
for (Object v : (Iterable<?>) value) {
writeValue(v);
}
generator.writeEndArray();
writeIterable((Iterable<?>) value);
} else if (value instanceof Object[]) {
generator.writeStartArray();
for (Object v : (Object[]) value) {
writeValue(v);
}
generator.writeEndArray();
} else if (type == byte[].class) {
generator.writeBinary((byte[]) value);
writeObjectArray((Object[]) value);
} else if (value instanceof Date) {
generator.writeString(XContentBuilder.defaultDatePrinter.print(((Date) value).getTime()));
} else if (value instanceof Calendar) {
@ -1280,56 +1332,9 @@ public final class XContentBuilder implements BytesStream, Releasable {
} else if (value instanceof ReadableInstant) {
generator.writeString(XContentBuilder.defaultDatePrinter.print((((ReadableInstant) value)).getMillis()));
} else if (value instanceof BytesReference) {
BytesReference bytes = (BytesReference) value;
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
generator.writeBinary(bytes.array(), bytes.arrayOffset(), bytes.length());
} else if (value instanceof BytesRef) {
BytesRef bytes = (BytesRef) value;
generator.writeBinary(bytes.bytes, bytes.offset, bytes.length);
} else if (value instanceof Text) {
Text text = (Text) value;
if (text.hasBytes() && text.bytes().hasArray()) {
generator.writeUTF8String(text.bytes().array(), text.bytes().arrayOffset(), text.bytes().length());
} else if (text.hasString()) {
generator.writeString(text.string());
} else {
BytesArray bytesArray = text.bytes().toBytesArray();
generator.writeUTF8String(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length());
}
writeBytesReference((BytesReference) value);
} else if (value instanceof ToXContent) {
((ToXContent) value).toXContent(this, ToXContent.EMPTY_PARAMS);
} else if (value instanceof double[]) {
generator.writeStartArray();
for (double v : (double[]) value) {
generator.writeNumber(v);
}
generator.writeEndArray();
} else if (value instanceof long[]) {
generator.writeStartArray();
for (long v : (long[]) value) {
generator.writeNumber(v);
}
generator.writeEndArray();
} else if (value instanceof int[]) {
generator.writeStartArray();
for (int v : (int[]) value) {
generator.writeNumber(v);
}
generator.writeEndArray();
} else if (value instanceof float[]) {
generator.writeStartArray();
for (float v : (float[]) value) {
generator.writeNumber(v);
}
generator.writeEndArray();
} else if (value instanceof short[]) {
generator.writeStartArray();
for (short v : (short[]) value) {
generator.writeNumber(v);
}
generator.writeEndArray();
} else {
// if this is a "value" object, like enum, DistanceUnit, ..., just toString it
// yea, it can be misleading when toString a Java class, but really, jackson should be used in that case
@ -1337,4 +1342,29 @@ public final class XContentBuilder implements BytesStream, Releasable {
//throw new ElasticsearchIllegalArgumentException("type not supported for generic value conversion: " + type);
}
}
private void writeBytesReference(BytesReference value) throws IOException {
BytesReference bytes = value;
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
generator.writeBinary(bytes.array(), bytes.arrayOffset(), bytes.length());
}
private void writeIterable(Iterable<?> value) throws IOException {
generator.writeStartArray();
for (Object v : value) {
writeValue(v);
}
generator.writeEndArray();
}
private void writeObjectArray(Object[] value) throws IOException {
generator.writeStartArray();
for (Object v : value) {
writeValue(v);
}
generator.writeEndArray();
}
}
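
The refactoring above replaces a long if/else-if type ladder in writeValue with a static Map<Class<?>, Writer> and a single lookup. Note that the map only hits exact runtime classes, which is why Map, Iterable, and Object[] values still go through instanceof branches. A self-contained miniature of the dispatch-map pattern, using a hypothetical class rather than the real XContentBuilder:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class DispatchMapDemo {
    @FunctionalInterface
    interface Writer {
        void write(StringBuilder out, Object value);
    }

    private static final Map<Class<?>, Writer> WRITERS;
    static {
        Map<Class<?>, Writer> map = new HashMap<>();
        map.put(String.class, (out, v) -> out.append('"').append((String) v).append('"'));
        map.put(Integer.class, (out, v) -> out.append((int) (Integer) v));
        map.put(Boolean.class, (out, v) -> out.append((boolean) (Boolean) v));
        WRITERS = Collections.unmodifiableMap(map);
    }

    static String render(Object value) {
        StringBuilder out = new StringBuilder();
        Writer writer = WRITERS.get(value.getClass()); // one lookup instead of a ladder
        if (writer != null) {
            writer.write(out, value);
        } else {
            out.append(value); // fallback mirrors the toString branch above
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("hi"));  // "hi"
        System.out.println(render(42));    // 42
        System.out.println(render(true));  // true
    }
}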

View File

@ -94,6 +94,12 @@ public interface Discovery extends LifecycleComponent<Discovery> {
DiscoveryStats stats();
/**
* Triggers the first join cycle
*/
void startInitialJoin();
/***
* @return the current value of minimum master nodes, or -1 for not set
*/

View File

@ -87,8 +87,9 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
logger.info(discovery.nodeDescription());
}
public void waitForInitialState() {
public void joinClusterAndWaitForInitialState() {
try {
discovery.startInitialJoin();
if (!initialStateListener.waitForInitialState(initialStateTimeout)) {
logger.warn("waited for {} and no initial state was set by the discovery", initialStateTimeout);
}

View File

@ -100,6 +100,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
protected void doStart() {
}
@Override
public void startInitialJoin() {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {

View File

@ -216,7 +216,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
joinThreadControl.start();
pingService.start();
this.nodeJoinController = new NodeJoinController(clusterService, routingService, discoverySettings, settings);
}
@Override
public void startInitialJoin() {
// start the join thread from a cluster state update. See {@link JoinThreadControl} for details.
clusterService.submitStateUpdateTask("initial_join", new ClusterStateUpdateTask() {

View File

@ -40,9 +40,11 @@ import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
@ -76,7 +78,7 @@ import static java.util.Collections.unmodifiableSet;
/**
* A component that holds all data paths for a single node.
*/
public class NodeEnvironment extends AbstractComponent implements Closeable {
public final class NodeEnvironment extends AbstractComponent implements Closeable {
public static class NodePath {
/* ${data.paths}/nodes/{node.id} */
public final Path path;
@ -167,64 +169,71 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
localNodeId = -1;
return;
}
final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
final Lock[] locks = new Lock[nodePaths.length];
sharedDataPath = environment.sharedDataFile();
boolean success = false;
int localNodeId = -1;
IOException lastException = null;
int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
try {
sharedDataPath = environment.sharedDataFile();
int localNodeId = -1;
IOException lastException = null;
int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
try {
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
nodePaths[dirIndex] = new NodePath(dir, environment);
localNodeId = possibleLockId;
} catch (LockObtainFailedException ex) {
logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
try {
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
nodePaths[dirIndex] = new NodePath(dir, environment);
localNodeId = possibleLockId;
} catch (LockObtainFailedException ex) {
logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);
break;
}
} catch (IOException e) {
logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);
break;
}
} catch (IOException e) {
logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);
}
if (locks[0] != null) {
// we found a lock, break
break;
}
}
if (locks[0] != null) {
// we found a lock, break
break;
if (locks[0] == null) {
throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
+ Arrays.toString(environment.dataWithClusterFiles()), lastException);
}
this.localNodeId = localNodeId;
this.locks = locks;
this.nodePaths = nodePaths;
if (logger.isDebugEnabled()) {
logger.debug("using node location [{}], local_node_id [{}]", nodePaths, localNodeId);
}
maybeLogPathDetails();
maybeLogHeapDetails();
applySegmentInfosTrace(settings);
assertCanWrite();
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(locks);
}
}
if (locks[0] == null) {
throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
+ Arrays.toString(environment.dataWithClusterFiles()), lastException);
}
this.localNodeId = localNodeId;
this.locks = locks;
this.nodePaths = nodePaths;
if (logger.isDebugEnabled()) {
logger.debug("using node location [{}], local_node_id [{}]", nodePaths, localNodeId);
}
maybeLogPathDetails();
maybeLogHeapDetails();
maybeWarnFileDescriptors();
applySegmentInfosTrace(settings);
}
private static void releaseAndNullLocks(Lock[] locks) {
@ -315,19 +324,6 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
}
private void maybeWarnFileDescriptors() {
long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
if (maxFileDescriptorCount == -1) {
return;
}
int fileDescriptorCountThreshold = (1 << 16);
if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
logger.warn(
"max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
maxFileDescriptorCount,
fileDescriptorCountThreshold);
}
}
@SuppressForbidden(reason = "System.out.*")
static void applySegmentInfosTrace(Settings settings) {
@ -807,7 +803,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
}
@Override
public void close() {
public final void close() {
if (closed.compareAndSet(false, true) && locks != null) {
for (Lock lock : locks) {
try {
@ -923,4 +919,45 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
return shardPath.getParent().getParent().getParent();
}
/**
* This is a best effort to ensure that we actually have write permissions to write in all our data directories.
* This prevents disasters if nodes are started under the wrong username etc.
*/
private void assertCanWrite() throws IOException {
for (Path path : nodeDataPaths()) { // check node-paths are writable
tryWriteTempFile(path);
}
for (String index : this.findAllIndices()) {
for (Path path : this.indexPaths(index)) { // check index paths are writable
Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
tryWriteTempFile(statePath);
tryWriteTempFile(path);
}
for (ShardId shardID : this.findAllShardIds(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE))) {
Path[] paths = this.availableShardPaths(shardID);
for (Path path : paths) { // check shard paths are writable
Path indexDir = path.resolve(ShardPath.INDEX_FOLDER_NAME);
Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
Path translogDir = path.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
tryWriteTempFile(indexDir);
tryWriteTempFile(translogDir);
tryWriteTempFile(statePath);
tryWriteTempFile(path);
}
}
}
}
private static void tryWriteTempFile(Path path) throws IOException {
if (Files.exists(path)) {
Path resolve = path.resolve(".es_temp_file");
try {
Files.createFile(resolve);
Files.deleteIfExists(resolve);
} catch (IOException ex) {
throw new IOException("failed to write in data directory [" + path + "] write permission is required", ex);
}
}
}
}
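
assertCanWrite and tryWriteTempFile probe each data directory at startup by creating and deleting a marker file, so a node started under the wrong user fails fast with a clear message instead of corrupting state later. A stand-alone sketch of the probe, pointed at the JVM temp directory purely for illustration:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class WriteProbeDemo {
    static void tryWriteTempFile(Path path) throws IOException {
        if (Files.exists(path)) {
            Path probe = path.resolve(".es_temp_file");
            try {
                Files.createFile(probe);       // fails if the directory is not writable
                Files.deleteIfExists(probe);   // clean up the marker immediately
            } catch (IOException ex) {
                throw new IOException("failed to write in data directory [" + path
                    + "] write permission is required", ex);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        tryWriteTempFile(Paths.get(System.getProperty("java.io.tmpdir")));
        System.out.println("temp dir is writable");
    }
}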

View File

@ -133,27 +133,6 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
@Override
protected void doStart() {
clusterService.addLast(this);
// check we didn't miss any cluster state that came in until now / during the addition
clusterService.submitStateUpdateTask("gateway_initial_state_recovery", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
checkStateMeetsSettingsAndMaybeRecover(currentState);
return currentState;
}
@Override
public boolean runOnlyOnMaster() {
// It's OK to run on non masters as checkStateMeetsSettingsAndMaybeRecover checks for this
// we return false to avoid unneeded failure logs
return false;
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("unexpected failure while checking if state can be recovered. another attempt will be made with the next cluster state change", t);
}
});
}
@Override
@ -170,10 +149,9 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
if (lifecycle.stoppedOrClosed()) {
return;
}
checkStateMeetsSettingsAndMaybeRecover(event.state());
}
protected void checkStateMeetsSettingsAndMaybeRecover(ClusterState state) {
final ClusterState state = event.state();
if (state.nodes().localNodeMaster() == false) {
// not our job to recover
return;

View File

@ -43,6 +43,8 @@ import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import java.util.Collections;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -51,10 +53,7 @@ import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;
/**
*
*/
public class NettyHttpChannel extends HttpChannel {
public final class NettyHttpChannel extends HttpChannel {
private final NettyHttpServerTransport transport;
private final Channel channel;
@ -92,18 +91,11 @@ public class NettyHttpChannel extends HttpChannel {
String opaque = nettyRequest.headers().get("X-Opaque-Id");
if (opaque != null) {
resp.headers().add("X-Opaque-Id", opaque);
setHeaderField(resp, "X-Opaque-Id", opaque);
}
// Add all custom headers
Map<String, List<String>> customHeaders = response.getHeaders();
if (customHeaders != null) {
for (Map.Entry<String, List<String>> headerEntry : customHeaders.entrySet()) {
for (String headerValue : headerEntry.getValue()) {
resp.headers().add(headerEntry.getKey(), headerValue);
}
}
}
addCustomHeaders(response, resp);
BytesReference content = response.content();
ChannelBuffer buffer;
@ -113,30 +105,11 @@ public class NettyHttpChannel extends HttpChannel {
resp.setContent(buffer);
// If our response doesn't specify a content-type header, set one
if (!resp.headers().contains(HttpHeaders.Names.CONTENT_TYPE)) {
resp.headers().add(HttpHeaders.Names.CONTENT_TYPE, response.contentType());
}
setHeaderField(resp, HttpHeaders.Names.CONTENT_TYPE, response.contentType(), false);
// If our response has no content-length, calculate and set one
if (!resp.headers().contains(HttpHeaders.Names.CONTENT_LENGTH)) {
resp.headers().add(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(buffer.readableBytes()));
}
setHeaderField(resp, HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(buffer.readableBytes()), false);
if (transport.resetCookies) {
String cookieString = nettyRequest.headers().get(HttpHeaders.Names.COOKIE);
if (cookieString != null) {
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
if (!cookies.isEmpty()) {
// Reset the cookies if necessary.
CookieEncoder cookieEncoder = new CookieEncoder(true);
for (Cookie cookie : cookies) {
cookieEncoder.addCookie(cookie);
}
resp.headers().add(HttpHeaders.Names.SET_COOKIE, cookieEncoder.encode());
}
}
}
addCookies(resp);
ChannelFuture future;
@ -164,6 +137,45 @@ public class NettyHttpChannel extends HttpChannel {
}
}
private void setHeaderField(HttpResponse resp, String headerField, String value) {
setHeaderField(resp, headerField, value, true);
}
private void setHeaderField(HttpResponse resp, String headerField, String value, boolean override) {
if (override || !resp.headers().contains(headerField)) {
resp.headers().add(headerField, value);
}
}
private void addCookies(HttpResponse resp) {
if (transport.resetCookies) {
String cookieString = nettyRequest.headers().get(HttpHeaders.Names.COOKIE);
if (cookieString != null) {
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
if (!cookies.isEmpty()) {
// Reset the cookies if necessary.
CookieEncoder cookieEncoder = new CookieEncoder(true);
for (Cookie cookie : cookies) {
cookieEncoder.addCookie(cookie);
}
setHeaderField(resp, HttpHeaders.Names.SET_COOKIE, cookieEncoder.encode());
}
}
}
}
private void addCustomHeaders(RestResponse response, HttpResponse resp) {
Map<String, List<String>> customHeaders = response.getHeaders();
if (customHeaders != null) {
for (Map.Entry<String, List<String>> headerEntry : customHeaders.entrySet()) {
for (String headerValue : headerEntry.getValue()) {
setHeaderField(resp, headerEntry.getKey(), headerValue);
}
}
}
}
// Determine if the request protocol version is HTTP 1.0
private boolean isHttp10() {
return nettyRequest.getProtocolVersion().equals(HttpVersion.HTTP_1_0);
@ -196,101 +208,59 @@ public class NettyHttpChannel extends HttpChannel {
private static final HttpResponseStatus TOO_MANY_REQUESTS = new HttpResponseStatus(429, "Too Many Requests");
private HttpResponseStatus getStatus(RestStatus status) {
switch (status) {
case CONTINUE:
return HttpResponseStatus.CONTINUE;
case SWITCHING_PROTOCOLS:
return HttpResponseStatus.SWITCHING_PROTOCOLS;
case OK:
return HttpResponseStatus.OK;
case CREATED:
return HttpResponseStatus.CREATED;
case ACCEPTED:
return HttpResponseStatus.ACCEPTED;
case NON_AUTHORITATIVE_INFORMATION:
return HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION;
case NO_CONTENT:
return HttpResponseStatus.NO_CONTENT;
case RESET_CONTENT:
return HttpResponseStatus.RESET_CONTENT;
case PARTIAL_CONTENT:
return HttpResponseStatus.PARTIAL_CONTENT;
case MULTI_STATUS:
// no status for this??
return HttpResponseStatus.INTERNAL_SERVER_ERROR;
case MULTIPLE_CHOICES:
return HttpResponseStatus.MULTIPLE_CHOICES;
case MOVED_PERMANENTLY:
return HttpResponseStatus.MOVED_PERMANENTLY;
case FOUND:
return HttpResponseStatus.FOUND;
case SEE_OTHER:
return HttpResponseStatus.SEE_OTHER;
case NOT_MODIFIED:
return HttpResponseStatus.NOT_MODIFIED;
case USE_PROXY:
return HttpResponseStatus.USE_PROXY;
case TEMPORARY_REDIRECT:
return HttpResponseStatus.TEMPORARY_REDIRECT;
case BAD_REQUEST:
return HttpResponseStatus.BAD_REQUEST;
case UNAUTHORIZED:
return HttpResponseStatus.UNAUTHORIZED;
case PAYMENT_REQUIRED:
return HttpResponseStatus.PAYMENT_REQUIRED;
case FORBIDDEN:
return HttpResponseStatus.FORBIDDEN;
case NOT_FOUND:
return HttpResponseStatus.NOT_FOUND;
case METHOD_NOT_ALLOWED:
return HttpResponseStatus.METHOD_NOT_ALLOWED;
case NOT_ACCEPTABLE:
return HttpResponseStatus.NOT_ACCEPTABLE;
case PROXY_AUTHENTICATION:
return HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED;
case REQUEST_TIMEOUT:
return HttpResponseStatus.REQUEST_TIMEOUT;
case CONFLICT:
return HttpResponseStatus.CONFLICT;
case GONE:
return HttpResponseStatus.GONE;
case LENGTH_REQUIRED:
return HttpResponseStatus.LENGTH_REQUIRED;
case PRECONDITION_FAILED:
return HttpResponseStatus.PRECONDITION_FAILED;
case REQUEST_ENTITY_TOO_LARGE:
return HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE;
case REQUEST_URI_TOO_LONG:
return HttpResponseStatus.REQUEST_URI_TOO_LONG;
case UNSUPPORTED_MEDIA_TYPE:
return HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE;
case REQUESTED_RANGE_NOT_SATISFIED:
return HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE;
case EXPECTATION_FAILED:
return HttpResponseStatus.EXPECTATION_FAILED;
case UNPROCESSABLE_ENTITY:
return HttpResponseStatus.BAD_REQUEST;
case LOCKED:
return HttpResponseStatus.BAD_REQUEST;
case FAILED_DEPENDENCY:
return HttpResponseStatus.BAD_REQUEST;
case TOO_MANY_REQUESTS:
return TOO_MANY_REQUESTS;
case INTERNAL_SERVER_ERROR:
return HttpResponseStatus.INTERNAL_SERVER_ERROR;
case NOT_IMPLEMENTED:
return HttpResponseStatus.NOT_IMPLEMENTED;
case BAD_GATEWAY:
return HttpResponseStatus.BAD_GATEWAY;
case SERVICE_UNAVAILABLE:
return HttpResponseStatus.SERVICE_UNAVAILABLE;
case GATEWAY_TIMEOUT:
return HttpResponseStatus.GATEWAY_TIMEOUT;
case HTTP_VERSION_NOT_SUPPORTED:
return HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED;
default:
return HttpResponseStatus.INTERNAL_SERVER_ERROR;
}
static Map<RestStatus, HttpResponseStatus> MAP;
static {
EnumMap<RestStatus, HttpResponseStatus> map = new EnumMap<>(RestStatus.class);
map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE);
map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS);
map.put(RestStatus.OK, HttpResponseStatus.OK);
map.put(RestStatus.CREATED, HttpResponseStatus.CREATED);
map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED);
map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION);
map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT);
map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT);
map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT);
map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this??
map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES);
map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY);
map.put(RestStatus.FOUND, HttpResponseStatus.FOUND);
map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER);
map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED);
map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY);
map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT);
map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST);
map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED);
map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED);
map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN);
map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND);
map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED);
map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE);
map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED);
map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT);
map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT);
map.put(RestStatus.GONE, HttpResponseStatus.GONE);
map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED);
map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED);
map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);
map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG);
map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE);
map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE);
map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED);
map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST);
map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST);
map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST);
map.put(RestStatus.TOO_MANY_REQUESTS, TOO_MANY_REQUESTS);
map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR);
map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED);
map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY);
map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE);
map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT);
map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED);
MAP = Collections.unmodifiableMap(map);
}
private static HttpResponseStatus getStatus(RestStatus status) {
return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR);
}
}
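
The getStatus refactoring swaps a roughly hundred-line switch for an EnumMap built once in a static initializer plus a getOrDefault fallback. A miniature with hypothetical enum constants:

import java.util.Collections;
import java.util.EnumMap;
import java.util.Map;

public class EnumMapLookupDemo {
    enum RestStatus { OK, NOT_FOUND, TEAPOT }

    private static final Map<RestStatus, Integer> MAP;
    static {
        EnumMap<RestStatus, Integer> map = new EnumMap<>(RestStatus.class);
        map.put(RestStatus.OK, 200);
        map.put(RestStatus.NOT_FOUND, 404);
        MAP = Collections.unmodifiableMap(map);
    }

    static int getStatus(RestStatus status) {
        return MAP.getOrDefault(status, 500); // default mirrors INTERNAL_SERVER_ERROR
    }

    public static void main(String[] args) {
        System.out.println(getStatus(RestStatus.NOT_FOUND)); // 404
        System.out.println(getStatus(RestStatus.TEAPOT));    // 500, the fallback
    }
}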

View File

@ -37,7 +37,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
@ -154,7 +154,7 @@ public final class IndexModule {
*/
public void addIndexStore(String type, BiFunction<IndexSettings, IndexStoreConfig, IndexStore> provider) {
if (storeTypes.containsKey(type)) {
throw new IllegalArgumentException("key [" + type +"] already registerd");
throw new IllegalArgumentException("key [" + type +"] already registered");
}
storeTypes.put(type, provider);
}

View File

@ -420,7 +420,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
* Creates a new QueryShardContext. The context has no types set yet; if types are required, set them via {@link QueryShardContext#setTypes(String...)}
*/
public QueryShardContext newQueryShardContext() {
return new QueryShardContext(indexSettings, nodeServicesProvider.getClient(), indexCache.bitsetFilterCache(), indexFieldData, mapperService(), similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry());
return new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry());
}
ThreadPool getThreadPool() {

Some files were not shown because too many files have changed in this diff.