diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 1a4e5b58f33..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: java
-jdk:
- - openjdk7
-
-env:
- - ES_TEST_LOCAL=true
- - ES_TEST_LOCAL=false
-
-notifications:
- email: false
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 5eea0b8c163..ce81f97548f 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -18,24 +18,18 @@ gradle assemble
== Other test options
-To disable and enable network transport, set the `Des.node.mode`.
+To disable and enable network transport, set the `tests.es.node.mode` system property.
Use network transport:
------------------------------------
--Des.node.mode=network
+-Dtests.es.node.mode=network
------------------------------------
Use local transport (default since 1.3):
-------------------------------------
--Des.node.mode=local
--------------------------------------
-
-Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
-
--------------------------------------
-export ES_TEST_LOCAL=true && gradle test
+-Dtests.es.node.mode=local
-------------------------------------
=== Running Elasticsearch from a checkout
@@ -201,7 +195,7 @@ gradle test -Dtests.timeoutSuite=5000! ...
Change the logging level of ES (not gradle)
--------------------------------
-gradle test -Des.logger.level=DEBUG
+gradle test -Dtests.es.logger.level=DEBUG
--------------------------------
Print all the logging output from the test runs to the commandline
diff --git a/build.gradle b/build.gradle
index 3d52d4ab279..3f0895a4e75 100644
--- a/build.gradle
+++ b/build.gradle
@@ -81,7 +81,7 @@ subprojects {
nexus {
String buildSnapshot = System.getProperty('build.snapshot', 'true')
if (buildSnapshot == 'false') {
- Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build()
+ Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
}
@@ -144,6 +144,14 @@ subprojects {
// see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
javadoc.options.encoding='UTF8'
javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
+ /*
+ TODO: building javadocs with java 9 b118 is currently broken with weird errors, so
+ for now this is commented out...try again with the next ea build...
+ javadoc.executable = new File(project.javaHome, 'bin/javadoc')
+ if (project.javaVersion == JavaVersion.VERSION_1_9) {
+ // TODO: remove this hack! gradle should be passing this...
+ javadoc.options.addStringOption('source', '8')
+ }*/
}
}
@@ -261,13 +269,6 @@ tasks.idea.doLast {
if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) {
throw new GradleException('You must run gradle idea from the root of elasticsearch before importing into IntelliJ')
}
-// add buildSrc itself as a groovy project
-task buildSrcIdea(type: GradleBuild) {
- buildFile = 'buildSrc/build.gradle'
- tasks = ['cleanIdea', 'ideaModule']
-}
-tasks.idea.dependsOn(buildSrcIdea)
-
// eclipse configuration
allprojects {
@@ -310,13 +311,6 @@ allprojects {
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
}
-// add buildSrc itself as a groovy project
-task buildSrcEclipse(type: GradleBuild) {
- buildFile = 'buildSrc/build.gradle'
- tasks = ['cleanEclipse', 'eclipse']
-}
-tasks.eclipse.dependsOn(buildSrcEclipse)
-
// we need to add the same --debug-jvm option as
// the real RunTask has, so we can pass it through
class Run extends DefaultTask {
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index b286124bfee..623fdab3e3e 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -84,7 +84,7 @@ dependencies {
compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
- compile 'de.thetaphi:forbiddenapis:2.0'
+ compile 'de.thetaphi:forbiddenapis:2.1'
compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
compile 'org.apache.rat:apache-rat:0.11'
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 029c80b6e25..c4eb76164fe 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -143,7 +143,7 @@ class BuildPlugin implements Plugin {
}
project.rootProject.ext.javaHome = javaHome
- project.rootProject.ext.javaVersion = javaVersion
+ project.rootProject.ext.javaVersion = javaVersionEnum
project.rootProject.ext.buildChecksDone = true
}
project.targetCompatibility = minimumJava
@@ -378,7 +378,7 @@ class BuildPlugin implements Plugin {
* -serial because we don't use java serialization.
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
- options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
+ options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
// compile with compact 3 profile by default
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
if (project.compactProfile != 'full') {
@@ -387,10 +387,13 @@ class BuildPlugin implements Plugin {
options.encoding = 'UTF-8'
//options.incremental = true
- // gradle ignores target/source compatibility when it is "unnecessary", but since to compile with
- // java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
- assert minimumJava == JavaVersion.VERSION_1_8
- options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
+ if (project.javaVersion == JavaVersion.VERSION_1_9) {
+ // hack until gradle supports java 9's new "-release" arg
+ assert minimumJava == JavaVersion.VERSION_1_8
+ options.compilerArgs << '-release' << '8'
+ project.sourceCompatibility = null
+ project.targetCompatibility = null
+ }
}
}
}
@@ -456,7 +459,7 @@ class BuildPlugin implements Plugin {
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
// TODO: remove setting logging level via system property
- systemProperty 'es.logger.level', 'WARN'
+ systemProperty 'tests.logger.level', 'WARN'
for (Map.Entry property : System.properties.entrySet()) {
if (property.getKey().startsWith('tests.') ||
property.getKey().startsWith('es.')) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
index ef8c8e280ed..9f840df36e1 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
@@ -87,6 +87,10 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
* calls buildTest to actually build the test.
*/
void handleSnippet(Snippet snippet) {
+ if (snippet.language == 'json') {
+ throw new InvalidUserDataException(
+ "$snippet: Use `js` instead of `json`.")
+ }
if (snippet.testSetup) {
setup(snippet)
return
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index 427d3191dc5..48a4d7c26dc 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -62,9 +62,8 @@ class PrecommitTasks {
private static Task configureForbiddenApis(Project project) {
project.pluginManager.apply(ForbiddenApisPlugin.class)
project.forbiddenApis {
- internalRuntimeForbidden = true
failOnUnsupportedJava = false
- bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
+ bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
index 3ff5a06ad42..076a564f84a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
@@ -203,8 +203,7 @@ public class ThirdPartyAuditTask extends AntTask {
Set sheistySet = getSheistyClasses(tmpDir.toPath());
try {
- ant.thirdPartyAudit(internalRuntimeForbidden: false,
- failOnUnsupportedJava: false,
+ ant.thirdPartyAudit(failOnUnsupportedJava: false,
failOnMissingClasses: false,
signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
classpath: classpath.asPath) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index 2ff5e333139..5d9961a0425 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -129,18 +129,18 @@ class NodeInfo {
}
env = [ 'JAVA_HOME' : project.javaHome ]
- args.addAll("-E", "es.node.portsfile=true")
+ args.addAll("-E", "node.portsfile=true")
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
env.put('ES_JAVA_OPTS', esJavaOpts)
for (Map.Entry property : System.properties.entrySet()) {
- if (property.getKey().startsWith('es.')) {
+ if (property.key.startsWith('tests.es.')) {
args.add("-E")
- args.add("${property.getKey()}=${property.getValue()}")
+ args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
- args.addAll("-E", "es.path.conf=${confDir}")
+ args.addAll("-E", "path.conf=${confDir}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 48f07b1a2d5..63133dd851b 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -37,8 +37,6 @@
-
-
@@ -179,12 +177,6 @@
-
-
-
-
-
-
@@ -453,9 +445,6 @@
-
-
-
@@ -520,7 +509,6 @@
-
@@ -566,7 +554,6 @@
-
@@ -745,7 +732,6 @@
-
@@ -981,8 +967,6 @@
-
-
@@ -1071,9 +1055,6 @@
-
-
-
@@ -1222,6 +1203,16 @@
+
+
+
+
+
+
+
+
+
+
@@ -1232,13 +1223,6 @@
-
-
-
-
-
-
-
@@ -1309,6 +1293,7 @@
+
@@ -1335,7 +1320,6 @@
-
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index fee8404080a..6669abb90b3 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,4 +1,4 @@
-elasticsearch = 5.0.0
+elasticsearch = 5.0.0-alpha3
lucene = 6.0.0
# optional dependencies
@@ -13,9 +13,7 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
-# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
-# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
-httpclient = 4.3.6
-httpcore = 4.3.3
+httpclient = 4.5.2
+httpcore = 4.4.4
commonslogging = 1.1.3
commonscodec = 1.10
diff --git a/core/README.textile b/core/README.textile
deleted file mode 100644
index daaf5ecb70e..00000000000
--- a/core/README.textile
+++ /dev/null
@@ -1,235 +0,0 @@
-h1. Elasticsearch
-
-h2. A Distributed RESTful Search Engine
-
-h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch
-
-Elasticsearch is a distributed RESTful search engine built for the cloud. Features include:
-
-* Distributed and Highly Available Search Engine.
-** Each index is fully sharded with a configurable number of shards.
-** Each shard can have one or more replicas.
-** Read / Search operations performed on either one of the replica shard.
-* Multi Tenant with Multi Types.
-** Support for more than one index.
-** Support for more than one type per index.
-** Index level configuration (number of shards, index storage, ...).
-* Various set of APIs
-** HTTP RESTful API
-** Native Java API.
-** All APIs perform automatic node operation rerouting.
-* Document oriented
-** No need for upfront schema definition.
-** Schema can be defined per type for customization of the indexing process.
-* Reliable, Asynchronous Write Behind for long term persistency.
-* (Near) Real Time Search.
-* Built on top of Lucene
-** Each shard is a fully functional Lucene index
-** All the power of Lucene easily exposed through simple configuration / plugins.
-* Per operation consistency
-** Single document level operations are atomic, consistent, isolated and durable.
-* Open Source under the Apache License, version 2 ("ALv2")
-
-h2. Getting Started
-
-First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
-
-h3. Requirements
-
-You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
-
-h3. Installation
-
-* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
-* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
-* Run @curl -X GET http://localhost:9200/@.
-* Start more servers ...
-
-h3. Indexing
-
-Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
-
-
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
- "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
- "message": "Another tweet, will it be indexed?"
-}'
-
-
-Now, let's see if the information was added by GETting it:
-
-
-
-There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser.
-
-h3. Multi Tenant - Indices and Types
-
-Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data.
-
-Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@.
-
-Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
-
-
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
- "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
- "message": "Another tweet, will it be indexed?"
-}'
-
-
-The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
-
-Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
-
-
-
-Search (and similar operations) are multi index aware. This means that we can easily search on more than one
-index (twitter user), for example:
-
-
-
-{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends).
-
-h3. Distributed, Highly Available
-
-Let's face it, things will fail....
-
-Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
-
-In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
-
-h3. Where to go from here?
-
-We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
-
-h3. Building from Source
-
-Elasticsearch uses "Maven":http://maven.apache.org for its build system.
-
-In order to create a distribution, simply run the @mvn clean package
--DskipTests@ command in the cloned directory.
-
-The distribution will be created under @target/releases@.
-
-See the "TESTING":TESTING.asciidoc file for more information about
-running the Elasticsearch test suite.
-
-h3. Upgrading to Elasticsearch 1.x?
-
-In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process.
-
-h1. License
-
-
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-
diff --git a/core/build.gradle b/core/build.gradle
index a549f3260fa..16d21f66ef8 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -121,6 +121,36 @@ forbiddenPatterns {
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}
+task generateModulesList {
+ List modules = project(':modules').subprojects.collect { it.name }
+ File modulesFile = new File(buildDir, 'generated-resources/modules.txt')
+ processResources.from(modulesFile)
+ inputs.property('modules', modules)
+ outputs.file(modulesFile)
+ doLast {
+ modulesFile.parentFile.mkdirs()
+ modulesFile.setText(modules.join('\n'), 'UTF-8')
+ }
+}
+
+task generatePluginsList {
+ List plugins = project(':plugins').subprojects
+ .findAll { it.name.contains('example') == false }
+ .collect { it.name }
+ File pluginsFile = new File(buildDir, 'generated-resources/plugins.txt')
+ processResources.from(pluginsFile)
+ inputs.property('plugins', plugins)
+ outputs.file(pluginsFile)
+ doLast {
+ pluginsFile.parentFile.mkdirs()
+ pluginsFile.setText(plugins.join('\n'), 'UTF-8')
+ }
+}
+
+processResources {
+ dependsOn generateModulesList, generatePluginsList
+}
+
thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java b/core/src/main/java/org/apache/log4j/Java9Hack.java
similarity index 59%
rename from core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java
rename to core/src/main/java/org/apache/log4j/Java9Hack.java
index 3813679d81c..831cf5b35ae 100644
--- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java
+++ b/core/src/main/java/org/apache/log4j/Java9Hack.java
@@ -16,25 +16,22 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.index.percolator;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.Index;
+package org.apache.log4j;
-import java.io.IOException;
+import org.apache.log4j.helpers.ThreadLocalMap;
/**
- * Exception during indexing a percolator query.
+ * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
+ *
+ * This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
-public class PercolatorException extends ElasticsearchException {
+public class Java9Hack {
- public PercolatorException(Index index, String msg, Throwable cause) {
- super(msg, cause);
- setIndex(index);
- }
-
- public PercolatorException(StreamInput in) throws IOException{
- super(in);
+ public static void fixLog4j() {
+ if (MDC.mdc.tlm == null) {
+ MDC.mdc.java1 = false;
+ MDC.mdc.tlm = new ThreadLocalMap();
+ }
}
}
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index b242811b7be..ad5a2e79cd1 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -21,7 +21,6 @@ package org.elasticsearch;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -201,41 +200,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return rootCause;
}
- /**
- * Check whether this exception contains an exception of the given type:
- * either it is of the given class itself or it contains a nested cause
- * of the given type.
- *
- * @param exType the exception type to look for
- * @return whether there is a nested exception of the specified type
- */
- public boolean contains(Class extends Throwable> exType) {
- if (exType == null) {
- return false;
- }
- if (exType.isInstance(this)) {
- return true;
- }
- Throwable cause = getCause();
- if (cause == this) {
- return false;
- }
- if (cause instanceof ElasticsearchException) {
- return ((ElasticsearchException) cause).contains(exType);
- } else {
- while (cause != null) {
- if (exType.isInstance(cause)) {
- return true;
- }
- if (cause.getCause() == cause) {
- break;
- }
- cause = cause.getCause();
- }
- return false;
- }
- }
-
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(this.getMessage());
@@ -532,7 +496,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
org.elasticsearch.search.SearchContextMissingException::new, 24),
- SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
+ GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
+ org.elasticsearch.script.GeneralScriptException::new, 25),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
@@ -681,8 +646,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
- PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
- org.elasticsearch.index.percolator.PercolatorException::new, 108),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
@@ -744,7 +707,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
- ShardStateAction.NoLongerPrimaryShardException::new, 142);
+ ShardStateAction.NoLongerPrimaryShardException::new, 142),
+ SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
final Class extends ElasticsearchException> exceptionClass;
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index 9cc526d8f97..c220c297388 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -32,7 +32,6 @@ import java.io.IOException;
/**
*/
-@SuppressWarnings("deprecation")
public class Version {
/*
* The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA
@@ -75,9 +74,9 @@ public class Version {
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
- public static final int V_5_0_0_ID = 5000099;
- public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
- public static final Version CURRENT = V_5_0_0;
+ public static final int V_5_0_0_alpha3_ID = 5000003;
+ public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
+ public static final Version CURRENT = V_5_0_0_alpha3;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -90,8 +89,8 @@ public class Version {
public static Version fromId(int id) {
switch (id) {
- case V_5_0_0_ID:
- return V_5_0_0;
+ case V_5_0_0_alpha3_ID:
+ return V_5_0_0_alpha3;
case V_5_0_0_alpha2_ID:
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index 3e93f699645..bab3dcb2ed2 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -165,10 +165,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.TransportMainAction;
-import org.elasticsearch.action.percolate.MultiPercolateAction;
-import org.elasticsearch.action.percolate.PercolateAction;
-import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
-import org.elasticsearch.action.percolate.TransportPercolateAction;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.MultiSearchAction;
import org.elasticsearch.action.search.SearchAction;
@@ -332,8 +328,6 @@ public class ActionModule extends AbstractModule {
registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
- registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
- registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequest.java b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
index 7955855bc0d..dac3b4f2a21 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -39,6 +39,10 @@ public abstract class ActionRequest> exte
public abstract ActionRequestValidationException validate();
+ public boolean getShouldPersistResult() {
+ return false;
+ }
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
index dbabe681c7a..e007929faf2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
@@ -42,17 +42,22 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
private final ShardId shard;
private final boolean primary;
+ private final boolean hasPendingAsyncFetch;
private final String assignedNodeId;
private final UnassignedInfo unassignedInfo;
+ private final long allocationDelayMillis;
private final long remainingDelayMillis;
private final Map nodeExplanations;
- public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
- @Nullable UnassignedInfo unassignedInfo, Map nodeExplanations) {
+ public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
+ long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
+ Map nodeExplanations) {
this.shard = shard;
this.primary = primary;
+ this.hasPendingAsyncFetch = hasPendingAsyncFetch;
this.assignedNodeId = assignedNodeId;
this.unassignedInfo = unassignedInfo;
+ this.allocationDelayMillis = allocationDelayMillis;
this.remainingDelayMillis = remainingDelayMillis;
this.nodeExplanations = nodeExplanations;
}
@@ -60,8 +65,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
public ClusterAllocationExplanation(StreamInput in) throws IOException {
this.shard = ShardId.readShardId(in);
this.primary = in.readBoolean();
+ this.hasPendingAsyncFetch = in.readBoolean();
this.assignedNodeId = in.readOptionalString();
this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+ this.allocationDelayMillis = in.readVLong();
this.remainingDelayMillis = in.readVLong();
int mapSize = in.readVInt();
@@ -77,8 +84,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
public void writeTo(StreamOutput out) throws IOException {
this.getShard().writeTo(out);
out.writeBoolean(this.isPrimary());
+ out.writeBoolean(this.isStillFetchingShardData());
out.writeOptionalString(this.getAssignedNodeId());
out.writeOptionalWriteable(this.getUnassignedInfo());
+ out.writeVLong(allocationDelayMillis);
out.writeVLong(remainingDelayMillis);
out.writeVInt(this.nodeExplanations.size());
@@ -97,6 +106,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.primary;
}
+ /** Returns true if shard data is still being fetched for the allocation */
+ public boolean isStillFetchingShardData() {
+ return this.hasPendingAsyncFetch;
+ }
+
/** Return turn if the shard is assigned to a node */
public boolean isAssigned() {
return this.assignedNodeId != null;
@@ -114,7 +128,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.unassignedInfo;
}
- /** Return the remaining allocation delay for this shard in millisocends */
+ /** Return the configured delay before the shard can be allocated in milliseconds */
+ public long getAllocationDelayMillis() {
+ return this.allocationDelayMillis;
+ }
+
+ /** Return the remaining allocation delay for this shard in milliseconds */
public long getRemainingDelayMillis() {
return this.remainingDelayMillis;
}
@@ -138,11 +157,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
if (assignedNodeId != null) {
builder.field("assigned_node_id", this.assignedNodeId);
}
+ builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
// If we have unassigned info, show that
if (unassignedInfo != null) {
unassignedInfo.toXContent(builder, params);
- long delay = unassignedInfo.getLastComputedLeftDelayNanos();
- builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
+ builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
}
builder.startObject("nodes");
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
index 28b62083d42..c5e1f6cc2a1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -50,6 +50,7 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -58,6 +59,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
+
/**
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
* master node in the cluster.
@@ -69,19 +72,22 @@ public class TransportClusterAllocationExplainAction
private final AllocationDeciders allocationDeciders;
private final ShardsAllocator shardAllocator;
private final TransportIndicesShardStoresAction shardStoresAction;
+ private final GatewayAllocator gatewayAllocator;
@Inject
public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
- ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
+ ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
+ GatewayAllocator gatewayAllocator) {
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
this.clusterInfoService = clusterInfoService;
this.allocationDeciders = allocationDeciders;
this.shardAllocator = shardAllocator;
this.shardStoresAction = shardStoresAction;
+ this.gatewayAllocator = gatewayAllocator;
}
@Override
@@ -130,7 +136,8 @@ public class TransportClusterAllocationExplainAction
Float nodeWeight,
IndicesShardStoresResponse.StoreStatus storeStatus,
String assignedNodeId,
- Set activeAllocationIds) {
+ Set activeAllocationIds,
+ boolean hasPendingAsyncFetch) {
final ClusterAllocationExplanation.FinalDecision finalDecision;
final ClusterAllocationExplanation.StoreCopy storeCopy;
final String finalExplanation;
@@ -161,6 +168,19 @@ public class TransportClusterAllocationExplainAction
if (node.getId().equals(assignedNodeId)) {
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
finalExplanation = "the shard is already assigned to this node";
+ } else if (hasPendingAsyncFetch &&
+ shard.primary() == false &&
+ shard.unassigned() &&
+ shard.allocatedPostIndexCreate(indexMetaData) &&
+ nodeDecision.type() != Decision.Type.YES) {
+ finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
+ " decision and the shard's state is still being fetched";
+ finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+ } else if (hasPendingAsyncFetch &&
+ shard.unassigned() &&
+ shard.allocatedPostIndexCreate(indexMetaData)) {
+ finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
+ finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
} else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
@@ -180,6 +200,7 @@ public class TransportClusterAllocationExplainAction
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
} else {
+ // TODO: handle throttling decision better here
finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
@@ -198,7 +219,8 @@ public class TransportClusterAllocationExplainAction
*/
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
boolean includeYesDecisions, ShardsAllocator shardAllocator,
- List shardStores) {
+ List shardStores,
+ GatewayAllocator gatewayAllocator) {
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// get the existing unassigned info if available
@@ -217,9 +239,9 @@ public class TransportClusterAllocationExplainAction
long remainingDelayMillis = 0;
final MetaData metadata = allocation.metaData();
final IndexMetaData indexMetaData = metadata.index(shard.index());
- if (ui != null) {
- final Settings indexSettings = indexMetaData.getSettings();
- long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
+ long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis();
+ if (ui != null && ui.isDelayed()) {
+ long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings());
remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
}
@@ -238,11 +260,13 @@ public class TransportClusterAllocationExplainAction
Float weight = weights.get(node);
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
- storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
+ storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
+ allocation.hasPendingAsyncFetch());
explanations.put(node, nodeExplanation);
}
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
- shard.currentNodeId(), remainingDelayMillis, ui, explanations);
+ shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
+ gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
}
@Override
@@ -250,7 +274,7 @@ public class TransportClusterAllocationExplainAction
final ActionListener listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
- clusterInfoService.getClusterInfo(), System.nanoTime());
+ clusterInfoService.getClusterInfo(), System.nanoTime(), false);
ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) {
@@ -297,7 +321,7 @@ public class TransportClusterAllocationExplainAction
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
List shardStoreStatus = shardStatuses.get(shardRouting.id());
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
- request.includeYesDecisions(), shardAllocator, shardStoreStatus);
+ request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
listener.onResponse(new ClusterAllocationExplainResponse(cae));
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
index 446ae3affb7..bad4001e211 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
@@ -192,6 +192,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
builder.endObject();
builder.endObject();
}
+ builder.endObject();
} else if ("parents".equals(groupBy)) {
builder.startObject("tasks");
for (TaskGroup group : getTaskGroups()) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
index a241f01ea28..4ec729bbc53 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
@@ -19,28 +19,24 @@
package org.elasticsearch.action.admin.cluster.reroute;
-import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
-import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
+import java.util.Objects;
/**
* Request to submit cluster reroute allocation commands
*/
public class ClusterRerouteRequest extends AcknowledgedRequest {
- AllocationCommands commands = new AllocationCommands();
- boolean dryRun;
- boolean explain;
+ private AllocationCommands commands = new AllocationCommands();
+ private boolean dryRun;
+ private boolean explain;
+ private boolean retryFailed;
public ClusterRerouteRequest() {
}
@@ -81,6 +77,15 @@ public class ClusterRerouteRequest extends AcknowledgedRequestfalse). If true, the
+ * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
+ */
+ public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
+ this.retryFailed = retryFailed;
+ return this;
+ }
+
/**
* Returns the current explain flag
*/
@@ -88,41 +93,27 @@ public class ClusterRerouteRequest extends AcknowledgedRequest {
-
+public class ClusterRerouteRequestBuilder
+ extends AcknowledgedRequestBuilder {
public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) {
super(client, action, new ClusterRerouteRequest());
}
@@ -61,10 +60,11 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilderfalse). If true, the
+ * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
*/
- public ClusterRerouteRequestBuilder setCommands(AllocationCommand... commands) throws Exception {
- request.commands(commands);
+ public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
+ request.setRetryFailed(retryFailed);
return this;
}
-}
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
index e6116dbfbc4..b0b676f6e2e 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -68,38 +69,55 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener) {
- clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) {
-
- private volatile ClusterState clusterStateToSend;
- private volatile RoutingExplanations explanations;
-
- @Override
- protected ClusterRerouteResponse newResponse(boolean acknowledged) {
- return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
- }
-
- @Override
- public void onAckTimeout() {
- listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
- }
-
- @Override
- public void onFailure(String source, Throwable t) {
- logger.debug("failed to perform [{}]", t, source);
- super.onFailure(source, t);
- }
-
- @Override
- public ClusterState execute(ClusterState currentState) {
- RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain());
- ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
- clusterStateToSend = newState;
- explanations = routingResult.explanations();
- if (request.dryRun) {
- return currentState;
- }
- return newState;
- }
- });
+ clusterService.submitStateUpdateTask("cluster_reroute (api)", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger,
+ allocationService, request, listener));
}
-}
\ No newline at end of file
+
+ static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask {
+
+ private final ClusterRerouteRequest request;
+ private final ActionListener listener;
+ private final ESLogger logger;
+ private final AllocationService allocationService;
+ private volatile ClusterState clusterStateToSend;
+ private volatile RoutingExplanations explanations;
+
+ ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
+ ActionListener listener) {
+ super(Priority.IMMEDIATE, request, listener);
+ this.request = request;
+ this.listener = listener;
+ this.logger = logger;
+ this.allocationService = allocationService;
+ }
+
+ @Override
+ protected ClusterRerouteResponse newResponse(boolean acknowledged) {
+ return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
+ }
+
+ @Override
+ public void onAckTimeout() {
+ listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.debug("failed to perform [{}]", t, source);
+ super.onFailure(source, t);
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
+ request.isRetryFailed());
+ ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
+ clusterStateToSend = newState;
+ explanations = routingResult.explanations();
+ if (request.dryRun()) {
+ return currentState;
+ }
+ return newState;
+ }
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
index 0a7a8a9ce80..efc2fbeb5b5 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
@@ -81,18 +81,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
return snapshotInfo.status();
}
- static final class Fields {
- static final String SNAPSHOT = "snapshot";
- static final String ACCEPTED = "accepted";
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (snapshotInfo != null) {
- builder.field(Fields.SNAPSHOT);
- snapshotInfo.toExternalXContent(builder, params);
+ builder.field("snapshot");
+ snapshotInfo.toXContent(builder, params);
} else {
- builder.field(Fields.ACCEPTED, true);
+ builder.field("accepted", true);
}
return builder;
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
index a5db19684b2..ec996e6d366 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
@@ -74,15 +74,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
}
}
- static final class Fields {
- static final String SNAPSHOTS = "snapshots";
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
- builder.startArray(Fields.SNAPSHOTS);
+ builder.startArray("snapshots");
for (SnapshotInfo snapshotInfo : snapshots) {
- snapshotInfo.toExternalXContent(builder, params);
+ snapshotInfo.toXContent(builder, params);
}
builder.endArray();
return builder;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
index a54c01ed15a..70f4f2aa4f2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java
@@ -73,18 +73,13 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten
return restoreInfo.status();
}
- static final class Fields {
- static final String SNAPSHOT = "snapshot";
- static final String ACCEPTED = "accepted";
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
if (restoreInfo != null) {
- builder.field(Fields.SNAPSHOT);
+ builder.field("snapshot");
restoreInfo.toXContent(builder, params);
} else {
- builder.field(Fields.ACCEPTED, true);
+ builder.field("accepted", true);
}
return builder;
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
index 34e503224ce..b9800a2d9ed 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java
@@ -73,13 +73,9 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
}
}
- static final class Fields {
- static final String SNAPSHOTS = "snapshots";
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startArray(Fields.SNAPSHOTS);
+ builder.startArray("snapshots");
for (SnapshotStatus snapshot : snapshots) {
snapshot.toXContent(builder, params);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java
index 8c0c427beea..9a7bb5c8f3d 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
-import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;
@@ -45,7 +44,6 @@ public class ClusterStatsIndices implements ToXContent {
private QueryCacheStats queryCache;
private CompletionStats completion;
private SegmentsStats segments;
- private PercolatorQueryCacheStats percolatorCache;
public ClusterStatsIndices(List nodeResponses) {
ObjectObjectHashMap countsPerIndex = new ObjectObjectHashMap<>();
@@ -56,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent {
this.queryCache = new QueryCacheStats();
this.completion = new CompletionStats();
this.segments = new SegmentsStats();
- this.percolatorCache = new PercolatorQueryCacheStats();
for (ClusterStatsNodeResponse r : nodeResponses) {
for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
@@ -79,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent {
queryCache.add(shardCommonStats.queryCache);
completion.add(shardCommonStats.completion);
segments.add(shardCommonStats.segments);
- percolatorCache.add(shardCommonStats.percolatorCache);
}
}
@@ -122,10 +118,6 @@ public class ClusterStatsIndices implements ToXContent {
return segments;
}
- public PercolatorQueryCacheStats getPercolatorCache() {
- return percolatorCache;
- }
-
static final class Fields {
static final String COUNT = "count";
}
@@ -140,7 +132,6 @@ public class ClusterStatsIndices implements ToXContent {
queryCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
- percolatorCache.toXContent(builder, params);
return builder;
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index bae7b20694d..3a0b1455209 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -55,8 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction {
private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
- CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
- CommonStatsFlags.Flag.PercolatorCache);
+ CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments);
private final NodeService nodeService;
private final IndicesService indicesService;
@@ -100,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction {
return flags.isSet(Flag.FieldData);
}
- public IndicesStatsRequest percolate(boolean percolate) {
- flags.set(Flag.PercolatorCache, percolate);
- return this;
- }
-
- public boolean percolate() {
- return flags.isSet(Flag.PercolatorCache);
- }
-
public IndicesStatsRequest segments(boolean segments) {
flags.set(Flag.Segments, segments);
return this;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
index cad919cbd18..8e7afe3e7e3 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
@@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}
- public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
- request.percolate(percolate);
- return this;
- }
-
public IndicesStatsRequestBuilder setSegments(boolean segments) {
request.segments(segments);
return this;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
index 8c12dfa9fda..7e8ccd30a8a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
@@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
flags.set(CommonStatsFlags.Flag.FieldData);
flags.fieldDataFields(request.fieldDataFields());
}
- if (request.percolate()) {
- flags.set(CommonStatsFlags.Flag.PercolatorCache);
- }
if (request.segments()) {
flags.set(CommonStatsFlags.Flag.Segments);
flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
@@ -163,6 +160,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
flags.set(CommonStatsFlags.Flag.Recovery);
}
- return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
+ return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java
index ee0ba95b2b1..2ddb35e1357 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java
@@ -19,8 +19,6 @@
package org.elasticsearch.action.search;
-import java.util.Map;
-
/**
*
*/
@@ -36,13 +34,10 @@ class ParsedScrollId {
private final ScrollIdForNode[] context;
- private final Map attributes;
-
- public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map attributes) {
+ public ParsedScrollId(String source, String type, ScrollIdForNode[] context) {
this.source = source;
this.type = type;
this.context = context;
- this.attributes = attributes;
}
public String getSource() {
@@ -56,8 +51,4 @@ class ParsedScrollId {
public ScrollIdForNode[] getContext() {
return context;
}
-
- public Map getAttributes() {
- return this.attributes;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
index f7cb72b22e9..7cf608559ce 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java
@@ -123,7 +123,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction searchPhaseResults,
- @Nullable Map attributes) throws IOException {
+ static String buildScrollId(SearchType searchType, AtomicArray extends SearchPhaseResult> searchPhaseResults) throws IOException {
if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
- return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
+ return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults);
} else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
- return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes);
+ return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults);
} else {
throw new IllegalStateException("search_type [" + searchType + "] not supported");
}
}
- static String buildScrollId(String type, AtomicArray extends SearchPhaseResult> searchPhaseResults,
- @Nullable Map attributes) throws IOException {
- StringBuilder sb = new StringBuilder().append(type).append(';');
- sb.append(searchPhaseResults.asList().size()).append(';');
- for (AtomicArray.Entry extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
- SearchPhaseResult searchPhaseResult = entry.value;
- sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';');
- }
- if (attributes == null) {
- sb.append("0;");
- } else {
- sb.append(attributes.size()).append(";");
- for (Map.Entry entry : attributes.entrySet()) {
- sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
+ static String buildScrollId(String type, AtomicArray extends SearchPhaseResult> searchPhaseResults) throws IOException {
+ try (RAMOutputStream out = new RAMOutputStream()) {
+ out.writeString(type);
+ out.writeVInt(searchPhaseResults.asList().size());
+ for (AtomicArray.Entry extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
+ SearchPhaseResult searchPhaseResult = entry.value;
+ out.writeLong(searchPhaseResult.id());
+ out.writeString(searchPhaseResult.shardTarget().nodeId());
}
+ byte[] bytes = new byte[(int) out.getFilePointer()];
+ out.writeTo(bytes, 0);
+ return Base64.getUrlEncoder().encodeToString(bytes);
}
- BytesRef bytesRef = new BytesRef(sb);
- return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
}
static ParsedScrollId parseScrollId(String scrollId) {
- CharsRefBuilder spare = new CharsRefBuilder();
try {
- byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
- spare.copyUTF8Bytes(decode, 0, decode.length);
+ byte[] bytes = Base64.getUrlDecoder().decode(scrollId);
+ ByteArrayDataInput in = new ByteArrayDataInput(bytes);
+ String type = in.readString();
+ ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()];
+ for (int i = 0; i < context.length; ++i) {
+ long id = in.readLong();
+ String target = in.readString();
+ context[i] = new ScrollIdForNode(target, id);
+ }
+ if (in.getPosition() != bytes.length) {
+ throw new IllegalArgumentException("Not all bytes were read");
+ }
+ return new ParsedScrollId(scrollId, type, context);
} catch (Exception e) {
- throw new IllegalArgumentException("Failed to decode scrollId", e);
+ throw new IllegalArgumentException("Cannot parse scroll id", e);
}
- String[] elements = spare.get().toString().split(";");
- if (elements.length < 2) {
- throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
- }
-
- int index = 0;
- String type = elements[index++];
- int contextSize = Integer.parseInt(elements[index++]);
- if (elements.length < contextSize + 2) {
- throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
- }
-
- ScrollIdForNode[] context = new ScrollIdForNode[contextSize];
- for (int i = 0; i < contextSize; i++) {
- String element = elements[index++];
- int sep = element.indexOf(':');
- if (sep == -1) {
- throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
- }
- context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
- }
- Map<String, String> attributes;
- int attributesSize = Integer.parseInt(elements[index++]);
- if (attributesSize == 0) {
- attributes = emptyMap();
- } else {
- attributes = new HashMap<>(attributesSize);
- for (int i = 0; i < attributesSize; i++) {
- String element = elements[index++];
- int sep = element.indexOf(':');
- attributes.put(element.substring(0, sep), element.substring(sep + 1));
- }
- }
- return new ParsedScrollId(scrollId, type, context, attributes);
}
private TransportSearchHelper() {
diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
index 79dbf85db65..75542784997 100644
--- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java
@@ -133,6 +133,10 @@ public abstract class TransportAction, Re
return;
}
+ if (task != null && request.getShouldPersistResult()) {
+ listener = new PersistentActionListener<>(taskManager, task, listener);
+ }
+
if (filters.length == 0) {
try {
doExecute(task, request, listener);
@@ -171,7 +175,7 @@ public abstract class TransportAction, Re
if (i < this.action.filters.length) {
this.action.filters[i].apply(task, actionName, request, listener, this);
} else if (i == this.action.filters.length) {
- this.action.doExecute(task, request, new FilteredActionListener(actionName, listener,
+ this.action.doExecute(task, request, new FilteredActionListener<>(actionName, listener,
new ResponseFilterChain<>(this.action.filters, logger)));
} else {
listener.onFailure(new IllegalStateException("proceed was called too many times"));
@@ -246,4 +250,37 @@ public abstract class TransportAction, Re
listener.onFailure(e);
}
}
+
+ /**
+ * Wrapper for an action listener that persists the result at the end of the execution
+ */
+ private static class PersistentActionListener<Response extends ActionResponse> implements ActionListener<Response> {
+ private final ActionListener<Response> delegate;
+ private final Task task;
+ private final TaskManager taskManager;
+
+ private PersistentActionListener(TaskManager taskManager, Task task, ActionListener delegate) {
+ this.taskManager = taskManager;
+ this.task = task;
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void onResponse(Response response) {
+ try {
+ taskManager.persistResult(task, response, delegate);
+ } catch (Throwable e) {
+ delegate.onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ taskManager.persistResult(task, e, delegate);
+ } catch (Throwable e1) {
+ delegate.onFailure(e1);
+ }
+ }
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
index 6d35cafd088..305a4fd30ae 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -177,15 +177,7 @@ final class Bootstrap {
// install SM after natives, shutdown hooks, etc.
Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
- // We do not need to reload system properties here as we have already applied them in building the settings and
- // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
- // placeholder
- Settings nodeSettings = Settings.builder()
- .put(settings)
- .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
- .build();
-
- node = new Node(nodeSettings) {
+ node = new Node(settings) {
@Override
protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
BootstrapCheck.check(settings, boundTransportAddress);
@@ -193,13 +185,13 @@ final class Bootstrap {
};
}
- private static Environment initialSettings(boolean foreground, String pidFile) {
+ private static Environment initialSettings(boolean foreground, String pidFile, Map<String, String> esSettings) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
Settings.Builder builder = Settings.builder();
if (Strings.hasLength(pidFile)) {
builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
}
- return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal);
+ return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
}
private void start() {
@@ -233,11 +225,13 @@ final class Bootstrap {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
- elasticsearchSettings(esSettings);
+ // force the class initializer for BootstrapInfo to run before
+ // the security manager is installed
+ BootstrapInfo.init();
INSTANCE = new Bootstrap();
- Environment environment = initialSettings(foreground, pidFile);
+ Environment environment = initialSettings(foreground, pidFile, esSettings);
Settings settings = environment.settings();
LogConfigurator.configure(settings, true);
checkForCustomConfFile();
@@ -295,13 +289,6 @@ final class Bootstrap {
}
}
- @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
- private static void elasticsearchSettings(Map esSettings) {
- for (Map.Entry esSetting : esSettings.entrySet()) {
- System.setProperty(esSetting.getKey(), esSetting.getValue());
- }
- }
-
@SuppressForbidden(reason = "System#out")
private static void closeSystOut() {
System.out.close();
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java
index bd693951eb2..791836bf8a4 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java
@@ -120,4 +120,8 @@ public final class BootstrapInfo {
}
return SYSTEM_PROPERTIES;
}
+
+ public static void init() {
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
index bb1f6cc87d5..b3259129473 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -21,28 +21,25 @@ package org.elasticsearch.bootstrap;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
-import joptsimple.util.KeyValuePair;
import org.elasticsearch.Build;
-import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
+import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.io.IOException;
import java.util.Arrays;
-import java.util.HashMap;
import java.util.Map;
/**
* This class starts elasticsearch.
*/
-class Elasticsearch extends Command {
+class Elasticsearch extends SettingCommand {
private final OptionSpec<Void> versionOption;
private final OptionSpec<Void> daemonizeOption;
private final OptionSpec<String> pidfileOption;
- private final OptionSpec<KeyValuePair> propertyOption;
// visible for testing
Elasticsearch() {
@@ -56,7 +53,6 @@ class Elasticsearch extends Command {
pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
"Creates a pid file in the specified path on start")
.withRequiredArg();
- propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class);
}
/**
@@ -75,7 +71,7 @@ class Elasticsearch extends Command {
}
@Override
- protected void execute(Terminal terminal, OptionSet options) throws Exception {
+ protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
if (options.nonOptionArguments().isEmpty() == false) {
throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
}
@@ -84,26 +80,15 @@ class Elasticsearch extends Command {
throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");
}
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
- + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
- + ", JVM: " + JvmInfo.jvmInfo().version());
+ + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ + ", JVM: " + JvmInfo.jvmInfo().version());
return;
}
final boolean daemonize = options.has(daemonizeOption);
final String pidFile = pidfileOption.value(options);
- final Map<String, String> esSettings = new HashMap<>();
- for (final KeyValuePair kvp : propertyOption.values(options)) {
- if (!kvp.key.startsWith("es.")) {
- throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]");
- }
- if (kvp.value.isEmpty()) {
- throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty");
- }
- esSettings.put(kvp.key, kvp.value);
- }
-
- init(daemonize, pidFile, esSettings);
+ init(daemonize, pidFile, settings);
}
void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
index 4437097bb35..e1c6d320df6 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java
@@ -244,7 +244,7 @@ final class Security {
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
- addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
+ addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink");
// read-write dirs
diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java
index 1fc7c9fe74f..3e2faf13657 100644
--- a/core/src/main/java/org/elasticsearch/cli/Command.java
+++ b/core/src/main/java/org/elasticsearch/cli/Command.java
@@ -19,15 +19,15 @@
package org.elasticsearch.cli;
-import java.io.IOException;
-import java.util.Arrays;
-
import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;
+import java.io.IOException;
+import java.util.Arrays;
+
/**
* An action to execute within a cli.
*/
@@ -112,4 +112,5 @@ public abstract class Command {
*
* Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;
+
}
diff --git a/core/src/main/java/org/elasticsearch/cli/SettingCommand.java b/core/src/main/java/org/elasticsearch/cli/SettingCommand.java
new file mode 100644
index 00000000000..868975ac6ff
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cli/SettingCommand.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
+import joptsimple.util.KeyValuePair;
+
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+public abstract class SettingCommand extends Command {
+
+ private final OptionSpec<KeyValuePair> settingOption;
+
+ public SettingCommand(String description) {
+ super(description);
+ this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
+ }
+
+ @Override
+ protected void execute(Terminal terminal, OptionSet options) throws Exception {
+ final Map<String, String> settings = new HashMap<>();
+ for (final KeyValuePair kvp : settingOption.values(options)) {
+ if (kvp.value.isEmpty()) {
+ throw new UserError(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty");
+ }
+ settings.put(kvp.key, kvp.value);
+ }
+
+ putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
+ putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
+ putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
+ putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
+
+ execute(terminal, options, settings);
+ }
+
+ protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
+ final String value = System.getProperty(key);
+ if (value != null) {
+ if (settings.containsKey(setting)) {
+ final String message =
+ String.format(
+ Locale.ROOT,
+ "duplicate setting [%s] found via command-line [%s] and system property [%s]",
+ setting,
+ settings.get(setting),
+ value);
+ throw new IllegalArgumentException(message);
+ } else {
+ settings.put(setting, value);
+ }
+ }
+ }
+
+ protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;
+
+}
diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java
index 47e8e43f37b..0cf22d7a2c4 100644
--- a/core/src/main/java/org/elasticsearch/client/Client.java
+++ b/core/src/main/java/org/elasticsearch/client/Client.java
@@ -42,12 +42,6 @@ import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
-import org.elasticsearch.action.percolate.MultiPercolateRequest;
-import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
-import org.elasticsearch.action.percolate.MultiPercolateResponse;
-import org.elasticsearch.action.percolate.PercolateRequest;
-import org.elasticsearch.action.percolate.PercolateRequestBuilder;
-import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollRequestBuilder;
import org.elasticsearch.action.search.ClearScrollResponse;
@@ -419,36 +413,6 @@ public interface Client extends ElasticsearchClient, Releasable {
*/
MultiTermVectorsRequestBuilder prepareMultiTermVectors();
- /**
- * Percolates a request returning the matches documents.
- */
- ActionFuture<PercolateResponse> percolate(PercolateRequest request);
-
- /**
- * Percolates a request returning the matches documents.
- */
- void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener);
-
- /**
- * Percolates a request returning the matches documents.
- */
- PercolateRequestBuilder preparePercolate();
-
- /**
- * Performs multiple percolate requests.
- */
- ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request);
-
- /**
- * Performs multiple percolate requests.
- */
- void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener);
-
- /**
- * Performs multiple percolate requests.
- */
- MultiPercolateRequestBuilder prepareMultiPercolate();
-
/**
* Computes a score explanation for the specified request.
*
diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index 6083422862c..e36bc4b8d77 100644
--- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -295,14 +295,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
-import org.elasticsearch.action.percolate.MultiPercolateAction;
-import org.elasticsearch.action.percolate.MultiPercolateRequest;
-import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
-import org.elasticsearch.action.percolate.MultiPercolateResponse;
-import org.elasticsearch.action.percolate.PercolateAction;
-import org.elasticsearch.action.percolate.PercolateRequest;
-import org.elasticsearch.action.percolate.PercolateRequestBuilder;
-import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollRequestBuilder;
@@ -623,36 +615,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new MultiTermVectorsRequestBuilder(this, MultiTermVectorsAction.INSTANCE);
}
- @Override
- public ActionFuture<PercolateResponse> percolate(final PercolateRequest request) {
- return execute(PercolateAction.INSTANCE, request);
- }
-
- @Override
- public void percolate(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
- execute(PercolateAction.INSTANCE, request, listener);
- }
-
- @Override
- public PercolateRequestBuilder preparePercolate() {
- return new PercolateRequestBuilder(this, PercolateAction.INSTANCE);
- }
-
- @Override
- public MultiPercolateRequestBuilder prepareMultiPercolate() {
- return new MultiPercolateRequestBuilder(this, MultiPercolateAction.INSTANCE);
- }
-
- @Override
- public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
- execute(MultiPercolateAction.INSTANCE, request, listener);
- }
-
- @Override
- public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
- return execute(MultiPercolateAction.INSTANCE, request);
- }
-
@Override
public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id);
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
index 47dd2ce9ae6..15ed58346e7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
+import org.elasticsearch.cluster.routing.DelayedAllocationService;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
@@ -49,6 +50,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
@@ -62,6 +64,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.tasks.TaskResultsService;
import java.util.Arrays;
import java.util.Collections;
@@ -79,6 +82,7 @@ public class ClusterModule extends AbstractModule {
new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
Collections.unmodifiableList(Arrays.asList(
+ MaxRetryAllocationDecider.class,
SameShardAllocationDecider.class,
FilterAllocationDecider.class,
ReplicaAfterPrimaryActiveAllocationDecider.class,
@@ -149,9 +153,11 @@ public class ClusterModule extends AbstractModule {
bind(MetaDataIndexTemplateService.class).asEagerSingleton();
bind(IndexNameExpressionResolver.class).asEagerSingleton();
bind(RoutingService.class).asEagerSingleton();
+ bind(DelayedAllocationService.class).asEagerSingleton();
bind(ShardStateAction.class).asEagerSingleton();
bind(NodeIndexDeletedAction.class).asEagerSingleton();
bind(NodeMappingRefreshAction.class).asEagerSingleton();
bind(MappingUpdatedAction.class).asEagerSingleton();
+ bind(TaskResultsService.class).asEagerSingleton();
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index d3b5e7ecbad..a41d02d28fa 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -42,7 +42,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
new file mode 100644
index 00000000000..e79884cf9c9
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * The {@link DelayedAllocationService} listens to cluster state changes and checks
+ * if there are unassigned shards with delayed allocation (unassigned shards that have
+ * the delay marker). These are shards that have become unassigned due to a node leaving
+ * and which were assigned the delay marker based on the index delay setting
+ * {@link UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING}
+ * (see {@link AllocationService#deassociateDeadNodes(RoutingAllocation)}).
+ * This class is responsible for choosing the next (closest) delay expiration of a
+ * delayed shard to schedule a reroute to remove the delay marker.
+ * The actual removal of the delay marker happens in
+ * {@link AllocationService#removeDelayMarkers(RoutingAllocation)}, triggering yet
+ * another cluster change event.
+ */
+public class DelayedAllocationService extends AbstractLifecycleComponent implements ClusterStateListener {
+
+ static final String CLUSTER_UPDATE_TASK_SOURCE = "delayed_allocation_reroute";
+
+ final ThreadPool threadPool;
+ private final ClusterService clusterService;
+ private final AllocationService allocationService;
+
+ AtomicReference<DelayedRerouteTask> delayedRerouteTask = new AtomicReference<>(); // package private to access from tests
+
+ /**
+ * represents a delayed scheduling of the reroute action that can be cancelled.
+ */
+ class DelayedRerouteTask extends ClusterStateUpdateTask {
+ final TimeValue nextDelay; // delay until submitting the reroute command
+ final long baseTimestampNanos; // timestamp (in nanos) upon which delay was calculated
+ volatile ScheduledFuture future;
+ final AtomicBoolean cancelScheduling = new AtomicBoolean();
+
+ DelayedRerouteTask(TimeValue nextDelay, long baseTimestampNanos) {
+ this.nextDelay = nextDelay;
+ this.baseTimestampNanos = baseTimestampNanos;
+ }
+
+ public long scheduledTimeToRunInNanos() {
+ return baseTimestampNanos + nextDelay.nanos();
+ }
+
+ public void cancelScheduling() {
+ cancelScheduling.set(true);
+ FutureUtils.cancel(future);
+ removeIfSameTask(this);
+ }
+
+ public void schedule() {
+ future = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
+ @Override
+ protected void doRun() throws Exception {
+ if (cancelScheduling.get()) {
+ return;
+ }
+ clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, DelayedRerouteTask.this);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.warn("failed to submit schedule/execute reroute post unassigned shard", t);
+ removeIfSameTask(DelayedRerouteTask.this);
+ }
+ });
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ removeIfSameTask(this);
+ RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "assign delayed unassigned shards");
+ if (routingResult.changed()) {
+ return ClusterState.builder(currentState).routingResult(routingResult).build();
+ } else {
+ return currentState;
+ }
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ if (oldState == newState) {
+ // no state changed, check when we should remove the delay flag from the shards the next time.
+ // if cluster state changed, we can leave the scheduling of the next delay up to the clusterChangedEvent
+ // this should not be needed, but we want to be extra safe here
+ scheduleIfNeeded(currentNanoTime(), newState);
+ }
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ removeIfSameTask(this);
+ logger.warn("failed to schedule/execute reroute post unassigned shard", t);
+ }
+ }
+
+ @Inject
+ public DelayedAllocationService(Settings settings, ThreadPool threadPool, ClusterService clusterService,
+ AllocationService allocationService) {
+ super(settings);
+ this.threadPool = threadPool;
+ this.clusterService = clusterService;
+ this.allocationService = allocationService;
+ clusterService.addFirst(this);
+ }
+
+ @Override
+ protected void doStart() {
+ }
+
+ @Override
+ protected void doStop() {
+ }
+
+ @Override
+ protected void doClose() {
+ clusterService.remove(this);
+ removeTaskAndCancel();
+ }
+
+ /** override this to control time based decisions during delayed allocation */
+ protected long currentNanoTime() {
+ return System.nanoTime();
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ long currentNanoTime = currentNanoTime();
+ if (event.state().nodes().isLocalNodeElectedMaster()) {
+ scheduleIfNeeded(currentNanoTime, event.state());
+ }
+ }
+
+ private void removeTaskAndCancel() {
+ DelayedRerouteTask existingTask = delayedRerouteTask.getAndSet(null);
+ if (existingTask != null) {
+ logger.trace("cancelling existing delayed reroute task");
+ existingTask.cancelScheduling();
+ }
+ }
+
+ private void removeIfSameTask(DelayedRerouteTask expectedTask) {
+ delayedRerouteTask.compareAndSet(expectedTask, null);
+ }
+
+ /**
+ * Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
+ */
+ private void scheduleIfNeeded(long currentNanoTime, ClusterState state) {
+ assertClusterStateThread();
+ long nextDelayNanos = UnassignedInfo.findNextDelayedAllocation(currentNanoTime, state);
+ if (nextDelayNanos < 0) {
+ logger.trace("no need to schedule reroute - no delayed unassigned shards");
+ removeTaskAndCancel();
+ } else {
+ TimeValue nextDelay = TimeValue.timeValueNanos(nextDelayNanos);
+ final boolean earlierRerouteNeeded;
+ DelayedRerouteTask existingTask = delayedRerouteTask.get();
+ DelayedRerouteTask newTask = new DelayedRerouteTask(nextDelay, currentNanoTime);
+ if (existingTask == null) {
+ earlierRerouteNeeded = true;
+ } else if (newTask.scheduledTimeToRunInNanos() < existingTask.scheduledTimeToRunInNanos()) {
+ // we need an earlier delayed reroute
+ logger.trace("cancelling existing delayed reroute task as delayed reroute has to happen [{}] earlier",
+ TimeValue.timeValueNanos(existingTask.scheduledTimeToRunInNanos() - newTask.scheduledTimeToRunInNanos()));
+ existingTask.cancelScheduling();
+ earlierRerouteNeeded = true;
+ } else {
+ earlierRerouteNeeded = false;
+ }
+
+ if (earlierRerouteNeeded) {
+ logger.info("scheduling reroute for delayed shards in [{}] ({} delayed shards)", nextDelay,
+ UnassignedInfo.getNumberOfDelayedUnassigned(state));
+ DelayedRerouteTask currentTask = delayedRerouteTask.getAndSet(newTask);
+ assert existingTask == currentTask || currentTask == null;
+ newTask.schedule();
+ } else {
+ logger.trace("no need to reschedule delayed reroute - currently scheduled delayed reroute in [{}] is enough", nextDelay);
+ }
+ }
+ }
+
+ // protected so that it can be overridden (and disabled) by unit tests
+ protected void assertClusterStateThread() {
+ ClusterService.assertClusterStateThread();
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
index 53b094bc34b..a1e891bce3d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -331,15 +332,13 @@ public class IndexShardRoutingTable implements Iterable {
public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {
ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
- // fill it in a randomized fashion
- for (int i = 0; i < activeShards.size(); i++) {
- ShardRouting shardRouting = activeShards.get(i);
+ int seed = shuffler.nextSeed();
+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
if (nodeId.equals(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
- for (int i = 0; i < allInitializingShards.size(); i++) {
- ShardRouting shardRouting = allInitializingShards.get(i);
+ for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
if (nodeId.equals(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
@@ -347,26 +346,31 @@ public class IndexShardRoutingTable implements Iterable {
return new PlainShardIterator(shardId, ordered);
}
+ public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) {
+ return onlyNodeSelectorActiveInitializingShardsIt(new String[] {nodeAttributes}, discoveryNodes);
+ }
+
/**
* Returns shards based on nodeAttributes given such as node name , node attribute, node IP
* Supports node specifications in cluster API
*/
- public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttribute, DiscoveryNodes discoveryNodes) {
+ public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) {
ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
- Set selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttribute));
-
- for (ShardRouting shardRouting : activeShards) {
+ Set selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttributes));
+ int seed = shuffler.nextSeed();
+ for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
if (selectedNodes.contains(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
- for (ShardRouting shardRouting : allInitializingShards) {
+ for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
if (selectedNodes.contains(shardRouting.currentNodeId())) {
ordered.add(shardRouting);
}
}
if (ordered.isEmpty()) {
- throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found");
+            throw new IllegalArgumentException("no data nodes with criteria [" +
+                Strings.arrayToCommaDelimitedString(nodeAttributes) + "] found for shard: " + shardId());
}
return new PlainShardIterator(shardId, ordered);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
index 70246026894..3fb6f55a919 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java
@@ -177,8 +177,8 @@ public class OperationRouting extends AbstractComponent {
ensureNodeIdExists(nodes, nodeId);
return indexShard.onlyNodeActiveInitializingShardsIt(nodeId);
case ONLY_NODES:
- String nodeAttribute = preference.substring(Preference.ONLY_NODES.type().length() + 1);
- return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttribute, nodes);
+ String nodeAttributes = preference.substring(Preference.ONLY_NODES.type().length() + 1);
+ return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttributes.split(","), nodes);
default:
throw new IllegalArgumentException("unknown preference [" + preferenceType + "]");
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
index 1ebd4699d1a..78e7e15d389 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -30,12 +29,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.AbstractRunnable;
-import org.elasticsearch.common.util.concurrent.FutureUtils;
-import org.elasticsearch.threadpool.ThreadPool;
-import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
/**
@@ -50,27 +44,20 @@ import java.util.concurrent.atomic.AtomicBoolean;
* actions.
*
*/
-public class RoutingService extends AbstractLifecycleComponent implements ClusterStateListener {
+public class RoutingService extends AbstractLifecycleComponent {
private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute";
- final ThreadPool threadPool;
private final ClusterService clusterService;
private final AllocationService allocationService;
private AtomicBoolean rerouting = new AtomicBoolean();
- private volatile long minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
- private volatile ScheduledFuture registeredNextDelayFuture;
@Inject
- public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) {
+ public RoutingService(Settings settings, ClusterService clusterService, AllocationService allocationService) {
super(settings);
- this.threadPool = threadPool;
this.clusterService = clusterService;
this.allocationService = allocationService;
- if (clusterService != null) {
- clusterService.addFirst(this);
- }
}
@Override
@@ -83,8 +70,6 @@ public class RoutingService extends AbstractLifecycleComponent i
@Override
protected void doClose() {
- FutureUtils.cancel(registeredNextDelayFuture);
- clusterService.remove(this);
}
public AllocationService getAllocationService() {
@@ -98,48 +83,6 @@ public class RoutingService extends AbstractLifecycleComponent i
performReroute(reason);
}
- @Override
- public void clusterChanged(ClusterChangedEvent event) {
- if (event.state().nodes().isLocalNodeElectedMaster()) {
- // Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
- // If the minimum of the currently relevant delay settings is larger than something we scheduled in the past,
- // we are guaranteed that the planned schedule will happen before any of the current shard delays are expired.
- long minDelaySetting = UnassignedInfo.findSmallestDelayedAllocationSettingNanos(settings, event.state());
- if (minDelaySetting <= 0) {
- logger.trace("no need to schedule reroute - no delayed unassigned shards, minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos);
- minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
- FutureUtils.cancel(registeredNextDelayFuture);
- } else if (minDelaySetting < minDelaySettingAtLastSchedulingNanos) {
- FutureUtils.cancel(registeredNextDelayFuture);
- minDelaySettingAtLastSchedulingNanos = minDelaySetting;
- TimeValue nextDelay = TimeValue.timeValueNanos(UnassignedInfo.findNextDelayedAllocationIn(event.state()));
- assert nextDelay.nanos() > 0 : "next delay must be non 0 as minDelaySetting is [" + minDelaySetting + "]";
- logger.info("delaying allocation for [{}] unassigned shards, next check in [{}]",
- UnassignedInfo.getNumberOfDelayedUnassigned(event.state()), nextDelay);
- registeredNextDelayFuture = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
- @Override
- protected void doRun() throws Exception {
- minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
- reroute("assign delayed unassigned shards");
- }
-
- @Override
- public void onFailure(Throwable t) {
- logger.warn("failed to schedule/execute reroute post unassigned shard", t);
- minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
- }
- });
- } else {
- logger.trace("no need to schedule reroute - current schedule reroute is enough. minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos);
- }
- }
- }
-
- // visible for testing
- long getMinDelaySettingAtLastSchedulingNanos() {
- return this.minDelaySettingAtLastSchedulingNanos;
- }
-
// visible for testing
protected void performReroute(String reason) {
try {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
index 60bb455dfe6..810f8c183da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -316,6 +316,7 @@ public final class ShardRouting implements Writeable, ToXContent {
public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
assert this.unassignedInfo != null : "can only update unassign info if they are already set";
+ assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed";
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state,
unassignedInfo, allocationId, expectedShardSize);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
index 2670363364d..5d6333972b3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -43,12 +44,10 @@ import java.io.IOException;
public final class UnassignedInfo implements ToXContent, Writeable {
public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
- private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);
public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
- Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
+ Setting.timeSetting("index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), Property.Dynamic,
Property.IndexScope);
-
/**
* Reason why the shard is in unassigned state.
*
@@ -103,24 +102,29 @@ public final class UnassignedInfo implements ToXContent, Writeable {
/**
* A better replica location is identified and causes the existing replica allocation to be cancelled.
*/
- REALLOCATED_REPLICA;
+ REALLOCATED_REPLICA,
+ /**
+ * Unassigned as a result of a failed primary while the replica was initializing.
+ */
+ PRIMARY_FAILED;
}
private final Reason reason;
private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
- private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
+ private final boolean delayed; // if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING
private final String message;
private final Throwable failure;
+ private final int failedAllocations;
/**
- * creates an UnassingedInfo object based **current** time
+ * creates an UnassignedInfo object based on **current** time
*
* @param reason the cause for making this shard unassigned. See {@link Reason} for more information.
* @param message more information about cause.
**/
public UnassignedInfo(Reason reason, String message) {
- this(reason, message, null, System.nanoTime(), System.currentTimeMillis());
+ this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false);
}
/**
@@ -129,49 +133,63 @@ public final class UnassignedInfo implements ToXContent, Writeable {
* @param failure the shard level failure that caused this shard to be unassigned, if exists.
* @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
* @param unassignedTimeMillis the time of unassignment used to display to in our reporting.
+ * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.
*/
- public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) {
+ public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, int failedAllocations,
+ long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed) {
this.reason = reason;
this.unassignedTimeMillis = unassignedTimeMillis;
this.unassignedTimeNanos = unassignedTimeNanos;
- this.lastComputedLeftDelayNanos = 0L;
+ this.delayed = delayed;
this.message = message;
this.failure = failure;
+ this.failedAllocations = failedAllocations;
+ assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) :
+ "failedAllocations: " + failedAllocations + " for reason " + reason;
assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
- }
-
- public UnassignedInfo(UnassignedInfo unassignedInfo, long newComputedLeftDelayNanos) {
- this.reason = unassignedInfo.reason;
- this.unassignedTimeMillis = unassignedInfo.unassignedTimeMillis;
- this.unassignedTimeNanos = unassignedInfo.unassignedTimeNanos;
- this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
- this.message = unassignedInfo.message;
- this.failure = unassignedInfo.failure;
+ assert !(delayed && reason != Reason.NODE_LEFT) : "shard can only be delayed if it is unassigned due to a node leaving";
}
public UnassignedInfo(StreamInput in) throws IOException {
this.reason = Reason.values()[(int) in.readByte()];
this.unassignedTimeMillis = in.readLong();
// As System.nanoTime() cannot be compared across different JVMs, reset it to now.
- // This means that in master failover situations, elapsed delay time is forgotten.
+ // This means that in master fail-over situations, elapsed delay time is forgotten.
this.unassignedTimeNanos = System.nanoTime();
- this.lastComputedLeftDelayNanos = 0L;
+ this.delayed = in.readBoolean();
this.message = in.readOptionalString();
this.failure = in.readThrowable();
+ this.failedAllocations = in.readVInt();
}
public void writeTo(StreamOutput out) throws IOException {
out.writeByte((byte) reason.ordinal());
out.writeLong(unassignedTimeMillis);
// Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
+ out.writeBoolean(delayed);
out.writeOptionalString(message);
out.writeThrowable(failure);
+ out.writeVInt(failedAllocations);
}
public UnassignedInfo readFrom(StreamInput in) throws IOException {
return new UnassignedInfo(in);
}
+ /**
+ * Returns the number of previously failed allocations of this shard.
+ */
+ public int getNumFailedAllocations() {
+ return failedAllocations;
+ }
+
+ /**
+ * Returns true if allocation of this shard is delayed due to {@link #INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING}
+ */
+ public boolean isDelayed() {
+ return delayed;
+ }
+
/**
* The reason why the shard is unassigned.
*/
@@ -224,50 +242,16 @@ public final class UnassignedInfo implements ToXContent, Writeable {
}
/**
- * The allocation delay value in nano seconds associated with the index (defaulting to node settings if not set).
- */
- public long getAllocationDelayTimeoutSettingNanos(Settings settings, Settings indexSettings) {
- if (reason != Reason.NODE_LEFT) {
- return 0;
- }
- TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
- return Math.max(0L, delayTimeout.nanos());
- }
-
- /**
- * The delay in nanoseconds until this unassigned shard can be reassigned. This value is cached and might be slightly out-of-date.
- * See also the {@link #updateDelay(long, Settings, Settings)} method.
- */
- public long getLastComputedLeftDelayNanos() {
- return lastComputedLeftDelayNanos;
- }
-
- /**
- * Calculates the delay left based on current time (in nanoseconds) and index/node settings.
+ * Calculates the delay left based on current time (in nanoseconds) and the delay defined by the index settings.
+     * Only relevant if shard is effectively delayed (see {@link #isDelayed()}).
+     * Returns 0 if delay is negative.
*
* @return calculated delay in nanoseconds
*/
- public long getRemainingDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
- final long delayTimeoutNanos = getAllocationDelayTimeoutSettingNanos(settings, indexSettings);
- if (delayTimeoutNanos == 0L) {
- return 0L;
- } else {
- assert nanoTimeNow >= unassignedTimeNanos;
- return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
- }
- }
-
- /**
- * Creates new UnassignedInfo object if delay needs updating.
- *
- * @return new Unassigned with updated delay, or this if no change in delay
- */
- public UnassignedInfo updateDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
- final long newComputedLeftDelayNanos = getRemainingDelay(nanoTimeNow, settings, indexSettings);
- if (lastComputedLeftDelayNanos == newComputedLeftDelayNanos) {
- return this;
- }
- return new UnassignedInfo(this, newComputedLeftDelayNanos);
+ public long getRemainingDelay(final long nanoTimeNow, final Settings indexSettings) {
+ long delayTimeoutNanos = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos();
+ assert nanoTimeNow >= unassignedTimeNanos;
+ return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
}
/**
@@ -276,56 +260,46 @@ public final class UnassignedInfo implements ToXContent, Writeable {
public static int getNumberOfDelayedUnassigned(ClusterState state) {
int count = 0;
for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
- if (shard.primary() == false) {
- long delay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
- if (delay > 0) {
- count++;
- }
+ if (shard.unassignedInfo().isDelayed()) {
+ count++;
}
}
return count;
}
/**
- * Finds the smallest delay expiration setting in nanos of all unassigned shards that are still delayed. Returns 0 if there are none.
+     * Finds the next (closest) delay expiration of a delayed shard in nanoseconds based on current time.
+ * Returns 0 if delay is negative.
+ * Returns -1 if no delayed shard is found.
*/
- public static long findSmallestDelayedAllocationSettingNanos(Settings settings, ClusterState state) {
- long minDelaySetting = Long.MAX_VALUE;
- for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
- if (shard.primary() == false) {
- IndexMetaData indexMetaData = state.metaData().index(shard.getIndexName());
- boolean delayed = shard.unassignedInfo().getLastComputedLeftDelayNanos() > 0;
- long delayTimeoutSetting = shard.unassignedInfo().getAllocationDelayTimeoutSettingNanos(settings, indexMetaData.getSettings());
- if (delayed && delayTimeoutSetting > 0 && delayTimeoutSetting < minDelaySetting) {
- minDelaySetting = delayTimeoutSetting;
+ public static long findNextDelayedAllocation(long currentNanoTime, ClusterState state) {
+ MetaData metaData = state.metaData();
+ RoutingTable routingTable = state.routingTable();
+ long nextDelayNanos = Long.MAX_VALUE;
+ for (ShardRouting shard : routingTable.shardsWithState(ShardRoutingState.UNASSIGNED)) {
+ UnassignedInfo unassignedInfo = shard.unassignedInfo();
+ if (unassignedInfo.isDelayed()) {
+ Settings indexSettings = metaData.index(shard.index()).getSettings();
+ // calculate next time to schedule
+ final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(currentNanoTime, indexSettings);
+ if (newComputedLeftDelayNanos < nextDelayNanos) {
+ nextDelayNanos = newComputedLeftDelayNanos;
}
}
}
- return minDelaySetting == Long.MAX_VALUE ? 0L : minDelaySetting;
- }
-
-
- /**
- * Finds the next (closest) delay expiration of an unassigned shard in nanoseconds. Returns 0 if there are none.
- */
- public static long findNextDelayedAllocationIn(ClusterState state) {
- long nextDelay = Long.MAX_VALUE;
- for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
- if (shard.primary() == false) {
- long nextShardDelay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
- if (nextShardDelay > 0 && nextShardDelay < nextDelay) {
- nextDelay = nextShardDelay;
- }
- }
- }
- return nextDelay == Long.MAX_VALUE ? 0L : nextDelay;
+ return nextDelayNanos == Long.MAX_VALUE ? -1L : nextDelayNanos;
}
public String shortSummary() {
StringBuilder sb = new StringBuilder();
sb.append("[reason=").append(reason).append("]");
sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]");
+ if (failedAllocations > 0) {
+ sb.append(", failed_attempts[").append(failedAllocations).append("]");
+ }
+ sb.append(", delayed=").append(delayed);
String details = getDetails();
+
if (details != null) {
sb.append(", details[").append(details).append("]");
}
@@ -342,6 +316,10 @@ public final class UnassignedInfo implements ToXContent, Writeable {
builder.startObject("unassigned_info");
builder.field("reason", reason);
builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));
+ if (failedAllocations > 0) {
+ builder.field("failed_attempts", failedAllocations);
+ }
+ builder.field("delayed", delayed);
String details = getDetails();
if (details != null) {
builder.field("details", details);
@@ -364,6 +342,12 @@ public final class UnassignedInfo implements ToXContent, Writeable {
if (unassignedTimeMillis != that.unassignedTimeMillis) {
return false;
}
+ if (delayed != that.delayed) {
+ return false;
+ }
+ if (failedAllocations != that.failedAllocations) {
+ return false;
+ }
if (reason != that.reason) {
return false;
}
@@ -371,12 +355,13 @@ public final class UnassignedInfo implements ToXContent, Writeable {
return false;
}
return !(failure != null ? !failure.equals(that.failure) : that.failure != null);
-
}
@Override
public int hashCode() {
int result = reason != null ? reason.hashCode() : 0;
+ result = 31 * result + Boolean.hashCode(delayed);
+ result = 31 * result + Integer.hashCode(failedAllocations);
result = 31 * result + Long.hashCode(unassignedTimeMillis);
result = 31 * result + (message != null ? message.hashCode() : 0);
result = 31 * result + (failure != null ? failure.hashCode() : 0);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index e1bbbb7f4ab..7dd9b1f8fec 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -53,6 +53,8 @@ import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
+
/**
* This service manages the node allocation of a cluster. For this reason the
@@ -90,7 +92,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
- StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo());
+ StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo(), currentNanoTime());
boolean changed = applyStartedShards(routingNodes, startedShards);
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
@@ -99,28 +101,27 @@ public class AllocationService extends AbstractComponent {
if (withReroute) {
reroute(allocation);
}
- final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
-
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
- logClusterHealthStateChange(
- new ClusterStateHealth(clusterState),
- new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
- "shards started [" + startedShardsAsString + "] ..."
- );
- return result;
+ return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ...");
+ }
+
+ protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
+ return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations());
}
- protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) {
- return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations());
-
- }
-
- protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes,
- RoutingExplanations explanations) {
+ protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
+ MetaData oldMetaData = allocation.metaData();
+ RoutingTable oldRoutingTable = allocation.routingTable();
+ RoutingNodes newRoutingNodes = allocation.routingNodes();
final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build();
MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable);
assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
+ logClusterHealthStateChange(
+ new ClusterStateHealth(allocation.metaData(), allocation.routingTable()),
+ new ClusterStateHealth(newMetaData, newRoutingTable),
+ reason
+ );
return new RoutingAllocation.Result(true, newRoutingTable, newMetaData, explanations);
}
@@ -216,28 +217,48 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
- FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo());
+ long currentNanoTime = currentNanoTime();
+ FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo(), currentNanoTime);
boolean changed = false;
// as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
List orderedFailedShards = new ArrayList<>(failedShards);
orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
+ UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
+ final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
- System.nanoTime(), System.currentTimeMillis()));
+ failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false));
}
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
- final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
- logClusterHealthStateChange(
- new ClusterStateHealth(clusterState),
- new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
- "shards failed [" + failedShardsAsString + "] ..."
- );
- return result;
+ return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
+ }
+
+ /**
+ * Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed.
+ */
+ private boolean removeDelayMarkers(RoutingAllocation allocation) {
+ final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
+ final MetaData metaData = allocation.metaData();
+ boolean changed = false;
+ while (unassignedIterator.hasNext()) {
+ ShardRouting shardRouting = unassignedIterator.next();
+ UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+ if (unassignedInfo.isDelayed()) {
+ final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
+ metaData.getIndexSafe(shardRouting.index()).getSettings());
+ if (newComputedLeftDelayNanos == 0) {
+ changed = true;
+ unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
+ unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false));
+ }
+ }
+ }
+ return changed;
}
/**
@@ -257,16 +278,13 @@ public class AllocationService extends AbstractComponent {
.collect(Collectors.joining(", "));
}
- public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
- return reroute(clusterState, commands, false);
- }
-
- public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {
+ public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
// this allows systems to dry run the commands, see the resulting cluster state, and act on it
- RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
+ clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// we ignore disable allocation, because commands are explicit
@@ -277,13 +295,7 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
- RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations);
- logClusterHealthStateChange(
- new ClusterStateHealth(clusterState),
- new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
- "reroute commands"
- );
- return result;
+ return buildResultAndLogHealthChange(allocation, "reroute commands", explanations);
}
@@ -305,18 +317,13 @@ public class AllocationService extends AbstractComponent {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
- RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
+ RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
+ clusterInfoService.getClusterInfo(), currentNanoTime(), false);
allocation.debugDecision(debug);
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
- RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
- logClusterHealthStateChange(
- new ClusterStateHealth(clusterState),
- new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
- reason
- );
- return result;
+ return buildResultAndLogHealthChange(allocation, reason);
}
private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
@@ -341,8 +348,7 @@ public class AllocationService extends AbstractComponent {
// now allocate all the unassigned to available nodes
if (allocation.routingNodes().unassigned().size() > 0) {
- updateLeftDelayOfUnassignedShards(allocation, settings);
-
+ changed |= removeDelayMarkers(allocation);
changed |= gatewayAllocator.allocateUnassigned(allocation);
}
@@ -351,22 +357,6 @@ public class AllocationService extends AbstractComponent {
return changed;
}
- // public for testing
- public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
- final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
- final MetaData metaData = allocation.metaData();
- while (unassignedIterator.hasNext()) {
- ShardRouting shardRouting = unassignedIterator.next();
- final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
- UnassignedInfo previousUnassignedInfo = shardRouting.unassignedInfo();
- UnassignedInfo updatedUnassignedInfo = previousUnassignedInfo.updateDelay(allocation.getCurrentNanoTime(), settings,
- indexMetaData.getSettings());
- if (updatedUnassignedInfo != previousUnassignedInfo) { // reference equality!
- unassignedIterator.updateUnassignedInfo(updatedUnassignedInfo);
- }
- }
- }
-
private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
@@ -436,8 +426,10 @@ public class AllocationService extends AbstractComponent {
changed = true;
// now, go over all the shards routing on the node, and fail them
for (ShardRouting shardRouting : node.copyShards()) {
- UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
- allocation.getCurrentNanoTime(), System.currentTimeMillis());
+ final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
+ boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
+ UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
+ null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed);
applyFailedShard(allocation, shardRouting, false, unassignedInfo);
}
// its a dead node, remove it, note, its important to remove it *after* we apply failed shard
@@ -457,8 +449,8 @@ public class AllocationService extends AbstractComponent {
boolean changed = false;
for (ShardRouting routing : replicas) {
changed |= applyFailedShard(allocation, routing, false,
- new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
- null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
+ new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
+ null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
}
return changed;
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
index a13862fed26..b1b0dfce1fe 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
@@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {
private final List<FailedShard> failedShards;
- public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
- super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
+ public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
+ super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
this.failedShards = failedShards;
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index 60ca3a8d5fd..584b141913f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -56,7 +56,7 @@ public class RoutingAllocation {
private final MetaData metaData;
- private RoutingExplanations explanations = new RoutingExplanations();
+ private final RoutingExplanations explanations;
/**
* Creates a new {@link RoutingAllocation.Result}
@@ -65,9 +65,7 @@ public class RoutingAllocation {
* @param metaData the {@link MetaData} this Result references
*/
public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
- this.changed = changed;
- this.routingTable = routingTable;
- this.metaData = metaData;
+ this(changed, routingTable, metaData, new RoutingExplanations());
}
/**
@@ -134,6 +132,8 @@ public class RoutingAllocation {
private boolean ignoreDisable = false;
+ private final boolean retryFailed;
+
private boolean debugDecision = false;
private boolean hasPendingAsyncFetch = false;
@@ -148,7 +148,7 @@ public class RoutingAllocation {
* @param clusterState cluster state before rerouting
* @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
*/
- public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {
+ public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
this.deciders = deciders;
this.routingNodes = routingNodes;
this.metaData = clusterState.metaData();
@@ -156,6 +156,7 @@ public class RoutingAllocation {
this.customs = clusterState.customs();
this.clusterInfo = clusterInfo;
this.currentNanoTime = currentNanoTime;
+ this.retryFailed = retryFailed;
}
/** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */
@@ -297,4 +298,8 @@ public class RoutingAllocation {
public void setHasPendingAsyncFetch() {
this.hasPendingAsyncFetch = true;
}
+
+ public boolean isRetryFailed() {
+ return retryFailed;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
index e9570edd9c3..4d1ac1408a2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
@@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {
private final List<? extends ShardRouting> startedShards;
- public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
- super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
+ public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
+ super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
this.startedShards = startedShards;
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
index 20918159619..0fb27a80154 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
+import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
@@ -228,4 +229,22 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
protected void extraXContent(XContentBuilder builder) throws IOException {
}
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ AbstractAllocateAllocationCommand other = (AbstractAllocateAllocationCommand) obj;
+ // Override equals and hashCode for testing
+ return Objects.equals(index, other.index) &&
+ Objects.equals(shardId, other.shardId) &&
+ Objects.equals(node, other.node);
+ }
+
+ @Override
+ public int hashCode() {
+ // Override equals and hashCode for testing
+ return Objects.hash(index, shardId, node);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
index d4191292cfc..08be17a8e98 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
@@ -125,7 +125,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
// we need to move the unassigned info back to treat it as if it was index creation
unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
- shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());
+ shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false);
}
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
index b651580ea74..8c47deee66f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
@@ -136,6 +136,4 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
return new RerouteExplanation(this, decision);
}
-
-
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
index 736018531fa..92c1ffa9921 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
@@ -22,13 +22,16 @@ package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
/**
- * This interface defines the basic methods of commands for allocation
+ * A command to move shards in some way.
+ *
+ * Commands are registered in {@link NetworkModule}.
*/
public interface AllocationCommand extends NamedWriteable, ToXContent {
interface Parser {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
index ca0eab6e33b..10ba3f55944 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
@@ -20,12 +20,12 @@
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -33,12 +33,13 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.Objects;
/**
* A simple {@link AllocationCommand} composite managing several
* {@link AllocationCommand} implementations
*/
-public class AllocationCommands {
+public class AllocationCommands extends ToXContentToBytes {
private final List<AllocationCommand> commands = new ArrayList<>();
/**
@@ -171,21 +172,31 @@ public class AllocationCommands {
return commands;
}
- /**
- * Writes {@link AllocationCommands} to a {@link XContentBuilder}
- *
- * @param commands {@link AllocationCommands} to write
- * @param builder {@link XContentBuilder} to use
- * @param params Parameters to use for building
- * @throws IOException if something bad happens while building the content
- */
- public static void toXContent(AllocationCommands commands, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("commands");
- for (AllocationCommand command : commands.commands) {
+ for (AllocationCommand command : commands) {
builder.startObject();
builder.field(command.name(), command);
builder.endObject();
}
builder.endArray();
+ return builder;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ AllocationCommands other = (AllocationCommands) obj;
+ // Override equals and hashCode for testing
+ return Objects.equals(commands, other.commands);
+ }
+
+ @Override
+ public int hashCode() {
+ // Override equals and hashCode for testing
+ return Objects.hashCode(commands);
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
index 0013061e8ea..2eb3af9d4f9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
@@ -83,4 +83,18 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAlloc
protected void extraXContent(XContentBuilder builder) throws IOException {
builder.field(ACCEPT_DATA_LOSS_FIELD, acceptDataLoss);
}
+
+ @Override
+ public boolean equals(Object obj) {
+ if (false == super.equals(obj)) {
+ return false;
+ }
+ BasePrimaryAllocationCommand other = (BasePrimaryAllocationCommand) obj;
+ return acceptDataLoss == other.acceptDataLoss;
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
index 60b0842b273..9adb3e30708 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
+import java.util.Objects;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@@ -240,4 +241,23 @@ public class CancelAllocationCommand implements AllocationCommand {
}
return new CancelAllocationCommand(index, shardId, nodeId, allowPrimary);
}
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ CancelAllocationCommand other = (CancelAllocationCommand) obj;
+ // Override equals and hashCode for testing
+ return Objects.equals(index, other.index) &&
+ Objects.equals(shardId, other.shardId) &&
+ Objects.equals(node, other.node) &&
+ Objects.equals(allowPrimary, other.allowPrimary);
+ }
+
+ @Override
+ public int hashCode() {
+ // Override equals and hashCode for testing
+ return Objects.hash(index, shardId, node, allowPrimary);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
index a2e1a54e515..69bd8f0eeca 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
+import java.util.Objects;
/**
* A command that moves a shard from a specific node to another node.
@@ -195,4 +196,23 @@ public class MoveAllocationCommand implements AllocationCommand {
}
return new MoveAllocationCommand(index, shardId, fromNode, toNode);
}
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ MoveAllocationCommand other = (MoveAllocationCommand) obj;
+ // Override equals and hashCode for testing
+ return Objects.equals(index, other.index) &&
+ Objects.equals(shardId, other.shardId) &&
+ Objects.equals(fromNode, other.fromNode) &&
+ Objects.equals(toNode, other.toNode);
+ }
+
+ @Override
+ public int hashCode() {
+ // Override equals and hashCode for testing
+ return Objects.hash(index, shardId, fromNode, toNode);
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
new file mode 100644
index 00000000000..6a8a0ccc5fa
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation decider that prevents shards from being allocated on any node if the shards allocation has been retried N times without
+ * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until
+ * the setting for index.allocation.max_retries is raised. The default value is 5.
+ * Note: This allocation decider also allows allocation of repeatedly failing shards when the /_cluster/reroute?retry_failed=true
+ * API is manually invoked. This allows single retries without raising the limits.
+ *
+ * @see RoutingAllocation#isRetryFailed()
+ */
+public class MaxRetryAllocationDecider extends AllocationDecider {
+
+ public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting("index.allocation.max_retries", 5, 0,
+ Setting.Property.Dynamic, Setting.Property.IndexScope);
+
+ public static final String NAME = "max_retry";
+
+ /**
+ * Initializes a new {@link MaxRetryAllocationDecider}
+ *
+ * @param settings {@link Settings} used by this {@link AllocationDecider}
+ */
+ @Inject
+ public MaxRetryAllocationDecider(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+ UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+ if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
+ final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
+ final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());
+ if (allocation.isRetryFailed()) { // manual allocation - retry
+ // if we are called via the _reroute API we ignore the failure counter and try to allocate
+ // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
+ // enough to manually retry.
+ return allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
+ + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+ + unassignedInfo.toString() + " - retrying once on manual allocation");
+ } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
+ return allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
+ + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
+ + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry");
+ }
+ }
+ return allocation.decision(Decision.YES, NAME, "shard has no previous failures");
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return canAllocate(shardRouting, allocation);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index 15c2b5c3939..ac4830fe0ec 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -455,7 +455,7 @@ public class ClusterService extends AbstractLifecycleComponent {
}
/** asserts that the current thread is the cluster state update thread */
- public boolean assertClusterStateThread() {
+ public static boolean assertClusterStateThread() {
assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) :
"not called from the cluster state update thread";
return true;
diff --git a/core/src/main/java/org/elasticsearch/common/Base64.java b/core/src/main/java/org/elasticsearch/common/Base64.java
deleted file mode 100644
index fa499a55d4d..00000000000
--- a/core/src/main/java/org/elasticsearch/common/Base64.java
+++ /dev/null
@@ -1,1621 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common;
-
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Locale;
-import java.util.Objects;
-
-/**
- *
- * The options parameter, which appears in a few places, is used to pass
- * several pieces of information to the encoder. In the "higher level" methods such as
- * encodeBytes( bytes, options ) the options parameter can be used to indicate such
- * things as first gzipping the bytes before encoding them, not inserting linefeeds,
- * and encoding using the URL-safe and Ordered dialects.
- *
- * Note, according to RFC3548,
- * Section 2.1, implementations should not add line feeds unless explicitly told
- * to do so. I've got Base64 set to this behavior now, although earlier versions
- * broke lines by default.
- *
- * The constants defined in Base64 can be OR-ed together to combine options, so you
- * might make a call like this:
- *
v2.3.7 - Fixed subtle bug when base 64 input stream contained the
- * value 01111111, which is an invalid base 64 character but should not
- * throw an ArrayIndexOutOfBoundsException either. Led to discovery of
- * mishandling (or potential for better handling) of other bad input
- * characters. You should now get an IOException if you try decoding
- * something that has bad characters in it.
- *
v2.3.6 - Fixed bug when breaking lines and the final byte of the encoded
- * string ended in the last column; the buffer was not properly shrunk and
- * contained an extra (null) byte that made it into the string.
- *
v2.3.5 - Fixed bug in {@code #encodeFromFile} where estimated buffer size
- * was wrong for files of size 31, 34, and 37 bytes.
- *
v2.3.4 - Fixed bug when working with gzipped streams whereby flushing
- * the Base64.OutputStream closed the Base64 encoding (by padding with equals
- * signs) too soon. Also added an option to suppress the automatic decoding
- * of gzipped streams. Also added experimental support for specifying a
- * class loader when using the
- * {@code #decodeToObject(java.lang.String, int, java.lang.ClassLoader)}
- * method.
- *
v2.3.3 - Changed default char encoding to US-ASCII which reduces the internal Java
- * footprint with its CharEncoders and so forth. Fixed some javadocs that were
- * inconsistent. Removed imports and specified things like java.io.IOException
- * explicitly inline.
- *
v2.3.2 - Reduced memory footprint! Finally refined the "guessing" of how big the
- * final encoded data will be so that the code doesn't have to create two output
- * arrays: an oversized initial one and then a final, exact-sized one. Big win
- * when using the {@link #encodeBytesToBytes(byte[])} family of methods (and not
- * using the gzip options which uses a different mechanism with streams and stuff).
- *
v2.3.1 - Added {@link #encodeBytesToBytes(byte[], int, int, int)} and some
- * similar helper methods to be more efficient with memory by not returning a
- * String but just a byte array.
- *
v2.3 - This is not a drop-in replacement! This is two years of comments
- * and bug fixes queued up and finally executed. Thanks to everyone who sent
- * me stuff, and I'm sorry I wasn't able to distribute your fixes to everyone else.
- * Much bad coding was cleaned up including throwing exceptions where necessary
- * instead of returning null values or something similar. Here are some changes
- * that may affect you:
- *
- *
Does not break lines, by default. This is to keep in compliance with
- * RFC3548.
- *
Throws exceptions instead of returning null values. Because some operations
- * (especially those that may permit the GZIP option) use IO streams, there
- * is a possibility of an java.io.IOException being thrown. After some discussion and
- * thought, I've changed the behavior of the methods to throw java.io.IOExceptions
- * rather than return null if ever there's an error. I think this is more
- * appropriate, though it will require some changes to your code. Sorry,
- * it should have been done this way to begin with.
- *
Removed all references to System.out, System.err, and the like.
- * Shame on me. All I can say is sorry they were ever there.
- *
Throws NullPointerExceptions and IllegalArgumentExceptions as needed
- * such as when passed arrays are null or offsets are invalid.
- *
Cleaned up as much javadoc as I could to avoid any javadoc warnings.
- * This was especially annoying before for people who were thorough in their
- * own projects and then had gobs of javadoc warnings on this file.
- *
- *
v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
- * when using very small files (~< 40 bytes).
- *
v2.2 - Added some helper methods for encoding/decoding directly from
- * one file to the next. Also added a main() method to support command line
- * encoding/decoding from one file to the next. Also added these Base64 dialects:
- *
- *
The default is RFC3548 format.
- *
Calling Base64.setFormat(Base64.BASE64_FORMAT.URLSAFE_FORMAT) generates
- * URL and file name friendly format as described in Section 4 of RFC3548.
- * http://www.faqs.org/rfcs/rfc3548.html
- *
Calling Base64.setFormat(Base64.BASE64_FORMAT.ORDERED_FORMAT) generates
- * URL and file name friendly format that preserves lexical ordering as described
- * in http://www.faqs.org/qa/rfcc-1940.html
- *
- * Special thanks to Jim Kellerman at http://www.powerset.com/
- * for contributing the new Base64 dialects.
- *
- *
v2.1 - Cleaned up javadoc comments and unused variables and methods. Added
- * some convenience methods for reading and writing to and from files.
- *
v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems
- * with other encodings (like EBCDIC).
- *
v2.0.1 - Fixed an error when decoding a single byte, that is, when the
- * encoded data was a single byte.
- *
v2.0 - I got rid of methods that used booleans to set options.
- * Now everything is more consolidated and cleaner. The code now detects
- * when data that's being decoded is gzip-compressed and will decompress it
- * automatically. Generally things are cleaner. You'll probably have to
- * change some method calls that you were making to support the new
- * options format (ints that you "OR" together).
- *
v1.5.1 - Fixed bug when decompressing and decoding to a
- * byte[] using decode( String s, boolean gzipCompressed ).
- * Added the ability to "suspend" encoding in the Output Stream so
- * you can turn on and off the encoding if you need to embed base64
- * data in an otherwise "normal" stream (like an XML file).
- *
v1.5 - Output stream pases on flush() command but doesn't do anything itself.
- * This helps when using GZIP streams.
- * Added the ability to GZip-compress objects before encoding them.
- *
v1.4 - Added helper methods to read/write files.
- *
v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.
- *
v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream
- * where last buffer being read, if not completely full, was not returned.
- *
v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.
- *
v1.3.3 - Fixed I/O streams which were totally messed up.
- *
- *
- * I am placing this code in the Public Domain. Do with it as you will.
- * This software comes with no guarantees or warranties but with
- * plenty of well-wishing instead!
- * Please visit http://iharder.net/base64
- * periodically to check for updates or to contribute improvements.
- *
- * @author Robert Harder
- * @author rob@iharder.net
- * @version 2.3.7
- */
-public final class Base64 {
-
-/* ******** P U B L I C F I E L D S ******** */
-
-
- /**
- * No options specified. Value is zero.
- */
- public final static int NO_OPTIONS = 0;
-
- /**
- * Specify encoding in first bit. Value is one.
- */
- public final static int ENCODE = 1;
-
-
- /**
- * Specify decoding in first bit. Value is zero.
- */
- public final static int DECODE = 0;
-
-
- /**
- * Specify that data should be gzip-compressed in second bit. Value is two.
- */
- public final static int GZIP = 2;
-
- /**
- * Specify that gzipped data should not be automatically gunzipped.
- */
- public final static int DONT_GUNZIP = 4;
-
-
- /**
- * Do break lines when encoding. Value is 8.
- */
- public final static int DO_BREAK_LINES = 8;
-
- /**
- * Encode using Base64-like encoding that is URL- and Filename-safe as described
- * in Section 4 of RFC3548:
- * http://www.faqs.org/rfcs/rfc3548.html.
- * It is important to note that data encoded this way is not officially valid Base64,
- * or at the very least should not be called Base64 without also specifying that is
- * was encoded using the URL- and Filename-safe dialect.
- */
- public final static int URL_SAFE = 16;
-
-
- /**
- * Encode using the special "ordered" dialect of Base64 described here:
- * http://www.faqs.org/qa/rfcc-1940.html.
- */
- public final static int ORDERED = 32;
-
-
-/* ******** P R I V A T E F I E L D S ******** */
-
-
- /**
- * Maximum line length (76) of Base64 output.
- */
- private final static int MAX_LINE_LENGTH = 76;
-
-
- /**
- * The equals sign (=) as a byte.
- */
- private final static byte EQUALS_SIGN = (byte) '=';
-
-
- /**
- * The new line character (\n) as a byte.
- */
- private final static byte NEW_LINE = (byte) '\n';
-
-
- /**
- * Preferred encoding.
- */
- public final static Charset PREFERRED_ENCODING = Charset.forName("US-ASCII");
-
-
- private final static byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding
- private final static byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding
-
-
-/* ******** S T A N D A R D B A S E 6 4 A L P H A B E T ******** */
-
- /**
- * The 64 valid Base64 values.
- */
- /* Host platform me be something funny like EBCDIC, so we hardcode these values. */
- private final static byte[] _STANDARD_ALPHABET = {
- (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
- (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
- (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
- (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
- (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
- (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
- (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
- (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z',
- (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5',
- (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '+', (byte) '/'
- };
-
-
- /**
- * Translates a Base64 value to either its 6-bit reconstruction value
- * or a negative number indicating some other meaning.
- */
- private final static byte[] _STANDARD_DECODABET = {
- -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
- -5, -5, // Whitespace: Tab and Linefeed
- -9, -9, // Decimal 11 - 12
- -5, // Whitespace: Carriage Return
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
- -9, -9, -9, -9, -9, // Decimal 27 - 31
- -5, // Whitespace: Space
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
- 62, // Plus sign at decimal 43
- -9, -9, -9, // Decimal 44 - 46
- 63, // Slash at decimal 47
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine
- -9, -9, -9, // Decimal 58 - 60
- -1, // Equals sign at decimal 61
- -9, -9, -9, // Decimal 62 - 64
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N'
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z'
- -9, -9, -9, -9, -9, -9, // Decimal 91 - 96
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm'
- 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z'
- -9, -9, -9, -9, -9 // Decimal 123 - 127
- , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
- };
-
-
-/* ******** U R L S A F E B A S E 6 4 A L P H A B E T ******** */
-
- /**
- * Used in the URL- and Filename-safe dialect described in Section 4 of RFC3548:
- * http://www.faqs.org/rfcs/rfc3548.html.
- * Notice that the last two bytes become "hyphen" and "underscore" instead of "plus" and "slash."
- */
- private final static byte[] _URL_SAFE_ALPHABET = {
- (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
- (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
- (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
- (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
- (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
- (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
- (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
- (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z',
- (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5',
- (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '-', (byte) '_'
- };
-
- /**
- * Used in decoding URL- and Filename-safe dialects of Base64.
- */
- private final static byte[] _URL_SAFE_DECODABET = {
- -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
- -5, -5, // Whitespace: Tab and Linefeed
- -9, -9, // Decimal 11 - 12
- -5, // Whitespace: Carriage Return
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
- -9, -9, -9, -9, -9, // Decimal 27 - 31
- -5, // Whitespace: Space
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
- -9, // Plus sign at decimal 43
- -9, // Decimal 44
- 62, // Minus sign at decimal 45
- -9, // Decimal 46
- -9, // Slash at decimal 47
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine
- -9, -9, -9, // Decimal 58 - 60
- -1, // Equals sign at decimal 61
- -9, -9, -9, // Decimal 62 - 64
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N'
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z'
- -9, -9, -9, -9, // Decimal 91 - 94
- 63, // Underscore at decimal 95
- -9, // Decimal 96
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm'
- 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z'
- -9, -9, -9, -9, -9 // Decimal 123 - 127
- , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
- };
-
-
-/* ******** O R D E R E D B A S E 6 4 A L P H A B E T ******** */
-
- /**
- * I don't get the point of this technique, but someone requested it,
- * and it is described here:
- * http://www.faqs.org/qa/rfcc-1940.html.
- */
- private final static byte[] _ORDERED_ALPHABET = {
- (byte) '-',
- (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4',
- (byte) '5', (byte) '6', (byte) '7', (byte) '8', (byte) '9',
- (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
- (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
- (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
- (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
- (byte) '_',
- (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
- (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
- (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
- (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z'
- };
-
- /**
- * Used in decoding the "ordered" dialect of Base64.
- */
- private final static byte[] _ORDERED_DECODABET = {
- -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8
- -5, -5, // Whitespace: Tab and Linefeed
- -9, -9, // Decimal 11 - 12
- -5, // Whitespace: Carriage Return
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26
- -9, -9, -9, -9, -9, // Decimal 27 - 31
- -5, // Whitespace: Space
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42
- -9, // Plus sign at decimal 43
- -9, // Decimal 44
- 0, // Minus sign at decimal 45
- -9, // Decimal 46
- -9, // Slash at decimal 47
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // Numbers zero through nine
- -9, -9, -9, // Decimal 58 - 60
- -1, // Equals sign at decimal 61
- -9, -9, -9, // Decimal 62 - 64
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, // Letters 'A' through 'M'
- 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, // Letters 'N' through 'Z'
- -9, -9, -9, -9, // Decimal 91 - 94
- 37, // Underscore at decimal 95
- -9, // Decimal 96
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, // Letters 'a' through 'm'
- 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, // Letters 'n' through 'z'
- -9, -9, -9, -9, -9 // Decimal 123 - 127
- , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243
- -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255
- };
-
-
-/* ******** D E T E R M I N E W H I C H A L H A B E T ******** */
-
-
- /**
- * Returns one of the _SOMETHING_ALPHABET byte arrays depending on
- * the options specified.
- * It's possible, though silly, to specify ORDERED and URLSAFE
- * in which case one of them will be picked, though there is
- * no guarantee as to which one will be picked.
- */
- private final static byte[] getAlphabet(int options) {
- if ((options & URL_SAFE) == URL_SAFE) {
- return _URL_SAFE_ALPHABET;
- } else if ((options & ORDERED) == ORDERED) {
- return _ORDERED_ALPHABET;
- } else {
- return _STANDARD_ALPHABET;
- }
- } // end getAlphabet
-
-
- /**
- * Returns one of the _SOMETHING_DECODABET byte arrays depending on
- * the options specified.
- * It's possible, though silly, to specify ORDERED and URL_SAFE
- * in which case one of them will be picked, though there is
- * no guarantee as to which one will be picked.
- */
- private final static byte[] getDecodabet(int options) {
- if ((options & URL_SAFE) == URL_SAFE) {
- return _URL_SAFE_DECODABET;
- } else if ((options & ORDERED) == ORDERED) {
- return _ORDERED_DECODABET;
- } else {
- return _STANDARD_DECODABET;
- }
- } // end getAlphabet
-
-
- /**
- * Defeats instantiation.
- */
- private Base64() {
- }
-
-
-/* ******** E N C O D I N G M E T H O D S ******** */
-
-
- /**
- * Encodes up to the first three bytes of array threeBytes
- * and returns a four-byte array in Base64 notation.
- * The actual number of significant bytes in your array is
- * given by numSigBytes.
- * The array threeBytes needs only be as big as
- * numSigBytes.
- * Code can reuse a byte array by passing a four-byte array as b4.
- *
- * @param b4 A reusable byte array to reduce array instantiation
- * @param threeBytes the array to convert
- * @param numSigBytes the number of significant bytes in your array
- * @return four byte array in Base64 notation.
- * @since 1.5.1
- */
- private static byte[] encode3to4(byte[] b4, byte[] threeBytes, int numSigBytes, int options) {
- encode3to4(threeBytes, 0, numSigBytes, b4, 0, options);
- return b4;
- } // end encode3to4
-
-
- /**
- *
Encodes up to three bytes of the array source
- * and writes the resulting four Base64 bytes to destination.
- * The source and destination arrays can be manipulated
- * anywhere along their length by specifying
- * srcOffset and destOffset.
- * This method does not check to make sure your arrays
- * are large enough to accommodate srcOffset + 3 for
- * the source array or destOffset + 4 for
- * the destination array.
- * The actual number of significant bytes in your array is
- * given by numSigBytes.
- *
This is the lowest level of the encoding methods with
- * all possible parameters.
- *
- * @param source the array to convert
- * @param srcOffset the index where conversion begins
- * @param numSigBytes the number of significant bytes in your array
- * @param destination the array to hold the conversion
- * @param destOffset the index where output will be put
- * @return the destination array
- * @since 1.3
- */
- private static byte[] encode3to4(
- byte[] source, int srcOffset, int numSigBytes,
- byte[] destination, int destOffset, int options) {
-
- byte[] ALPHABET = getAlphabet(options);
-
- // 1 2 3
- // 01234567890123456789012345678901 Bit position
- // --------000000001111111122222222 Array position from threeBytes
- // --------| || || || | Six bit groups to index ALPHABET
- // >>18 >>12 >> 6 >> 0 Right shift necessary
- // 0x3f 0x3f 0x3f Additional AND
-
- // Create buffer with zero-padding if there are only one or two
- // significant bytes passed in the array.
- // We have to shift left 24 in order to flush out the 1's that appear
- // when Java treats a value as negative that is cast from a byte to an int.
- int inBuff = (numSigBytes > 0 ? ((source[srcOffset] << 24) >>> 8) : 0)
- | (numSigBytes > 1 ? ((source[srcOffset + 1] << 24) >>> 16) : 0)
- | (numSigBytes > 2 ? ((source[srcOffset + 2] << 24) >>> 24) : 0);
-
- switch (numSigBytes) {
- case 3:
- destination[destOffset] = ALPHABET[(inBuff >>> 18)];
- destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
- destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
- destination[destOffset + 3] = ALPHABET[(inBuff) & 0x3f];
- return destination;
-
- case 2:
- destination[destOffset] = ALPHABET[(inBuff >>> 18)];
- destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
- destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
- destination[destOffset + 3] = EQUALS_SIGN;
- return destination;
-
- case 1:
- destination[destOffset] = ALPHABET[(inBuff >>> 18)];
- destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
- destination[destOffset + 2] = EQUALS_SIGN;
- destination[destOffset + 3] = EQUALS_SIGN;
- return destination;
-
- default:
- return destination;
- } // end switch
- } // end encode3to4
-
-
- /**
- * Performs Base64 encoding on the raw ByteBuffer,
- * writing it to the encoded ByteBuffer.
- * This is an experimental feature. Currently it does not
- * pass along any options (such as {@link #DO_BREAK_LINES}
- * or {@link #GZIP}.
- *
- * @param raw input buffer
- * @param encoded output buffer
- * @since 2.3
- */
- public static void encode(java.nio.ByteBuffer raw, java.nio.ByteBuffer encoded) {
- byte[] raw3 = new byte[3];
- byte[] enc4 = new byte[4];
-
- while (raw.hasRemaining()) {
- int rem = Math.min(3, raw.remaining());
- raw.get(raw3, 0, rem);
- Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS);
- encoded.put(enc4);
- } // end input remaining
- }
-
-
- /**
- * Performs Base64 encoding on the raw ByteBuffer,
- * writing it to the encoded CharBuffer.
- * This is an experimental feature. Currently it does not
- * pass along any options (such as {@link #DO_BREAK_LINES}
- * or {@link #GZIP}.
- *
- * @param raw input buffer
- * @param encoded output buffer
- * @since 2.3
- */
- public static void encode(java.nio.ByteBuffer raw, java.nio.CharBuffer encoded) {
- byte[] raw3 = new byte[3];
- byte[] enc4 = new byte[4];
-
- while (raw.hasRemaining()) {
- int rem = Math.min(3, raw.remaining());
- raw.get(raw3, 0, rem);
- Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS);
- for (int i = 0; i < 4; i++) {
- encoded.put((char) (enc4[i] & 0xFF));
- }
- } // end input remaining
- }
-
-
-
- /**
- * Encodes a byte array into Base64 notation.
- * Does not GZip-compress data.
- *
- * @param source The data to convert
- * @return The data in Base64-encoded form
- * @throws NullPointerException if source array is null
- * @since 1.4
- */
- public static String encodeBytes(byte[] source) {
- // Since we're not going to have the GZIP encoding turned on,
- // we're not going to have an java.io.IOException thrown, so
- // we should not force the user to have to catch it.
- String encoded = null;
- try {
- encoded = encodeBytes(source, 0, source.length, NO_OPTIONS);
- } catch (java.io.IOException ex) {
- // not sure why this was an assertion before, running with assertions disabled would mean swallowing this exception
- throw new IllegalStateException(ex);
- } // end catch
- assert encoded != null;
- return encoded;
- } // end encodeBytes
-
-
- /**
- * Encodes a byte array into Base64 notation.
- *
- * Example options:
- * GZIP: gzip-compresses object before encoding it.
- * DO_BREAK_LINES: break lines at 76 characters
- * Note: Technically, this makes your encoding non-compliant.
- *
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- *
As of v 2.3, if there is an error with the GZIP stream,
- * the method will throw an java.io.IOException. This is new to v2.3!
- * In earlier versions, it just returned a null value, but
- * in retrospect that's a pretty poor way to handle it.
- *
- * @param source The data to convert
- * @param options Specified options
- * @return The Base64-encoded data as a String
- * @throws java.io.IOException if there is an error
- * @throws NullPointerException if source array is null
- * @see Base64#GZIP
- * @see Base64#DO_BREAK_LINES
- * @since 2.0
- */
- public static String encodeBytes(byte[] source, int options) throws java.io.IOException {
- return encodeBytes(source, 0, source.length, options);
- } // end encodeBytes
-
- /**
- * Encodes a byte array into Base64 notation.
- * Does not GZip-compress data.
- *
- * As of v 2.3, if there is an error,
- * the method will throw an java.io.IOException. This is new to v2.3!
- * In earlier versions, it just returned a null value, but
- * in retrospect that's a pretty poor way to handle it.
- *
- * @param source The data to convert
- * @param off Offset in array where conversion should begin
- * @param len Length of data to convert
- * @return The Base64-encoded data as a String
- * @throws NullPointerException if source array is null
- * @throws IllegalArgumentException if source array, offset, or length are invalid
- * @since 1.4
- */
- public static String encodeBytes(byte[] source, int off, int len) {
- // Since we're not going to have the GZIP encoding turned on,
- // we're not going to have an java.io.IOException thrown, so
- // we should not force the user to have to catch it.
- String encoded = null;
- try {
- encoded = encodeBytes(source, off, len, NO_OPTIONS);
- } catch (java.io.IOException ex) {
- throw new IllegalStateException(ex);
- } // end catch
- assert encoded != null;
- return encoded;
- } // end encodeBytes
-
-
- /**
- * Encodes a byte array into Base64 notation.
- *
- * Example options:
- * GZIP: gzip-compresses object before encoding it.
- * DO_BREAK_LINES: break lines at 76 characters
- * Note: Technically, this makes your encoding non-compliant.
- *
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- *
- * As of v 2.3, if there is an error with the GZIP stream,
- * the method will throw an java.io.IOException. This is new to v2.3!
- * In earlier versions, it just returned a null value, but
- * in retrospect that's a pretty poor way to handle it.
- *
- * @param source The data to convert
- * @param off Offset in array where conversion should begin
- * @param len Length of data to convert
- * @param options Specified options
- * @return The Base64-encoded data as a String
- * @throws java.io.IOException if there is an error
- * @throws NullPointerException if source array is null
- * @throws IllegalArgumentException if source array, offset, or length are invalid
- * @see Base64#GZIP
- * @see Base64#DO_BREAK_LINES
- * @since 2.0
- */
- public static String encodeBytes(byte[] source, int off, int len, int options) throws java.io.IOException {
- byte[] encoded = encodeBytesToBytes(source, off, len, options);
-
- // Return value according to relevant encoding.
- return new String(encoded, PREFERRED_ENCODING);
-
- } // end encodeBytes
-
-
- /**
- * Similar to {@link #encodeBytes(byte[])} but returns
- * a byte array instead of instantiating a String. This is more efficient
- * if you're working with I/O streams and have large data sets to encode.
- *
- * @param source The data to convert
- * @return The Base64-encoded data as a byte[] (of ASCII characters)
- * @throws NullPointerException if source array is null
- * @since 2.3.1
- */
- public static byte[] encodeBytesToBytes(byte[] source) {
- byte[] encoded = null;
- try {
- encoded = encodeBytesToBytes(source, 0, source.length, Base64.NO_OPTIONS);
- } catch (java.io.IOException ex) {
- throw new IllegalStateException("IOExceptions only come from GZipping, which is turned off: ", ex);
- }
- return encoded;
- }
-
-
- /**
- * Similar to {@link #encodeBytes(byte[], int, int, int)} but returns
- * a byte array instead of instantiating a String. This is more efficient
- * if you're working with I/O streams and have large data sets to encode.
- *
- * @param source The data to convert
- * @param off Offset in array where conversion should begin
- * @param len Length of data to convert
- * @param options Specified options
- * @return The Base64-encoded data as a String
- * @throws java.io.IOException if there is an error
- * @throws NullPointerException if source array is null
- * @throws IllegalArgumentException if source array, offset, or length are invalid
- * @see Base64#GZIP
- * @see Base64#DO_BREAK_LINES
- * @since 2.3.1
- */
- public static byte[] encodeBytesToBytes(byte[] source, int off, int len, int options) throws java.io.IOException {
- Objects.requireNonNull(source, "Cannot serialize a null array.");
-
- if (off < 0) {
- throw new IllegalArgumentException("Cannot have negative offset: " + off);
- } // end if: off < 0
-
- if (len < 0) {
- throw new IllegalArgumentException("Cannot have length offset: " + len);
- } // end if: len < 0
-
- if (off + len > source.length) {
- throw new IllegalArgumentException(
- String.format(Locale.ROOT, "Cannot have offset of %d and length of %d with array of length %d", off, len, source.length));
- } // end if: off < 0
-
- // Compress?
- if ((options & GZIP) != 0) {
- return encodeCompressedBytes(source, off, len, options);
- } // end if: compress
-
- // Else, don't compress. Better not to use streams at all then.
- else {
- return encodeNonCompressedBytes(source, off, len, options);
- } // end else: don't compress
-
- } // end encodeBytesToBytes
-
- private static byte[] encodeNonCompressedBytes(byte[] source, int off, int len, int options) {
- boolean breakLines = (options & DO_BREAK_LINES) != 0;
-
- //int len43 = len * 4 / 3;
- //byte[] outBuff = new byte[ ( len43 ) // Main 4:3
- // + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding
- // + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines
- // Try to determine more precisely how big the array needs to be.
- // If we get it right, we don't have to do an array copy, and
- // we save a bunch of memory.
- int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0); // Bytes needed for actual encoding
- if (breakLines) {
- encLen += encLen / MAX_LINE_LENGTH; // Plus extra newline characters
- }
- byte[] outBuff = new byte[encLen];
-
-
- int d = 0;
- int e = 0;
- int len2 = len - 2;
- int lineLength = 0;
- for (; d < len2; d += 3, e += 4) {
- encode3to4(source, d + off, 3, outBuff, e, options);
-
- lineLength += 4;
- if (breakLines && lineLength >= MAX_LINE_LENGTH) {
- outBuff[e + 4] = NEW_LINE;
- e++;
- lineLength = 0;
- } // end if: end of line
- } // en dfor: each piece of array
-
- if (d < len) {
- encode3to4(source, d + off, len - d, outBuff, e, options);
- e += 4;
- } // end if: some padding needed
-
-
- // Only resize array if we didn't guess it right.
- if (e <= outBuff.length - 1) {
- // If breaking lines and the last byte falls right at
- // the line length (76 bytes per line), there will be
- // one extra byte, and the array will need to be resized.
- // Not too bad of an estimate on array size, I'd say.
- byte[] finalOut = new byte[e];
- System.arraycopy(outBuff, 0, finalOut, 0, e);
- //System.err.println("Having to resize array from " + outBuff.length + " to " + e );
- return finalOut;
- } else {
- //System.err.println("No need to resize array.");
- return outBuff;
- }
- }
-
- private static byte[] encodeCompressedBytes(byte[] source, int off, int len, int options) throws IOException {
- java.io.ByteArrayOutputStream baos = null;
- java.util.zip.GZIPOutputStream gzos = null;
- OutputStream b64os = null;
-
- try {
- // GZip -> Base64 -> ByteArray
- baos = new java.io.ByteArrayOutputStream();
- b64os = new OutputStream(baos, ENCODE | options);
- gzos = new java.util.zip.GZIPOutputStream(b64os);
-
- gzos.write(source, off, len);
- gzos.close();
- } // end try
- catch (IOException e) {
- // Catch it and then throw it immediately so that
- // the finally{} block is called for cleanup.
- throw e;
- } // end catch
- finally {
- try {
- gzos.close();
- } catch (Exception e) {
- }
- try {
- b64os.close();
- } catch (Exception e) {
- }
- try {
- baos.close();
- } catch (Exception e) {
- }
- } // end finally
-
- return baos.toByteArray();
- }
-
-
-/* ******** D E C O D I N G M E T H O D S ******** */
-
-
- /**
- * Decodes four bytes from array source
- * and writes the resulting bytes (up to three of them)
- * to destination.
- * The source and destination arrays can be manipulated
- * anywhere along their length by specifying
- * srcOffset and destOffset.
- * This method does not check to make sure your arrays
- * are large enough to accommodate srcOffset + 4 for
- * the source array or destOffset + 3 for
- * the destination array.
- * This method returns the actual number of bytes that
- * were converted from the Base64 encoding.
- *
 - * This is the lowest level of the decoding methods with
- * all possible parameters.
- *
- * @param source the array to convert
- * @param srcOffset the index where conversion begins
- * @param destination the array to hold the conversion
- * @param destOffset the index where output will be put
- * @param options alphabet type is pulled from this (standard, url-safe, ordered)
- * @return the number of decoded bytes converted
- * @throws NullPointerException if source or destination arrays are null
- * @throws IllegalArgumentException if srcOffset or destOffset are invalid
- * or there is not enough room in the array.
- * @since 1.3
- */
- private static int decode4to3(byte[] source, int srcOffset, byte[] destination, int destOffset, int options) {
- // Lots of error checking and exception throwing
- Objects.requireNonNull(source, "Source array was null.");
- Objects.requireNonNull(destination, "Destination array was null.");
- if (srcOffset < 0 || srcOffset + 3 >= source.length) {
- throw new IllegalArgumentException(String.format(Locale.ROOT,
- "Source array with length %d cannot have offset of %d and still process four bytes.", source.length, srcOffset));
- } // end if
- if (destOffset < 0 || destOffset + 2 >= destination.length) {
- throw new IllegalArgumentException(String.format(Locale.ROOT,
- "Destination array with length %d cannot have offset of %d and still store three bytes.", destination.length, destOffset));
- } // end if
-
- byte[] DECODABET = getDecodabet(options);
-
-
- // Two ways to do the same thing. Don't know which way I like best.
- //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
- // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 );
- int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
- | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12);
-
- destination[destOffset] = (byte) (outBuff >>> 16);
-
- // Example: Dk==
- if (source[srcOffset + 2] == EQUALS_SIGN) {
- return 1;
- }
-
- outBuff |= ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6);
- destination[destOffset + 1] = (byte) (outBuff >>> 8);
-
- // Example: DkL=
- if (source[srcOffset + 3] == EQUALS_SIGN) {
- return 2;
- }
-
- outBuff |= ((DECODABET[source[srcOffset + 3]] & 0xFF));
- destination[destOffset + 2] = (byte) (outBuff);
-
- // Example: DkLE
- return 3;
- }
-
-
- /**
- * Low-level access to decoding ASCII characters in
- * the form of a byte array. Ignores GUNZIP option, if
- * it's set. This is not generally a recommended method,
- * although it is used internally as part of the decoding process.
- * Special case: if len = 0, an empty array is returned. Still,
- * if you need more speed and reduced memory footprint (and aren't
- * gzipping), consider this method.
- *
- * @param source The Base64 encoded data
- * @return decoded data
- * @since 2.3.1
- */
- public static byte[] decode(byte[] source)
- throws java.io.IOException {
- byte[] decoded = null;
-// try {
- decoded = decode(source, 0, source.length, Base64.NO_OPTIONS);
-// } catch( java.io.IOException ex ) {
-// assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage();
-// }
- return decoded;
- }
-
-
- /**
- * Low-level access to decoding ASCII characters in
- * the form of a byte array. Ignores GUNZIP option, if
- * it's set. This is not generally a recommended method,
- * although it is used internally as part of the decoding process.
- * Special case: if len = 0, an empty array is returned. Still,
- * if you need more speed and reduced memory footprint (and aren't
- * gzipping), consider this method.
- *
- * @param source The Base64 encoded data
- * @param off The offset of where to begin decoding
- * @param len The length of characters to decode
- * @param options Can specify options such as alphabet type to use
- * @return decoded data
- * @throws java.io.IOException If bogus characters exist in source data
- * @since 1.3
- */
- public static byte[] decode(byte[] source, int off, int len, int options) throws java.io.IOException {
- // Lots of error checking and exception throwing
- Objects.requireNonNull(source, "Cannot decode null source array.");
- if (off < 0 || off + len > source.length) {
- throw new IllegalArgumentException(String.format(Locale.ROOT,
- "Source array with length %d cannot have offset of %d and process %d bytes.", source.length, off, len));
- } // end if
-
- if (len == 0) {
- return new byte[0];
- } else if (len < 4) {
- throw new IllegalArgumentException(
- "Base64-encoded string must have at least four characters, but length specified was " + len);
- } // end if
-
- byte[] DECODABET = getDecodabet(options);
-
- int len34 = len * 3 / 4; // Estimate on array size
- byte[] outBuff = new byte[len34]; // Upper limit on size of output
-
- int outBuffPosn = decode(source, off, len, options, DECODABET, outBuff);
-
- byte[] out = new byte[outBuffPosn];
- System.arraycopy(outBuff, 0, out, 0, outBuffPosn);
- return out;
- } // end decode
-
- private static int decode(byte[] source, int off, int len, int options, byte[] DECODABET, byte[] outBuff) throws IOException {
- int outBuffPosn = 0; // Keep track of where we're writing
- byte[] b4 = new byte[4]; // Four byte buffer from source, eliminating white space
- int b4Posn = 0; // Keep track of four byte input buffer
- for (int i = off; i < off + len; i++) { // Loop through source
-
- byte sbiDecode = DECODABET[source[i] & 0xFF];
-
- // White space, Equals sign, or legit Base64 character
- // Note the values such as -5 and -9 in the
- // DECODABETs at the top of the file.
- if (sbiDecode >= WHITE_SPACE_ENC) {
- if (sbiDecode >= EQUALS_SIGN_ENC) {
- b4[b4Posn++] = source[i]; // Save non-whitespace
- if (b4Posn > 3) { // Time to decode?
- outBuffPosn += decode4to3(b4, 0, outBuff, outBuffPosn, options);
- b4Posn = 0;
-
- // If that was the equals sign, break out of 'for' loop
- if (source[i] == EQUALS_SIGN) {
- // check if the equals sign is somewhere in between
- if (i+1 < len + off) {
- throw new IOException(String.format(Locale.ROOT,
- "Found equals sign at position %d of the base64 string, not at the end", i));
- }
- break;
- } // end if: equals sign
- } // end if: quartet built
- else {
- if (source[i] == EQUALS_SIGN && len + off > i && source[i+1] != EQUALS_SIGN) {
- throw new IOException(String.format(Locale.ROOT,
- "Found equals sign at position %d of the base64 string, not at the end", i));
- } // enf if: equals sign and next character not as well
- } // end else:
- } // end if: equals sign or better
- } // end if: white space, equals sign or better
- else {
- // There's a bad input character in the Base64 stream.
- throw new IOException(String.format(Locale.ROOT,
- "Bad Base64 input character decimal %d in array position %d", ((int) source[i]) & 0xFF, i));
- } // end else:
- } // each input character
- return outBuffPosn;
- }
-
-
- /**
- * Decodes data from Base64 notation, automatically
- * detecting gzip-compressed data and decompressing it.
- *
- * @param s the string to decode
- * @return the decoded data
- * @throws java.io.IOException If there is a problem
- * @since 1.4
- */
- public static byte[] decode(String s) throws java.io.IOException {
- return decode(s, NO_OPTIONS);
- }
-
-
- /**
- * Decodes data from Base64 notation, automatically
- * detecting gzip-compressed data and decompressing it.
- *
- * @param s the string to decode
- * @param options encode options such as URL_SAFE
- * @return the decoded data
- * @throws java.io.IOException if there is an error
- * @throws NullPointerException if s is null
- * @since 1.4
- */
- public static byte[] decode(String s, int options) throws java.io.IOException {
-
- if (s == null) {
- throw new NullPointerException("Input string was null.");
- } // end if
-
- byte[] bytes = s.getBytes(PREFERRED_ENCODING);
- //
-
- // Decode
- bytes = decode(bytes, 0, bytes.length, options);
-
- // Check to see if it's gzip-compressed
- // GZIP Magic Two-Byte Number: 0x8b1f (35615)
- boolean dontGunzip = (options & DONT_GUNZIP) != 0;
- if ((bytes != null) && (bytes.length >= 4) && (!dontGunzip)) {
-
- int head = ((int) bytes[0] & 0xff) | ((bytes[1] << 8) & 0xff00);
- if (java.util.zip.GZIPInputStream.GZIP_MAGIC == head) {
- java.io.ByteArrayInputStream bais = null;
- java.util.zip.GZIPInputStream gzis = null;
- java.io.ByteArrayOutputStream baos = null;
- byte[] buffer = new byte[2048];
- int length = 0;
-
- try {
- baos = new java.io.ByteArrayOutputStream();
- bais = new java.io.ByteArrayInputStream(bytes);
- gzis = new java.util.zip.GZIPInputStream(bais);
-
- while ((length = gzis.read(buffer)) >= 0) {
- baos.write(buffer, 0, length);
- } // end while: reading input
-
- // No error? Get new bytes.
- bytes = baos.toByteArray();
-
- } // end try
- catch (java.io.IOException e) {
- // e.printStackTrace();
- // Just return originally-decoded bytes
- } // end catch
- finally {
- try {
- baos.close();
- } catch (Exception e) {
- }
- try {
- gzis.close();
- } catch (Exception e) {
- }
- try {
- bais.close();
- } catch (Exception e) {
- }
- } // end finally
-
- } // end if: gzipped
- } // end if: bytes.length >= 2
-
- return bytes;
- } // end decode
-
-
-
- /* ******** I N N E R C L A S S I N P U T S T R E A M ******** */
-
-
- /**
- * A {@link Base64.InputStream} will read data from another
- * java.io.InputStream, given in the constructor,
- * and encode/decode to/from Base64 notation on the fly.
- *
- * @see Base64
- * @since 1.3
- */
- public static class InputStream extends java.io.FilterInputStream {
-
- private boolean encode; // Encoding or decoding
- private int position; // Current position in the buffer
- private byte[] buffer; // Small buffer holding converted data
- private int bufferLength; // Length of buffer (3 or 4)
- private int numSigBytes; // Number of meaningful bytes in the buffer
- private int lineLength;
- private boolean breakLines; // Break lines at less than 80 characters
- private int options; // Record options used to create the stream.
- private byte[] decodabet; // Local copies to avoid extra method calls
-
-
- /**
- * Constructs a {@link Base64.InputStream} in DECODE mode.
- *
- * @param in the java.io.InputStream from which to read data.
- * @since 1.3
- */
- public InputStream(java.io.InputStream in) {
- this(in, DECODE);
- } // end constructor
-
-
- /**
- * Constructs a {@link Base64.InputStream} in
- * either ENCODE or DECODE mode.
- *
- * Valid options:
- * ENCODE or DECODE: Encode or Decode as data is read.
- * DO_BREAK_LINES: break lines at 76 characters
- * (only meaningful when encoding)
- *
- *
- * Example: new Base64.InputStream( in, Base64.DECODE )
- *
- * @param in the java.io.InputStream from which to read data.
- * @param options Specified options
- * @see Base64#ENCODE
- * @see Base64#DECODE
- * @see Base64#DO_BREAK_LINES
- * @since 2.0
- */
- public InputStream(java.io.InputStream in, int options) {
-
- super(in);
- this.options = options; // Record for later
- this.breakLines = (options & DO_BREAK_LINES) > 0;
- this.encode = (options & ENCODE) > 0;
- this.bufferLength = encode ? 4 : 3;
- this.buffer = new byte[bufferLength];
- this.position = -1;
- this.lineLength = 0;
- this.decodabet = getDecodabet(options);
- } // end constructor
-
- /**
- * Reads enough of the input stream to convert
- * to/from Base64 and returns the next byte.
- *
- * @return next byte
- * @since 1.3
- */
- @Override
- public int read() throws java.io.IOException {
-
- // Do we need to get data?
- if (position < 0) {
- if (encode) {
- byte[] b3 = new byte[3];
- int numBinaryBytes = 0;
- for (int i = 0; i < 3; i++) {
- int b = in.read();
-
- // If end of stream, b is -1.
- if (b >= 0) {
- b3[i] = (byte) b;
- numBinaryBytes++;
- } else {
- break; // out of for loop
- } // end else: end of stream
-
- } // end for: each needed input byte
-
- if (numBinaryBytes > 0) {
- encode3to4(b3, 0, numBinaryBytes, buffer, 0, options);
- position = 0;
- numSigBytes = 4;
- } // end if: got data
- else {
- return -1; // Must be end of stream
- } // end else
- } // end if: encoding
-
- // Else decoding
- else {
- byte[] b4 = new byte[4];
- int i = 0;
- for (i = 0; i < 4; i++) {
- // Read four "meaningful" bytes:
- int b = 0;
- do {
- b = in.read();
- }
- while (b >= 0 && decodabet[b & 0x7f] <= WHITE_SPACE_ENC);
-
- if (b < 0) {
- break; // Reads a -1 if end of stream
- } // end if: end of stream
-
- b4[i] = (byte) b;
- } // end for: each needed input byte
-
- if (i == 4) {
- numSigBytes = decode4to3(b4, 0, buffer, 0, options);
- position = 0;
- } // end if: got four characters
- else if (i == 0) {
- return -1;
- } // end else if: also padded correctly
- else {
- // Must have broken out from above.
- throw new java.io.IOException("Improperly padded Base64 input.");
- } // end
-
- } // end else: decode
- } // end else: get data
-
- // Got data?
- if (position >= 0) {
- // End of relevant data?
- if ( /*!encode &&*/ position >= numSigBytes) {
- return -1;
- } // end if: got data
-
- if (encode && breakLines && lineLength >= MAX_LINE_LENGTH) {
- lineLength = 0;
- return '\n';
- } // end if
- else {
- lineLength++; // This isn't important when decoding
- // but throwing an extra "if" seems
- // just as wasteful.
-
- int b = buffer[position++];
-
- if (position >= bufferLength) {
- position = -1;
- } // end if: end
-
- return b & 0xFF; // This is how you "cast" a byte that's
- // intended to be unsigned.
- } // end else
- } // end if: position >= 0
-
- // Else error
- else {
- throw new java.io.IOException("Error in Base64 code reading stream.");
- } // end else
- } // end read
-
-
- /**
- * Calls {@link #read()} repeatedly until the end of stream
- * is reached or len bytes are read.
- * Returns number of bytes read into array or -1 if
- * end of stream is encountered.
- *
- * @param dest array to hold values
- * @param off offset for array
- * @param len max number of bytes to read into array
- * @return bytes read into array or -1 if end of stream is encountered.
- * @since 1.3
- */
- @Override
- public int read(byte[] dest, int off, int len)
- throws java.io.IOException {
- int i;
- int b;
- for (i = 0; i < len; i++) {
- b = read();
-
- if (b >= 0) {
- dest[off + i] = (byte) b;
- } else if (i == 0) {
- return -1;
- } else {
- break; // Out of 'for' loop
- } // Out of 'for' loop
- } // end for: each byte read
- return i;
- } // end read
-
- } // end inner class InputStream
-
-
- /* ******** I N N E R C L A S S O U T P U T S T R E A M ******** */
-
-
- /**
- * A {@link Base64.OutputStream} will write data to another
- * java.io.OutputStream, given in the constructor,
- * and encode/decode to/from Base64 notation on the fly.
- *
- * @see Base64
- * @since 1.3
- */
- public static class OutputStream extends java.io.FilterOutputStream {
-
- private boolean encode;
- private int position;
- private byte[] buffer;
- private int bufferLength;
- private int lineLength;
- private boolean breakLines;
- private byte[] b4; // Scratch used in a few places
- private boolean suspendEncoding;
- private int options; // Record for later
- private byte[] decodabet; // Local copies to avoid extra method calls
-
- /**
- * Constructs a {@link Base64.OutputStream} in ENCODE mode.
- *
- * @param out the java.io.OutputStream to which data will be written.
- * @since 1.3
- */
- public OutputStream(java.io.OutputStream out) {
- this(out, ENCODE);
- } // end constructor
-
-
- /**
- * Constructs a {@link Base64.OutputStream} in
- * either ENCODE or DECODE mode.
- *
- * Valid options:
- * ENCODE or DECODE: Encode or Decode as data is read.
- * DO_BREAK_LINES: don't break lines at 76 characters
- * (only meaningful when encoding)
- *
- *
- * Example: new Base64.OutputStream( out, Base64.ENCODE )
- *
- * @param out the java.io.OutputStream to which data will be written.
- * @param options Specified options.
- * @see Base64#ENCODE
- * @see Base64#DECODE
- * @see Base64#DO_BREAK_LINES
- * @since 1.3
- */
- public OutputStream(java.io.OutputStream out, int options) {
- super(out);
- this.breakLines = (options & DO_BREAK_LINES) != 0;
- this.encode = (options & ENCODE) != 0;
- this.bufferLength = encode ? 3 : 4;
- this.buffer = new byte[bufferLength];
- this.position = 0;
- this.lineLength = 0;
- this.suspendEncoding = false;
- this.b4 = new byte[4];
- this.options = options;
- this.decodabet = getDecodabet(options);
- } // end constructor
-
-
- /**
- * Writes the byte to the output stream after
- * converting to/from Base64 notation.
- * When encoding, bytes are buffered three
- * at a time before the output stream actually
- * gets a write() call.
- * When decoding, bytes are buffered four
- * at a time.
- *
- * @param theByte the byte to write
- * @since 1.3
- */
- @Override
- public void write(int theByte)
- throws java.io.IOException {
- // Encoding suspended?
- if (suspendEncoding) {
- this.out.write(theByte);
- return;
- } // end if: suspended
-
- // Encode?
- if (encode) {
- buffer[position++] = (byte) theByte;
- if (position >= bufferLength) { // Enough to encode.
-
- this.out.write(encode3to4(b4, buffer, bufferLength, options));
-
- lineLength += 4;
- if (breakLines && lineLength >= MAX_LINE_LENGTH) {
- this.out.write(NEW_LINE);
- lineLength = 0;
- } // end if: end of line
-
- position = 0;
- } // end if: enough to output
- } // end if: encoding
-
- // Else, Decoding
- else {
- // Meaningful Base64 character?
- if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) {
- buffer[position++] = (byte) theByte;
- if (position >= bufferLength) { // Enough to output.
-
- int len = Base64.decode4to3(buffer, 0, b4, 0, options);
- out.write(b4, 0, len);
- position = 0;
- } // end if: enough to output
- } // end if: meaningful base64 character
- else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) {
- throw new java.io.IOException("Invalid character in Base64 data.");
- } // end else: not white space either
- } // end else: decoding
- } // end write
-
-
- /**
- * Calls {@link #write(int)} repeatedly until len
- * bytes are written.
- *
- * @param theBytes array from which to read bytes
- * @param off offset for array
- * @param len max number of bytes to read into array
- * @since 1.3
- */
- @Override
- public void write(byte[] theBytes, int off, int len)
- throws java.io.IOException {
- // Encoding suspended?
- if (suspendEncoding) {
- this.out.write(theBytes, off, len);
- return;
- } // end if: suspended
-
- for (int i = 0; i < len; i++) {
- write(theBytes[off + i]);
- } // end for: each byte written
-
- } // end write
-
-
- /**
- * Method added by PHIL. [Thanks, PHIL. -Rob]
- * This pads the buffer without closing the stream.
- *
- * @throws java.io.IOException if there's an error.
- */
- public void flushBase64() throws java.io.IOException {
- if (position > 0) {
- if (encode) {
- out.write(encode3to4(b4, buffer, position, options));
- position = 0;
- } // end if: encoding
- else {
- throw new java.io.IOException("Base64 input not properly padded.");
- } // end else: decoding
- } // end if: buffer partially full
-
- } // end flush
-
-
- /**
- * Flushes and closes (I think, in the superclass) the stream.
- *
- * @since 1.3
- */
- @Override
- public void close() throws java.io.IOException {
- // 1. Ensure that pending characters are written
- flushBase64();
-
- // 2. Actually close the stream
- // Base class both flushes and closes.
- super.close();
-
- buffer = null;
- out = null;
- } // end close
-
-
- /**
- * Suspends encoding of the stream.
- * May be helpful if you need to embed a piece of
- * base64-encoded data in a stream.
- *
- * @throws java.io.IOException if there's an error flushing
- * @since 1.5.1
- */
- public void suspendEncoding() throws java.io.IOException {
- flushBase64();
- this.suspendEncoding = true;
- } // end suspendEncoding
-
-
- /**
- * Resumes encoding of the stream.
- * May be helpful if you need to embed a piece of
- * base64-encoded data in a stream.
- *
- * @since 1.5.1
- */
- public void resumeEncoding() {
- this.suspendEncoding = false;
- } // end resumeEncoding
-
-
- } // end inner class OutputStream
-
-
-} // end class Base64
diff --git a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
index 9a3c35f3527..9f5e5f34a1b 100644
--- a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common;
import java.io.IOException;
+import java.util.Base64;
import java.util.Random;
class RandomBasedUUIDGenerator implements UUIDGenerator {
@@ -54,14 +55,6 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
* We set only the MSB of the variant*/
randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
- try {
- byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
- // we know the bytes are 16, and not a multi of 3, so remove the 2 padding chars that are added
- assert encoded[encoded.length - 1] == '=';
- assert encoded[encoded.length - 2] == '=';
- return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
- } catch (IOException e) {
- throw new IllegalStateException("should not be thrown");
- }
+ return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
}
}
diff --git a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java
index d1a22a17cda..8d507ae7f22 100644
--- a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java
@@ -19,8 +19,7 @@
package org.elasticsearch.common;
-
-import java.io.IOException;
+import java.util.Base64;
import java.util.concurrent.atomic.AtomicInteger;
/** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but
@@ -80,15 +79,6 @@ class TimeBasedUUIDGenerator implements UUIDGenerator {
assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length;
- byte[] encoded;
- try {
- encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE);
- } catch (IOException e) {
- throw new IllegalStateException("should not be thrown", e);
- }
-
- // We are a multiple of 3 bytes so we should not see any padding:
- assert encoded[encoded.length - 1] != '=';
- return new String(encoded, 0, encoded.length, Base64.PREFERRED_ENCODING);
+ return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes);
}
}
diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
index 7636097e288..858486d282c 100644
--- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
+++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java
@@ -30,6 +30,8 @@ import java.util.List;
*/
public class BlobPath implements Iterable {
+ private static final String SEPARATOR = "/";
+
private final List paths;
public BlobPath() {
@@ -60,15 +62,12 @@ public class BlobPath implements Iterable {
return new BlobPath(Collections.unmodifiableList(paths));
}
- public String buildAsString(String separator) {
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < paths.size(); i++) {
- sb.append(paths.get(i));
- if (i < (paths.size() - 1)) {
- sb.append(separator);
- }
+ public String buildAsString() {
+ String p = String.join(SEPARATOR, paths);
+ if (p.isEmpty()) {
+ return p;
}
- return sb.toString();
+ return p + SEPARATOR;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/joda/Joda.java b/core/src/main/java/org/elasticsearch/common/joda/Joda.java
index b65a248c21f..cffea836ac2 100644
--- a/core/src/main/java/org/elasticsearch/common/joda/Joda.java
+++ b/core/src/main/java/org/elasticsearch/common/joda/Joda.java
@@ -321,20 +321,15 @@ public class Joda {
public static class EpochTimeParser implements DateTimeParser {
- private static final Pattern MILLI_SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,13}$");
- private static final Pattern SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,10}$");
-
private final boolean hasMilliSecondPrecision;
- private final Pattern pattern;
public EpochTimeParser(boolean hasMilliSecondPrecision) {
this.hasMilliSecondPrecision = hasMilliSecondPrecision;
- this.pattern = hasMilliSecondPrecision ? MILLI_SECOND_PRECISION_PATTERN : SECOND_PRECISION_PATTERN;
}
@Override
public int estimateParsedLength() {
- return hasMilliSecondPrecision ? 13 : 10;
+ return hasMilliSecondPrecision ? 19 : 16;
}
@Override
@@ -344,8 +339,7 @@ public class Joda {
if ((isPositive && isTooLong) ||
// timestamps have to have UTC timezone
- bucket.getZone() != DateTimeZone.UTC ||
- pattern.matcher(text).matches() == false) {
+ bucket.getZone() != DateTimeZone.UTC) {
return -1;
}
@@ -378,7 +372,7 @@ public class Joda {
@Override
public int estimatePrintedLength() {
- return hasMilliSecondPrecision ? 13 : 10;
+ return hasMilliSecondPrecision ? 19 : 16;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
index eba89c2e02a..c49b0364e28 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
@@ -19,9 +19,10 @@
package org.elasticsearch.common.logging;
+import org.apache.log4j.Java9Hack;
import org.apache.log4j.PropertyConfigurator;
+import org.apache.lucene.util.Constants;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
@@ -87,14 +88,17 @@ public class LogConfigurator {
replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
replacements.put("xml", "org.apache.log4j.XMLLayout");
REPLACEMENTS = unmodifiableMap(replacements);
+
+ if (Constants.JRE_IS_MINIMUM_JAVA9) {
+ Java9Hack.fixLog4j();
+ }
}
private static boolean loaded;
/**
* Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
- *
- * @param settings custom settings that should be applied
+ * @param settings custom settings that should be applied
* @param resolveConfig controls whether the logging conf file should be read too or not.
*/
public static void configure(Settings settings, boolean resolveConfig) {
@@ -109,7 +113,7 @@ public class LogConfigurator {
if (resolveConfig) {
resolveConfig(environment, settingsBuilder);
}
- settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties());
+
// add custom settings after config was added so that they are not overwritten by config
settingsBuilder.put(settings);
settingsBuilder.replacePropertyPlaceholders();
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java
index 82454f3dd62..47b85250735 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java
@@ -26,7 +26,7 @@ import org.apache.lucene.search.Scorer;
import org.elasticsearch.script.ExplainableSearchScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptException;
+import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.SearchScript;
import java.io.IOException;
@@ -87,7 +87,7 @@ public class ScriptScoreFunction extends ScoreFunction {
scorer.score = subQueryScore;
double result = leafScript.runAsDouble();
if (Double.isNaN(result)) {
- throw new ScriptException("script_score returned NaN");
+ throw new GeneralScriptException("script_score returned NaN");
}
return result;
}
diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
index c0b650e555d..201e5297511 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -19,11 +19,6 @@
package org.elasticsearch.common.network;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
@@ -36,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandReg
import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
@@ -71,6 +65,12 @@ import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestore
import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
@@ -137,19 +137,11 @@ import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
import org.elasticsearch.rest.action.main.RestMainAction;
-import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
-import org.elasticsearch.rest.action.percolate.RestPercolateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.search.RestClearScrollAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.suggest.RestSuggestAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
-import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
import org.elasticsearch.rest.action.update.RestUpdateAction;
@@ -159,6 +151,9 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
+import java.util.Arrays;
+import java.util.List;
+
/**
* A module to handle registering and binding all network related classes.
*/
@@ -250,8 +245,6 @@ public class NetworkModule extends AbstractModule {
RestMultiTermVectorsAction.class,
RestBulkAction.class,
RestUpdateAction.class,
- RestPercolateAction.class,
- RestMultiPercolateAction.class,
RestSearchAction.class,
RestSearchScrollAction.class,
@@ -404,7 +397,7 @@ public class NetworkModule extends AbstractModule {
* @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because
* it is the name under which the command's reader is registered.
*/
- public void registerAllocationCommand(Writeable.Reader reader, AllocationCommand.Parser parser,
+ private void registerAllocationCommand(Writeable.Reader reader, AllocationCommand.Parser parser,
ParseField commandName) {
allocationCommandRegistry.register(parser, commandName);
namedWriteableRegistry.register(AllocationCommand.class, commandName.getPreferredName(), reader);
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 5b6130281d4..b29e7604a98 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -332,7 +332,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
- Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
@@ -375,7 +374,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
ClusterName.CLUSTER_NAME_SETTING,
Client.CLIENT_TYPE_SETTING_S,
- InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
EsExecutors.PROCESSORS_SETTING,
ThreadContext.DEFAULT_HEADERS_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 1b795239457..027100b3469 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.gateway.PrimaryShardAllocator;
@@ -35,12 +36,10 @@ import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.IndicesRequestCache;
import java.util.Arrays;
@@ -59,6 +58,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
public static final Predicate INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);
public static final Set> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
+ MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY,
IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
@@ -126,7 +126,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
FieldMapper.IGNORE_MALFORMED_SETTING,
FieldMapper.COERCE_SETTING,
Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
- PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index 1efb65c18b1..da32468acc3 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -122,7 +122,7 @@ public class Setting extends ToXContentToBytes {
this.defaultValue = defaultValue;
this.parser = parser;
if (properties == null) {
- throw new IllegalArgumentException("properties can not be null for setting [" + key + "]");
+ throw new IllegalArgumentException("properties cannot be null for setting [" + key + "]");
}
if (properties.length == 0) {
this.properties = EMPTY_PROPERTIES;
@@ -132,7 +132,7 @@ public class Setting extends ToXContentToBytes {
}
/**
- * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}.
+ * Creates a new Setting instance
* @param key the settings key for this setting.
* @param defaultValue a default value function that returns the default values string representation.
* @param parser a parser that parses the string rep into a complex datatype.
@@ -165,7 +165,7 @@ public class Setting extends ToXContentToBytes {
}
/**
- * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}.
+ * Creates a new Setting instance
* @param key the settings key for this setting.
* @param fallbackSetting a setting who's value to fallback on if this setting is not defined
* @param parser a parser that parses the string rep into a complex datatype.
@@ -537,6 +537,10 @@ public class Setting extends ToXContentToBytes {
return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
}
+ public static Setting boolSetting(String key, Function defaultValueFn, Property... properties) {
+ return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
+ }
+
public static Setting byteSizeSetting(String key, String percentage, Property... properties) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
}
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
index 8488ca75c73..15554e5ccaa 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -58,9 +58,11 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;
@@ -942,89 +944,54 @@ public final class Settings implements ToXContent {
return this;
}
- /**
- * Puts all the properties with keys starting with the provided prefix.
- *
- * @param prefix The prefix to filter property key by
- * @param properties The properties to put
- * @return The builder
- */
- public Builder putProperties(String prefix, Dictionary