Merge branch 'master' into pr/update-aws-sdk

commit 623a5b7a85

.travis.yml (10 changed lines)
@@ -1,10 +0,0 @@
-language: java
-jdk:
-  - openjdk7
-
-env:
-  - ES_TEST_LOCAL=true
-  - ES_TEST_LOCAL=false
-
-notifications:
-  email: false
TESTING.asciidoc

@@ -18,24 +18,18 @@ gradle assemble

 == Other test options

-To disable and enable network transport, set the `Des.node.mode`.
+To disable and enable network transport, set the `tests.es.node.mode` system property.

 Use network transport:

 ------------------------------------
--Des.node.mode=network
+-Dtests.es.node.mode=network
 ------------------------------------

 Use local transport (default since 1.3):

 -------------------------------------
--Des.node.mode=local
--------------------------------------
-
-Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
-
--------------------------------------
-export ES_TEST_LOCAL=true && gradle test
+-Dtests.es.node.mode=local
 -------------------------------------

 === Running Elasticsearch from a checkout
@@ -201,7 +195,7 @@ gradle test -Dtests.timeoutSuite=5000! ...

 Change the logging level of ES (not gradle)

 --------------------------------
-gradle test -Des.logger.level=DEBUG
+gradle test -Dtests.es.logger.level=DEBUG
 --------------------------------

 Print all the logging output from the test runs to the commandline
build.gradle (24 changed lines)
@@ -81,7 +81,7 @@ subprojects {
   nexus {
     String buildSnapshot = System.getProperty('build.snapshot', 'true')
     if (buildSnapshot == 'false') {
-      Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build()
+      Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
       String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
       repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
     }
@@ -144,6 +144,14 @@ subprojects {
     // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
     javadoc.options.encoding='UTF8'
+    javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
+    /*
+      TODO: building javadocs with java 9 b118 is currently broken with weird errors, so
+      for now this is commented out...try again with the next ea build...
+      javadoc.executable = new File(project.javaHome, 'bin/javadoc')
+      if (project.javaVersion == JavaVersion.VERSION_1_9) {
+        // TODO: remove this hack! gradle should be passing this...
+        javadoc.options.addStringOption('source', '8')
+      }*/
   }
 }

@@ -261,13 +269,6 @@ tasks.idea.doLast {
 if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) {
   throw new GradleException('You must run gradle idea from the root of elasticsearch before importing into IntelliJ')
 }
-// add buildSrc itself as a groovy project
-task buildSrcIdea(type: GradleBuild) {
-  buildFile = 'buildSrc/build.gradle'
-  tasks = ['cleanIdea', 'ideaModule']
-}
-tasks.idea.dependsOn(buildSrcIdea)
-

 // eclipse configuration
 allprojects {
@@ -310,13 +311,6 @@ allprojects {
   tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
 }

-// add buildSrc itself as a groovy project
-task buildSrcEclipse(type: GradleBuild) {
-  buildFile = 'buildSrc/build.gradle'
-  tasks = ['cleanEclipse', 'eclipse']
-}
-tasks.eclipse.dependsOn(buildSrcEclipse)
-
 // we need to add the same --debug-jvm option as
 // the real RunTask has, so we can pass it through
 class Run extends DefaultTask {
buildSrc/build.gradle

@@ -84,7 +84,7 @@ dependencies {
   compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
   compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
-  compile 'de.thetaphi:forbiddenapis:2.0'
+  compile 'de.thetaphi:forbiddenapis:2.1'
   compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
   compile 'org.apache.rat:apache-rat:0.11'
 }
@@ -143,7 +143,7 @@ class BuildPlugin implements Plugin<Project> {
     }

     project.rootProject.ext.javaHome = javaHome
-    project.rootProject.ext.javaVersion = javaVersion
+    project.rootProject.ext.javaVersion = javaVersionEnum
     project.rootProject.ext.buildChecksDone = true
   }
   project.targetCompatibility = minimumJava
@@ -378,7 +378,7 @@ class BuildPlugin implements Plugin<Project> {
      * -serial because we don't use java serialization.
      */
     // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
-    options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
+    options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
     // compile with compact 3 profile by default
     // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
     if (project.compactProfile != 'full') {
@@ -387,10 +387,13 @@ class BuildPlugin implements Plugin<Project> {
       options.encoding = 'UTF-8'
       //options.incremental = true

-      // gradle ignores target/source compatibility when it is "unnecessary", but since to compile with
-      // java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
-      assert minimumJava == JavaVersion.VERSION_1_8
-      options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
+      if (project.javaVersion == JavaVersion.VERSION_1_9) {
+        // hack until gradle supports java 9's new "-release" arg
+        assert minimumJava == JavaVersion.VERSION_1_8
+        options.compilerArgs << '-release' << '8'
+        project.sourceCompatibility = null
+        project.targetCompatibility = null
+      }
     }
   }
 }

@@ -456,7 +459,7 @@ class BuildPlugin implements Plugin<Project> {
       // default test sysprop values
       systemProperty 'tests.ifNoTests', 'fail'
       // TODO: remove setting logging level via system property
-      systemProperty 'es.logger.level', 'WARN'
+      systemProperty 'tests.logger.level', 'WARN'
       for (Map.Entry<String, String> property : System.properties.entrySet()) {
         if (property.getKey().startsWith('tests.') ||
             property.getKey().startsWith('es.')) {
@@ -87,6 +87,10 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
      * calls buildTest to actually build the test.
      */
     void handleSnippet(Snippet snippet) {
+        if (snippet.language == 'json') {
+            throw new InvalidUserDataException(
+                "$snippet: Use `js` instead of `json`.")
+        }
         if (snippet.testSetup) {
             setup(snippet)
             return
@@ -62,9 +62,8 @@ class PrecommitTasks {
     private static Task configureForbiddenApis(Project project) {
         project.pluginManager.apply(ForbiddenApisPlugin.class)
         project.forbiddenApis {
-            internalRuntimeForbidden = true
             failOnUnsupportedJava = false
-            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
+            bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
             signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
                               getClass().getResource('/forbidden/es-all-signatures.txt')]
             suppressAnnotations = ['**.SuppressForbidden']
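
Note on the forbiddenapis change above: the `jdk-non-portable` bundled signature added here appears to be the forbiddenapis 2.1 replacement for the removed `internalRuntimeForbidden` flag, i.e. it forbids references to internal or non-portable JDK classes. A hedged illustration of the kind of code such a check flags (hypothetical example, not from this repository):

import sun.misc.Unsafe; // internal, non-portable JDK API: jdk-non-portable flags references like this

public class UnsafeUser {
    public static void main(String[] args) throws Exception {
        // Reflectively grabbing the Unsafe singleton, a classic non-portable pattern
        java.lang.reflect.Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);
        System.out.println(unsafe.pageSize());
    }
}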
@@ -203,8 +203,7 @@ public class ThirdPartyAuditTask extends AntTask {
         Set<String> sheistySet = getSheistyClasses(tmpDir.toPath());

         try {
-            ant.thirdPartyAudit(internalRuntimeForbidden: false,
-                failOnUnsupportedJava: false,
+            ant.thirdPartyAudit(failOnUnsupportedJava: false,
                 failOnMissingClasses: false,
                 signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
                 classpath: classpath.asPath) {
@@ -129,18 +129,18 @@ class NodeInfo {
   }

   env = [ 'JAVA_HOME' : project.javaHome ]
-  args.addAll("-E", "es.node.portsfile=true")
+  args.addAll("-E", "node.portsfile=true")
   String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
   String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
   env.put('ES_JAVA_OPTS', esJavaOpts)
   for (Map.Entry<String, String> property : System.properties.entrySet()) {
-    if (property.getKey().startsWith('es.')) {
+    if (property.key.startsWith('tests.es.')) {
       args.add("-E")
-      args.add("${property.getKey()}=${property.getValue()}")
+      args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
     }
   }
   env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
-  args.addAll("-E", "es.path.conf=${confDir}")
+  args.addAll("-E", "path.conf=${confDir}")
   if (Os.isFamily(Os.FAMILY_WINDOWS)) {
     args.add('"') // end the entire command, quoted
   }
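
The NodeInfo hunk above narrows property forwarding: only system properties carrying the new `tests.es.` prefix reach the node, and the prefix is stripped before they are passed as `-E` settings. A minimal standalone sketch of that renaming rule (plain Java; the `forwardedSettings` helper is hypothetical, not part of the build code):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

public class TestSettingsForwarder {
    private static final String PREFIX = "tests.es.";

    /** Collects tests.es.* properties as node settings with the prefix stripped. */
    static Map<String, String> forwardedSettings(Properties sysProps) {
        Map<String, String> settings = new LinkedHashMap<>();
        for (String name : sysProps.stringPropertyNames()) {
            if (name.startsWith(PREFIX)) {
                // "tests.es.node.mode" becomes the node setting "node.mode"
                settings.put(name.substring(PREFIX.length()), sysProps.getProperty(name));
            }
        }
        return settings;
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("tests.es.node.mode", "network");
        props.setProperty("es.node.mode", "local"); // old prefix: no longer forwarded
        System.out.println(forwardedSettings(props)); // prints {node.mode=network}
    }
}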
checkstyle_suppressions.xml

@@ -37,8 +37,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]put[/\\]TransportPutRepositoryAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]TransportVerifyRepositoryAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]verify[/\\]VerifyRepositoryRequestBuilder.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]ClusterRerouteRequest.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]ClusterRerouteRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]TransportClusterRerouteAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsRequestBuilder.java" checks="LineLength" />
@@ -179,12 +177,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequest.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineTransportAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolateRequest.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]PercolateShardResponse.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportPercolateAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchPhaseExecutionException.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchResponse.java" checks="LineLength" />
@@ -453,9 +445,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]object[/\\]ObjectMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]object[/\\]RootObjectMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]ExtractQueryTermsService.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorFieldMapper.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorQueriesRegistry.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
@@ -520,7 +509,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQuery.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
@@ -566,7 +554,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]get[/\\]RestMultiGetAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]index[/\\]RestIndexAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]percolate[/\\]RestPercolateAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestDeleteIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestPutIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]search[/\\]RestClearScrollAction.java" checks="LineLength" />
@@ -745,7 +732,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestParsingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]percolate[/\\]MultiPercolatorRequestTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
@@ -981,8 +967,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]typelevels[/\\]ParseDocumentTypeLevelsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]update[/\\]UpdateMappingOnClusterIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]update[/\\]UpdateMappingTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]percolator[/\\]PercolatorFieldMapperTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]CommonTermsQueryBuilderTests.java" checks="LineLength" />
@@ -1071,9 +1055,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]options[/\\]detailederrors[/\\]DetailedErrorsEnabledIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQueryTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
@@ -1222,6 +1203,16 @@
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheScriptEngineTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheTests.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequest.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]RestPercolateAction.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
+<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorRequestTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuCollationTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuFoldingTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />
@@ -1232,13 +1223,6 @@
 <suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-smartcn[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]SimpleSmartChineseAnalysisTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]analysis-stempel[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PolishAnalysisTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryRequest.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryRequestBuilder.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]DeleteByQueryResponse.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]TransportDeleteByQueryAction.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]IndexDeleteByQueryResponseTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]deletebyquery[/\\]TransportDeleteByQueryActionTests.java" checks="LineLength" />
-<suppress files="plugins[/\\]delete-by-query[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]deletebyquery[/\\]DeleteByQueryTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureTestCase.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureMinimumMasterNodesTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureSimpleTests.java" checks="LineLength" />
@@ -1309,6 +1293,7 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliToolTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]MockBigArrays.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]script[/\\]NativeSignificanceScoreScriptWithParams.java" checks="LineLength" />
+<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]AbstractQueryTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CompositeTestCluster.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />
@@ -1335,7 +1320,6 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]ESRestTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]RestTestExecutionContext.java" checks="LineLength" />
-<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]RestClient.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]http[/\\]HttpRequestBuilder.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
buildSrc/version.properties

@@ -1,4 +1,4 @@
-elasticsearch = 5.0.0
+elasticsearch = 5.0.0-alpha3
 lucene = 6.0.0

 # optional dependencies
@@ -13,9 +13,7 @@ jna = 4.1.0
 # test dependencies
 randomizedrunner = 2.3.2
 junit = 4.11
-# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
-# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
-httpclient = 4.3.6
-httpcore = 4.3.3
+httpclient = 4.5.2
+httpcore = 4.4.4
 commonslogging = 1.1.3
 commonscodec = 1.10
README.textile (deleted)

@@ -1,235 +0,0 @@
-h1. Elasticsearch
-
-h2. A Distributed RESTful Search Engine
-
-h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch
-
-Elasticsearch is a distributed RESTful search engine built for the cloud. Features include:
-
-* Distributed and Highly Available Search Engine.
-** Each index is fully sharded with a configurable number of shards.
-** Each shard can have one or more replicas.
-** Read / Search operations performed on either one of the replica shard.
-* Multi Tenant with Multi Types.
-** Support for more than one index.
-** Support for more than one type per index.
-** Index level configuration (number of shards, index storage, ...).
-* Various set of APIs
-** HTTP RESTful API
-** Native Java API.
-** All APIs perform automatic node operation rerouting.
-* Document oriented
-** No need for upfront schema definition.
-** Schema can be defined per type for customization of the indexing process.
-* Reliable, Asynchronous Write Behind for long term persistency.
-* (Near) Real Time Search.
-* Built on top of Lucene
-** Each shard is a fully functional Lucene index
-** All the power of Lucene easily exposed through simple configuration / plugins.
-* Per operation consistency
-** Single document level operations are atomic, consistent, isolated and durable.
-* Open Source under the Apache License, version 2 ("ALv2")
-
-h2. Getting Started
-
-First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
-
-h3. Requirements
-
-You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
-
-h3. Installation
-
-* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
-* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
-* Run @curl -X GET http://localhost:9200/@.
-* Start more servers ...
-
-h3. Indexing
-
-Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
-
-<pre>
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
-    "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
-    "message": "Another tweet, will it be indexed?"
-}'
-</pre>
-
-Now, let's see if the information was added by GETting it:
-
-<pre>
-curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
-</pre>
-
-h3. Searching
-
-Mmm search..., shouldn't it be elastic?
-Let's find all the tweets that @kimchy@ posted:
-
-<pre>
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
-</pre>
-
-We can also use the JSON query language Elasticsearch provides instead of a query string:
-
-<pre>
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
-{
-    "query" : {
-        "match" : { "user": "kimchy" }
-    }
-}'
-</pre>
-
-Just for kicks, let's get all the documents stored (we should see the user as well):
-
-<pre>
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-</pre>
-
-We can also do range search (the @postDate@ was automatically identified as date)
-
-<pre>
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
-    "query" : {
-        "range" : {
-            "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
-        }
-    }
-}'
-</pre>
-
-There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser.
-
-h3. Multi Tenant - Indices and Types
-
-Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data.
-
-Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@.
-
-Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
-
-<pre>
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
-    "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
-    "message": "Another tweet, will it be indexed?"
-}'
-</pre>
-
-The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
-
-Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
-
-<pre>
-curl -XPUT http://localhost:9200/another_user/ -d '
-{
-    "index" : {
-        "numberOfShards" : 1,
-        "numberOfReplicas" : 1
-    }
-}'
-</pre>
-
-Search (and similar operations) are multi index aware. This means that we can easily search on more than one
-index (twitter user), for example:
-
-<pre>
-curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-</pre>
-
-Or on all the indices:
-
-<pre>
-curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-</pre>
-
-{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends).
-
-h3. Distributed, Highly Available
-
-Let's face it, things will fail....
-
-Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
-
-In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
-
-h3. Where to go from here?
-
-We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
-
-h3. Building from Source
-
-Elasticsearch uses "Maven":http://maven.apache.org for its build system.
-
-In order to create a distribution, simply run the @mvn clean package
--DskipTests@ command in the cloned directory.
-
-The distribution will be created under @target/releases@.
-
-See the "TESTING":TESTING.asciidoc file for more information about
-running the Elasticsearch test suite.
-
-h3. Upgrading to Elasticsearch 1.x?
-
-In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process.
-
-h1. License
-
-<pre>
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch <https://www.elastic.co>
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-</pre>
core/build.gradle

@@ -121,6 +121,36 @@ forbiddenPatterns {
   exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
 }

+task generateModulesList {
+  List<String> modules = project(':modules').subprojects.collect { it.name }
+  File modulesFile = new File(buildDir, 'generated-resources/modules.txt')
+  processResources.from(modulesFile)
+  inputs.property('modules', modules)
+  outputs.file(modulesFile)
+  doLast {
+    modulesFile.parentFile.mkdirs()
+    modulesFile.setText(modules.join('\n'), 'UTF-8')
+  }
+}
+
+task generatePluginsList {
+  List<String> plugins = project(':plugins').subprojects
+    .findAll { it.name.contains('example') == false }
+    .collect { it.name }
+  File pluginsFile = new File(buildDir, 'generated-resources/plugins.txt')
+  processResources.from(pluginsFile)
+  inputs.property('plugins', plugins)
+  outputs.file(pluginsFile)
+  doLast {
+    pluginsFile.parentFile.mkdirs()
+    pluginsFile.setText(plugins.join('\n'), 'UTF-8')
+  }
+}
+
+processResources {
+  dependsOn generateModulesList, generatePluginsList
+}
+
 thirdPartyAudit.excludes = [
   // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
   'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
@@ -16,25 +16,22 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.index.percolator;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.Index;
+package org.apache.log4j;

-import java.io.IOException;
+import org.apache.log4j.helpers.ThreadLocalMap;

 /**
- * Exception during indexing a percolator query.
+ * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
+ *
+ * This hack fixes up the pkg private members as if it had detected the java version correctly.
  */
-public class PercolatorException extends ElasticsearchException {
+public class Java9Hack {

-    public PercolatorException(Index index, String msg, Throwable cause) {
-        super(msg, cause);
-        setIndex(index);
-    }
-
-    public PercolatorException(StreamInput in) throws IOException{
-        super(in);
+    public static void fixLog4j() {
+        if (MDC.mdc.tlm == null) {
+            MDC.mdc.java1 = false;
+            MDC.mdc.tlm = new ThreadLocalMap();
+        }
     }
 }
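
Because Java9Hack lives in the `org.apache.log4j` package, it can write the package-private `MDC.mdc.java1` and `MDC.mdc.tlm` fields directly. A sketch of how a caller would presumably use it (the call site below is an assumption, not shown in this diff; requires Log4j 1.2 on the classpath):

public class TestBootstrap {
    static {
        // Must run before anything touches Log4j 1.2's MDC on Java 9;
        // otherwise the mis-parsed java.version leaves tlm null and MDC drops values.
        org.apache.log4j.Java9Hack.fixLog4j();
    }

    public static void main(String[] args) {
        org.apache.log4j.MDC.put("node", "node-0");
        System.out.println(org.apache.log4j.MDC.get("node")); // "node-0" once fixed
    }
}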
@@ -21,7 +21,6 @@ package org.elasticsearch;

 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -201,41 +200,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         return rootCause;
     }

-    /**
-     * Check whether this exception contains an exception of the given type:
-     * either it is of the given class itself or it contains a nested cause
-     * of the given type.
-     *
-     * @param exType the exception type to look for
-     * @return whether there is a nested exception of the specified type
-     */
-    public boolean contains(Class<? extends Throwable> exType) {
-        if (exType == null) {
-            return false;
-        }
-        if (exType.isInstance(this)) {
-            return true;
-        }
-        Throwable cause = getCause();
-        if (cause == this) {
-            return false;
-        }
-        if (cause instanceof ElasticsearchException) {
-            return ((ElasticsearchException) cause).contains(exType);
-        } else {
-            while (cause != null) {
-                if (exType.isInstance(cause)) {
-                    return true;
-                }
-                if (cause.getCause() == cause) {
-                    break;
-                }
-                cause = cause.getCause();
-            }
-            return false;
-        }
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(this.getMessage());
@@ -532,7 +496,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
             org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
         SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
             org.elasticsearch.search.SearchContextMissingException::new, 24),
-        SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
+        GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
+            org.elasticsearch.script.GeneralScriptException::new, 25),
         BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
             org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
         SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
@@ -681,8 +646,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
             org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
         REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
             org.elasticsearch.repositories.RepositoryMissingException::new, 107),
-        PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
-            org.elasticsearch.index.percolator.PercolatorException::new, 108),
         DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
             org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
         FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
@@ -744,7 +707,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
             org.elasticsearch.index.query.QueryShardException::new, 141),
         NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
-            ShardStateAction.NoLongerPrimaryShardException::new, 142);
+            ShardStateAction.NoLongerPrimaryShardException::new, 142),
+        SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);


         final Class<? extends ElasticsearchException> exceptionClass;
@@ -32,7 +32,6 @@ import java.io.IOException;

 /**
  */
-@SuppressWarnings("deprecation")
 public class Version {
     /*
      * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA
@@ -75,9 +74,9 @@ public class Version {
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
     public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
-    public static final Version CURRENT = V_5_0_0;
+    public static final int V_5_0_0_alpha3_ID = 5000003;
+    public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
+    public static final Version CURRENT = V_5_0_0_alpha3;

     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -90,8 +89,8 @@ public class Version {

     public static Version fromId(int id) {
         switch (id) {
-            case V_5_0_0_ID:
-                return V_5_0_0;
+            case V_5_0_0_alpha3_ID:
+                return V_5_0_0_alpha3;
             case V_5_0_0_alpha2_ID:
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
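
The XXYYZZAA scheme described in the Version comment above uses two decimal digits per component, so the new id 5000003 is 5.0.0 with alpha indicator 03, while the removed 5000099 used 99 as the "no pre-release" GA marker. A small illustrative decoder (a sketch of the scheme only, not the actual Version internals):

public class VersionIdDemo {
    public static void main(String[] args) {
        for (int id : new int[] { 5000003, 5000099 }) {
            int major = id / 1000000;        // XX
            int minor = (id / 10000) % 100;  // YY
            int revision = (id / 100) % 100; // ZZ
            int build = id % 100;            // AA: alpha/beta/rc indicator, 99 = GA
            System.out.printf("%d -> %d.%d.%d (AA=%02d)%n", id, major, minor, revision, build);
        }
        // 5000003 -> 5.0.0 (AA=03), i.e. alpha3
        // 5000099 -> 5.0.0 (AA=99), i.e. the removed GA id
    }
}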
@@ -165,10 +165,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineAction;
 import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
 import org.elasticsearch.action.main.MainAction;
 import org.elasticsearch.action.main.TransportMainAction;
-import org.elasticsearch.action.percolate.MultiPercolateAction;
-import org.elasticsearch.action.percolate.PercolateAction;
-import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
-import org.elasticsearch.action.percolate.TransportPercolateAction;
 import org.elasticsearch.action.search.ClearScrollAction;
 import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.SearchAction;

@@ -332,8 +328,6 @@ public class ActionModule extends AbstractModule {
         registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
         registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
         registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
-        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
-        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
         registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
         registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
         registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
@@ -39,6 +39,10 @@ public abstract class ActionRequest<Request extends ActionRequest<Request>> exte

     public abstract ActionRequestValidationException validate();

+    public boolean getShouldPersistResult() {
+        return false;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -42,17 +42,22 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable

     private final ShardId shard;
     private final boolean primary;
+    private final boolean hasPendingAsyncFetch;
     private final String assignedNodeId;
     private final UnassignedInfo unassignedInfo;
+    private final long allocationDelayMillis;
     private final long remainingDelayMillis;
     private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;

-    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
-                                        @Nullable UnassignedInfo unassignedInfo, Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
+    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
+                                        long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
+                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
         this.shard = shard;
         this.primary = primary;
+        this.hasPendingAsyncFetch = hasPendingAsyncFetch;
         this.assignedNodeId = assignedNodeId;
         this.unassignedInfo = unassignedInfo;
+        this.allocationDelayMillis = allocationDelayMillis;
         this.remainingDelayMillis = remainingDelayMillis;
         this.nodeExplanations = nodeExplanations;
     }
@@ -60,8 +65,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
     public ClusterAllocationExplanation(StreamInput in) throws IOException {
         this.shard = ShardId.readShardId(in);
         this.primary = in.readBoolean();
+        this.hasPendingAsyncFetch = in.readBoolean();
         this.assignedNodeId = in.readOptionalString();
         this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+        this.allocationDelayMillis = in.readVLong();
         this.remainingDelayMillis = in.readVLong();

         int mapSize = in.readVInt();
@@ -77,8 +84,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
     public void writeTo(StreamOutput out) throws IOException {
         this.getShard().writeTo(out);
         out.writeBoolean(this.isPrimary());
+        out.writeBoolean(this.isStillFetchingShardData());
         out.writeOptionalString(this.getAssignedNodeId());
         out.writeOptionalWriteable(this.getUnassignedInfo());
+        out.writeVLong(allocationDelayMillis);
         out.writeVLong(remainingDelayMillis);

         out.writeVInt(this.nodeExplanations.size());
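
Both new fields are written at exactly the positions where the stream constructor reads them: `hasPendingAsyncFetch` right after `primary`, and `allocationDelayMillis` right before `remainingDelayMillis`. With an unlabeled binary stream, writer and reader must agree field for field. A generic sketch of that invariant using plain java.io (not the Elasticsearch StreamInput/StreamOutput API):

import java.io.*;

public class StreamOrderDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeBoolean(true);   // primary
            out.writeBoolean(false);  // hasPendingAsyncFetch: new field, fixed position
            out.writeLong(60_000L);   // allocationDelayMillis
            out.writeLong(12_345L);   // remainingDelayMillis
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            boolean primary = in.readBoolean();
            boolean pendingFetch = in.readBoolean(); // must mirror the write order exactly
            long allocationDelay = in.readLong();
            long remainingDelay = in.readLong();
            System.out.println(primary + " " + pendingFetch + " " + allocationDelay + " " + remainingDelay);
        }
    }
}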
@@ -97,6 +106,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         return this.primary;
     }

+    /** Return turn if shard data is still being fetched for the allocation */
+    public boolean isStillFetchingShardData() {
+        return this.hasPendingAsyncFetch;
+    }
+
     /** Return turn if the shard is assigned to a node */
     public boolean isAssigned() {
         return this.assignedNodeId != null;
@@ -114,7 +128,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         return this.unassignedInfo;
     }

-    /** Return the remaining allocation delay for this shard in millisocends */
+    /** Return the configured delay before the shard can be allocated in milliseconds */
+    public long getAllocationDelayMillis() {
+        return this.allocationDelayMillis;
+    }
+
+    /** Return the remaining allocation delay for this shard in milliseconds */
     public long getRemainingDelayMillis() {
         return this.remainingDelayMillis;
     }
@@ -138,11 +157,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         if (assignedNodeId != null) {
             builder.field("assigned_node_id", this.assignedNodeId);
         }
+        builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
         // If we have unassigned info, show that
         if (unassignedInfo != null) {
             unassignedInfo.toXContent(builder, params);
-            long delay = unassignedInfo.getLastComputedLeftDelayNanos();
-            builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
+            builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
             builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
         }
         builder.startObject("nodes");
@@ -50,6 +50,7 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -58,6 +59,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;

+import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
+
 /**
  * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
  * master node in the cluster.
@@ -69,19 +72,22 @@ public class TransportClusterAllocationExplainAction
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;
    private final TransportIndicesShardStoresAction shardStoresAction;
+    private final GatewayAllocator gatewayAllocator;

     @Inject
     public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                                    ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
-                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
+                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
+                                                   GatewayAllocator gatewayAllocator) {
         super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
               indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
         this.shardStoresAction = shardStoresAction;
+        this.gatewayAllocator = gatewayAllocator;
     }

     @Override
@@ -130,7 +136,8 @@ public class TransportClusterAllocationExplainAction
                                                       Float nodeWeight,
                                                       IndicesShardStoresResponse.StoreStatus storeStatus,
                                                       String assignedNodeId,
-                                                      Set<String> activeAllocationIds) {
+                                                      Set<String> activeAllocationIds,
+                                                      boolean hasPendingAsyncFetch) {
         final ClusterAllocationExplanation.FinalDecision finalDecision;
         final ClusterAllocationExplanation.StoreCopy storeCopy;
         final String finalExplanation;
@ -161,6 +168,19 @@ public class TransportClusterAllocationExplainAction
|
|||
if (node.getId().equals(assignedNodeId)) {
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
|
||||
finalExplanation = "the shard is already assigned to this node";
|
||||
} else if (hasPendingAsyncFetch &&
|
||||
shard.primary() == false &&
|
||||
shard.unassigned() &&
|
||||
shard.allocatedPostIndexCreate(indexMetaData) &&
|
||||
nodeDecision.type() != Decision.Type.YES) {
|
||||
finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
|
||||
" decision and the shard's state is still being fetched";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (hasPendingAsyncFetch &&
|
||||
shard.unassigned() &&
|
||||
shard.allocatedPostIndexCreate(indexMetaData)) {
|
||||
finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
} else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
|
||||
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
|
||||
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
|
||||
|
@ -180,6 +200,7 @@ public class TransportClusterAllocationExplainAction
|
|||
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
|
||||
finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
|
||||
} else {
|
||||
// TODO: handle throttling decision better here
|
||||
finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
|
||||
if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
|
||||
finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
|
||||
|
@ -198,7 +219,8 @@ public class TransportClusterAllocationExplainAction
|
|||
*/
|
||||
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
|
||||
boolean includeYesDecisions, ShardsAllocator shardAllocator,
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStores) {
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStores,
|
||||
GatewayAllocator gatewayAllocator) {
|
||||
// don't short circuit deciders, we want a full explanation
|
||||
allocation.debugDecision(true);
|
||||
// get the existing unassigned info if available
|
||||
|
@ -217,9 +239,9 @@ public class TransportClusterAllocationExplainAction
|
|||
long remainingDelayMillis = 0;
|
||||
final MetaData metadata = allocation.metaData();
|
||||
final IndexMetaData indexMetaData = metadata.index(shard.index());
|
||||
if (ui != null) {
|
||||
final Settings indexSettings = indexMetaData.getSettings();
|
||||
long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
|
||||
long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis();
|
||||
if (ui != null && ui.isDelayed()) {
|
||||
long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings());
|
||||
remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
|
||||
}
|
||||
|
||||
|
@ -238,11 +260,13 @@ public class TransportClusterAllocationExplainAction
|
|||
Float weight = weights.get(node);
|
||||
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
|
||||
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
|
||||
storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
|
||||
storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
|
||||
allocation.hasPendingAsyncFetch());
|
||||
explanations.put(node, nodeExplanation);
|
||||
}
|
||||
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
|
||||
shard.currentNodeId(), remainingDelayMillis, ui, explanations);
|
||||
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
|
||||
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -250,7 +274,7 @@ public class TransportClusterAllocationExplainAction
|
|||
final ActionListener<ClusterAllocationExplainResponse> listener) {
|
||||
final RoutingNodes routingNodes = state.getRoutingNodes();
|
||||
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
|
||||
clusterInfoService.getClusterInfo(), System.nanoTime());
|
||||
clusterInfoService.getClusterInfo(), System.nanoTime(), false);
|
||||
|
||||
ShardRouting foundShard = null;
|
||||
if (request.useAnyUnassignedShard()) {
|
||||
|
@ -297,7 +321,7 @@ public class TransportClusterAllocationExplainAction
|
|||
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
|
||||
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
|
||||
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
|
||||
request.includeYesDecisions(), shardAllocator, shardStoreStatus);
|
||||
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
|
||||
listener.onResponse(new ClusterAllocationExplainResponse(cae));
|
||||
}
|
||||
|
||||
|
|
|
@@ -192,6 +192,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
                builder.endObject();
                builder.endObject();
            }
            builder.endObject();
        } else if ("parents".equals(groupBy)) {
            builder.startObject("tasks");
            for (TaskGroup group : getTaskGroups()) {

@@ -19,28 +19,24 @@

package org.elasticsearch.action.admin.cluster.reroute;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandRegistry;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * Request to submit cluster reroute allocation commands
 */
public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
    AllocationCommands commands = new AllocationCommands();
    boolean dryRun;
    boolean explain;
    private AllocationCommands commands = new AllocationCommands();
    private boolean dryRun;
    private boolean explain;
    private boolean retryFailed;

    public ClusterRerouteRequest() {
    }

@@ -81,6 +77,15 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        return this;
    }

    /**
     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
     */
    public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
        this.retryFailed = retryFailed;
        return this;
    }

    /**
     * Returns the current explain flag
     */

@@ -88,41 +93,27 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        return this.explain;
    }

    /**
     * Returns the current retry failed flag
     */
    public boolean isRetryFailed() {
        return this.retryFailed;
    }

    /**
     * Set the allocation commands to execute.
     */
    public ClusterRerouteRequest commands(AllocationCommand... commands) {
        this.commands = new AllocationCommands(commands);
    public ClusterRerouteRequest commands(AllocationCommands commands) {
        this.commands = commands;
        return this;
    }

    /**
     * Sets the source for the request.
     * Returns the allocation commands to execute
     */
    public ClusterRerouteRequest source(BytesReference source, AllocationCommandRegistry registry, ParseFieldMatcher parseFieldMatcher)
            throws Exception {
        try (XContentParser parser = XContentHelper.createParser(source)) {
            XContentParser.Token token;
            String currentFieldName = null;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token == XContentParser.Token.START_ARRAY) {
                    if ("commands".equals(currentFieldName)) {
                        this.commands = AllocationCommands.fromXContent(parser, parseFieldMatcher, registry);
                    } else {
                        throw new ElasticsearchParseException("failed to parse reroute request, got start array with wrong field name [{}]", currentFieldName);
                    }
                } else if (token.isValue()) {
                    if ("dry_run".equals(currentFieldName) || "dryRun".equals(currentFieldName)) {
                        dryRun = parser.booleanValue();
                    } else {
                        throw new ElasticsearchParseException("failed to parse reroute request, got value with wrong field name [{}]", currentFieldName);
                    }
                }
            }
        }
        return this;
    public AllocationCommands getCommands() {
        return commands;
    }

    @Override

@@ -136,6 +127,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        commands = AllocationCommands.readFrom(in);
        dryRun = in.readBoolean();
        explain = in.readBoolean();
        retryFailed = in.readBoolean();
        readTimeout(in);
    }

@@ -145,6 +137,28 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        AllocationCommands.writeTo(commands, out);
        out.writeBoolean(dryRun);
        out.writeBoolean(explain);
        out.writeBoolean(retryFailed);
        writeTimeout(out);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        ClusterRerouteRequest other = (ClusterRerouteRequest) obj;
        // Override equals and hashCode for testing
        return Objects.equals(commands, other.commands) &&
                Objects.equals(dryRun, other.dryRun) &&
                Objects.equals(explain, other.explain) &&
                Objects.equals(timeout, other.timeout) &&
                Objects.equals(retryFailed, other.retryFailed) &&
                Objects.equals(masterNodeTimeout, other.masterNodeTimeout);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(commands, dryRun, explain, timeout, retryFailed, masterNodeTimeout);
    }
}

@@ -22,13 +22,12 @@ package org.elasticsearch.action.admin.cluster.reroute;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.common.bytes.BytesReference;

/**
 * Builder for a cluster reroute request
 */
public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {

public class ClusterRerouteRequestBuilder
        extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
    public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) {
        super(client, action, new ClusterRerouteRequest());
    }

@@ -61,10 +60,11 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<Clu
    }

    /**
     * Sets the commands for the request to execute.
     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
     */
    public ClusterRerouteRequestBuilder setCommands(AllocationCommand... commands) throws Exception {
        request.commands(commands);
    public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
        request.setRetryFailed(retryFailed);
        return this;
    }
}
}

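A sketch of how the new flag might be used from a client; it exercises only the builder API added above and assumes an already-constructed `Client`, so it is illustrative rather than a documented recipe:

------------------------------------
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.client.Client;

final class RetryFailedExample {
    // Asks the master to retry shards that hit the max-retry allocation limit,
    // without submitting any explicit allocation commands.
    static ClusterRerouteResponse retryFailedAllocations(Client client) {
        return client.admin().cluster()
                .prepareReroute()
                .setRetryFailed(true) // new flag from this change; defaults to false
                .get();
    }
}
------------------------------------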
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -68,38 +69,55 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu

    @Override
    protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
        clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {

            private volatile ClusterState clusterStateToSend;
            private volatile RoutingExplanations explanations;

            @Override
            protected ClusterRerouteResponse newResponse(boolean acknowledged) {
                return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
            }

            @Override
            public void onAckTimeout() {
                listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
            }

            @Override
            public void onFailure(String source, Throwable t) {
                logger.debug("failed to perform [{}]", t, source);
                super.onFailure(source, t);
            }

            @Override
            public ClusterState execute(ClusterState currentState) {
                RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain());
                ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
                clusterStateToSend = newState;
                explanations = routingResult.explanations();
                if (request.dryRun) {
                    return currentState;
                }
                return newState;
            }
        });
        clusterService.submitStateUpdateTask("cluster_reroute (api)", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger,
                allocationService, request, listener));
    }
}

    static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask<ClusterRerouteResponse> {

        private final ClusterRerouteRequest request;
        private final ActionListener<ClusterRerouteResponse> listener;
        private final ESLogger logger;
        private final AllocationService allocationService;
        private volatile ClusterState clusterStateToSend;
        private volatile RoutingExplanations explanations;

        ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
                                                          ActionListener<ClusterRerouteResponse> listener) {
            super(Priority.IMMEDIATE, request, listener);
            this.request = request;
            this.listener = listener;
            this.logger = logger;
            this.allocationService = allocationService;
        }

        @Override
        protected ClusterRerouteResponse newResponse(boolean acknowledged) {
            return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
        }

        @Override
        public void onAckTimeout() {
            listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
        }

        @Override
        public void onFailure(String source, Throwable t) {
            logger.debug("failed to perform [{}]", t, source);
            super.onFailure(source, t);
        }

        @Override
        public ClusterState execute(ClusterState currentState) {
            RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
                    request.isRetryFailed());
            ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
            clusterStateToSend = newState;
            explanations = routingResult.explanations();
            if (request.dryRun()) {
                return currentState;
            }
            return newState;
        }
    }
}

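Extracting the anonymous update task into the named, package-private static class above makes the reroute logic testable in isolation: a test in the same package can construct the task and drive `execute` directly. The class and constructor signature come from the diff; the surrounding scaffolding here is only a sketch, with all collaborators assumed to be supplied by a test fixture:

------------------------------------
// Test-style sketch (same package assumed); only the names are from the diff.
TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask task =
        new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(
                logger, allocationService, request, listener);
ClusterState resulting = task.execute(currentState); // returns currentState unchanged when request.dryRun()
------------------------------------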
@@ -81,18 +81,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
        return snapshotInfo.status();
    }

    static final class Fields {
        static final String SNAPSHOT = "snapshot";
        static final String ACCEPTED = "accepted";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (snapshotInfo != null) {
            builder.field(Fields.SNAPSHOT);
            snapshotInfo.toExternalXContent(builder, params);
            builder.field("snapshot");
            snapshotInfo.toXContent(builder, params);
        } else {
            builder.field(Fields.ACCEPTED, true);
            builder.field("accepted", true);
        }
        return builder;
    }

@@ -74,15 +74,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
        }
    }

    static final class Fields {
        static final String SNAPSHOTS = "snapshots";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startArray(Fields.SNAPSHOTS);
        builder.startArray("snapshots");
        for (SnapshotInfo snapshotInfo : snapshots) {
            snapshotInfo.toExternalXContent(builder, params);
            snapshotInfo.toXContent(builder, params);
        }
        builder.endArray();
        return builder;

@@ -73,18 +73,13 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten
        return restoreInfo.status();
    }

    static final class Fields {
        static final String SNAPSHOT = "snapshot";
        static final String ACCEPTED = "accepted";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        if (restoreInfo != null) {
            builder.field(Fields.SNAPSHOT);
            builder.field("snapshot");
            restoreInfo.toXContent(builder, params);
        } else {
            builder.field(Fields.ACCEPTED, true);
            builder.field("accepted", true);
        }
        return builder;
    }

@@ -73,13 +73,9 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
        }
    }

    static final class Fields {
        static final String SNAPSHOTS = "snapshots";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray(Fields.SNAPSHOTS);
        builder.startArray("snapshots");
        for (SnapshotStatus snapshot : snapshots) {
            snapshot.toXContent(builder, params);
        }

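The pattern across these four snapshot responses is the same: drop the `Fields` constant holder and inline the literal JSON keys. A minimal standalone sketch of the resulting serialization idiom, using only XContentBuilder (no snapshot classes); the `string()` accessor is the 5.x-era API and is an assumption here:

------------------------------------
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

final class InlineFieldNamesExample {
    // Produces {"accepted":true} the same way the rewritten responses do,
    // with the field name inlined instead of read from a Fields constant.
    static String acceptedBody() throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.field("accepted", true);
        builder.endObject();
        return builder.string();
    }
}
------------------------------------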
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;

@@ -45,7 +44,6 @@ public class ClusterStatsIndices implements ToXContent {
    private QueryCacheStats queryCache;
    private CompletionStats completion;
    private SegmentsStats segments;
    private PercolatorQueryCacheStats percolatorCache;

    public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) {
        ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>();

@@ -56,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent {
        this.queryCache = new QueryCacheStats();
        this.completion = new CompletionStats();
        this.segments = new SegmentsStats();
        this.percolatorCache = new PercolatorQueryCacheStats();

        for (ClusterStatsNodeResponse r : nodeResponses) {
            for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {

@@ -79,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent {
                queryCache.add(shardCommonStats.queryCache);
                completion.add(shardCommonStats.completion);
                segments.add(shardCommonStats.segments);
                percolatorCache.add(shardCommonStats.percolatorCache);
            }
        }

@@ -122,10 +118,6 @@ public class ClusterStatsIndices implements ToXContent {
        return segments;
    }

    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    static final class Fields {
        static final String COUNT = "count";
    }

@@ -140,7 +132,6 @@ public class ClusterStatsIndices implements ToXContent {
        queryCache.toXContent(builder, params);
        completion.toXContent(builder, params);
        segments.toXContent(builder, params);
        percolatorCache.toXContent(builder, params);
        return builder;
    }

@@ -55,8 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {

    private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
        CommonStatsFlags.Flag.PercolatorCache);
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments);

    private final NodeService nodeService;
    private final IndicesService indicesService;

@@ -100,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
                for (IndexShard indexShard : indexService) {
                    if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                        // only report on fully started shards
                        shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
                        shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
                    }
                }
            }

@@ -32,10 +32,8 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;

@@ -101,9 +99,6 @@ public class CommonStats implements Streamable, ToXContent {
                case Segments:
                    segments = new SegmentsStats();
                    break;
                case PercolatorCache:
                    percolatorCache = new PercolatorQueryCacheStats();
                    break;
                case Translog:
                    translog = new TranslogStats();
                    break;

@@ -123,8 +118,7 @@ public class CommonStats implements Streamable, ToXContent {
    }

    public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache,
                       IndexShard indexShard, CommonStatsFlags flags) {
    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

        CommonStatsFlags.Flag[] setFlags = flags.getFlags();

@@ -169,9 +163,6 @@ public class CommonStats implements Streamable, ToXContent {
                case Segments:
                    segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                    break;
                case PercolatorCache:
                    percolatorCache = percolatorQueryCache.getStats(indexShard.shardId());
                    break;
                case Translog:
                    translog = indexShard.translogStats();
                    break;

@@ -223,9 +214,6 @@ public class CommonStats implements Streamable, ToXContent {
    @Nullable
    public FieldDataStats fieldData;

    @Nullable
    public PercolatorQueryCacheStats percolatorCache;

    @Nullable
    public CompletionStats completion;

@@ -331,14 +319,6 @@ public class CommonStats implements Streamable, ToXContent {
        } else {
            fieldData.add(stats.getFieldData());
        }
        if (percolatorCache == null) {
            if (stats.getPercolatorCache() != null) {
                percolatorCache = new PercolatorQueryCacheStats();
                percolatorCache.add(stats.getPercolatorCache());
            }
        } else {
            percolatorCache.add(stats.getPercolatorCache());
        }
        if (completion == null) {
            if (stats.getCompletion() != null) {
                completion = new CompletionStats();

@@ -436,11 +416,6 @@ public class CommonStats implements Streamable, ToXContent {
        return this.fieldData;
    }

    @Nullable
    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    @Nullable
    public CompletionStats getCompletion() {
        return completion;

@@ -528,9 +503,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
        if (in.readBoolean()) {
            percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }

@@ -610,12 +582,6 @@ public class CommonStats implements Streamable, ToXContent {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
        if (percolatorCache == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            percolatorCache.writeTo(out);
        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {

@@ -669,9 +635,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (fieldData != null) {
            fieldData.toXContent(builder, params);
        }
        if (percolatorCache != null) {
            percolatorCache.toXContent(builder, params);
        }
        if (completion != null) {
            completion.toXContent(builder, params);
        }

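The serialization hunks above all follow CommonStats' nullable-stat wire protocol: a boolean presence flag followed by the stat payload. A standalone sketch of that idiom with ES's stream abstractions; `FooStats` is a hypothetical stand-in for any stat type such as CompletionStats:

------------------------------------
import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class OptionalStatWire {

    static void writeOptional(FooStats stats, StreamOutput out) throws IOException {
        if (stats == null) {
            out.writeBoolean(false);   // absent: flag only
        } else {
            out.writeBoolean(true);    // present: flag, then the payload
            stats.writeTo(out);
        }
    }

    static FooStats readOptional(StreamInput in) throws IOException {
        return in.readBoolean() ? FooStats.readFrom(in) : null; // hypothetical reader
    }
}
------------------------------------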
@@ -240,7 +240,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        FieldData("fielddata"),
        Docs("docs"),
        Warmer("warmer"),
        PercolatorCache("percolator_cache"),
        Completion("completion"),
        Segments("segments"),
        Translog("translog"),

@@ -184,15 +184,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.FieldData);
    }

    public IndicesStatsRequest percolate(boolean percolate) {
        flags.set(Flag.PercolatorCache, percolate);
        return this;
    }

    public boolean percolate() {
        return flags.isSet(Flag.PercolatorCache);
    }

    public IndicesStatsRequest segments(boolean segments) {
        flags.set(Flag.Segments, segments);
        return this;

@@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        return this;
    }

    public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
        request.percolate(percolate);
        return this;
    }

    public IndicesStatsRequestBuilder setSegments(boolean segments) {
        request.segments(segments);
        return this;

@@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.fieldDataFields());
        }
        if (request.percolate()) {
            flags.set(CommonStatsFlags.Flag.PercolatorCache);
        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());

@@ -163,6 +160,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.Recovery);
        }

        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
    }
}

@@ -19,8 +19,6 @@

package org.elasticsearch.action.search;

import java.util.Map;

/**
 *
 */

@@ -36,13 +34,10 @@ class ParsedScrollId {

    private final ScrollIdForNode[] context;

    private final Map<String, String> attributes;

    public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map<String, String> attributes) {
    public ParsedScrollId(String source, String type, ScrollIdForNode[] context) {
        this.source = source;
        this.type = type;
        this.context = context;
        this.attributes = attributes;
    }

    public String getSource() {

@@ -56,8 +51,4 @@ class ParsedScrollId {
    public ScrollIdForNode[] getContext() {
        return context;
    }

    public Map<String, String> getAttributes() {
        return this.attributes;
    }
}

@@ -123,7 +123,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
                queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));

@@ -200,7 +200,7 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
                fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));

@@ -66,7 +66,7 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
                firstResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));

@@ -133,7 +133,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
                fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
            scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                successfulOps.get(), buildTookInMillis(), buildShardFailures()));

@@ -19,21 +19,16 @@

package org.elasticsearch.action.search;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.emptyMap;
import java.util.Base64;

/**
 *

@@ -49,79 +44,49 @@ final class TransportSearchHelper {
        return new InternalScrollSearchRequest(request, id);
    }

    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults);
        } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes);
            return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults);
        } else {
            throw new IllegalStateException("search_type [" + searchType + "] not supported");
        }
    }

    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
                                @Nullable Map<String, String> attributes) throws IOException {
        StringBuilder sb = new StringBuilder().append(type).append(';');
        sb.append(searchPhaseResults.asList().size()).append(';');
        for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
            SearchPhaseResult searchPhaseResult = entry.value;
            sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';');
        }
        if (attributes == null) {
            sb.append("0;");
        } else {
            sb.append(attributes.size()).append(";");
            for (Map.Entry<String, String> entry : attributes.entrySet()) {
                sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';');
    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString(type);
            out.writeVInt(searchPhaseResults.asList().size());
            for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
                SearchPhaseResult searchPhaseResult = entry.value;
                out.writeLong(searchPhaseResult.id());
                out.writeString(searchPhaseResult.shardTarget().nodeId());
            }
            byte[] bytes = new byte[(int) out.getFilePointer()];
            out.writeTo(bytes, 0);
            return Base64.getUrlEncoder().encodeToString(bytes);
        }
        BytesRef bytesRef = new BytesRef(sb);
        return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
    }

    static ParsedScrollId parseScrollId(String scrollId) {
        CharsRefBuilder spare = new CharsRefBuilder();
        try {
            byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
            spare.copyUTF8Bytes(decode, 0, decode.length);
            byte[] bytes = Base64.getUrlDecoder().decode(scrollId);
            ByteArrayDataInput in = new ByteArrayDataInput(bytes);
            String type = in.readString();
            ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()];
            for (int i = 0; i < context.length; ++i) {
                long id = in.readLong();
                String target = in.readString();
                context[i] = new ScrollIdForNode(target, id);
            }
            if (in.getPosition() != bytes.length) {
                throw new IllegalArgumentException("Not all bytes were read");
            }
            return new ParsedScrollId(scrollId, type, context);
        } catch (Exception e) {
            throw new IllegalArgumentException("Failed to decode scrollId", e);
            throw new IllegalArgumentException("Cannot parse scroll id", e);
        }
        String[] elements = spare.get().toString().split(";");
        if (elements.length < 2) {
            throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
        }

        int index = 0;
        String type = elements[index++];
        int contextSize = Integer.parseInt(elements[index++]);
        if (elements.length < contextSize + 2) {
            throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
        }

        ScrollIdForNode[] context = new ScrollIdForNode[contextSize];
        for (int i = 0; i < contextSize; i++) {
            String element = elements[index++];
            int sep = element.indexOf(':');
            if (sep == -1) {
                throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
            }
            context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
        }
        Map<String, String> attributes;
        int attributesSize = Integer.parseInt(elements[index++]);
        if (attributesSize == 0) {
            attributes = emptyMap();
        } else {
            attributes = new HashMap<>(attributesSize);
            for (int i = 0; i < attributesSize; i++) {
                String element = elements[index++];
                int sep = element.indexOf(':');
                attributes.put(element.substring(0, sep), element.substring(sep + 1));
            }
        }
        return new ParsedScrollId(scrollId, type, context, attributes);
    }

    private TransportSearchHelper() {

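The new scroll id is just a Base64url-encoded binary record: a type string, a vInt count, then (long id, node string) pairs. A self-contained round-trip sketch using the same Lucene and JDK classes as the diff, with one hard-coded entry for illustration:

------------------------------------
import java.util.Base64;

import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.RAMOutputStream;

final class ScrollIdRoundTrip {
    public static void main(String[] args) throws Exception {
        // Encode: mirrors buildScrollId above.
        String encoded;
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString("queryThenFetch"); // ParsedScrollId.QUERY_THEN_FETCH_TYPE
            out.writeVInt(1);                  // one search phase result
            out.writeLong(42L);                // context id
            out.writeString("node-1");         // node holding the context
            byte[] bytes = new byte[(int) out.getFilePointer()];
            out.writeTo(bytes, 0);
            encoded = Base64.getUrlEncoder().encodeToString(bytes);
        }

        // Decode: mirrors parseScrollId above, including the trailing-bytes check.
        byte[] bytes = Base64.getUrlDecoder().decode(encoded);
        ByteArrayDataInput in = new ByteArrayDataInput(bytes);
        String type = in.readString();
        int size = in.readVInt();
        long id = in.readLong();
        String node = in.readString();
        if (in.getPosition() != bytes.length) {
            throw new IllegalArgumentException("Not all bytes were read");
        }
        System.out.println(type + " " + size + " " + id + "@" + node);
    }
}
------------------------------------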
@@ -133,6 +133,10 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
            return;
        }

        if (task != null && request.getShouldPersistResult()) {
            listener = new PersistentActionListener<>(taskManager, task, listener);
        }

        if (filters.length == 0) {
            try {
                doExecute(task, request, listener);

@@ -171,7 +175,7 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
            if (i < this.action.filters.length) {
                this.action.filters[i].apply(task, actionName, request, listener, this);
            } else if (i == this.action.filters.length) {
                this.action.doExecute(task, request, new FilteredActionListener<Response>(actionName, listener,
                this.action.doExecute(task, request, new FilteredActionListener<>(actionName, listener,
                        new ResponseFilterChain<>(this.action.filters, logger)));
            } else {
                listener.onFailure(new IllegalStateException("proceed was called too many times"));

@@ -246,4 +250,37 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
            listener.onFailure(e);
        }
    }

    /**
     * Wrapper for an action listener that persists the result at the end of the execution
     */
    private static class PersistentActionListener<Response extends ActionResponse> implements ActionListener<Response> {
        private final ActionListener<Response> delegate;
        private final Task task;
        private final TaskManager taskManager;

        private PersistentActionListener(TaskManager taskManager, Task task, ActionListener<Response> delegate) {
            this.taskManager = taskManager;
            this.task = task;
            this.delegate = delegate;
        }

        @Override
        public void onResponse(Response response) {
            try {
                taskManager.persistResult(task, response, delegate);
            } catch (Throwable e) {
                delegate.onFailure(e);
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                taskManager.persistResult(task, e, delegate);
            } catch (Throwable e1) {
                delegate.onFailure(e1);
            }
        }
    }
}

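PersistentActionListener is an instance of the general decorate-the-callback pattern: intercept both outcomes, perform a side effect, and fall back to the delegate's failure path if the side effect itself throws. A dependency-free sketch of that shape; the `persist` body here is purely illustrative and diverges from the real taskManager.persistResult, which invokes the delegate itself:

------------------------------------
import java.util.function.Consumer;

final class SideEffectListener<T> {
    private final Consumer<T> delegateSuccess;
    private final Consumer<Throwable> delegateFailure;

    SideEffectListener(Consumer<T> delegateSuccess, Consumer<Throwable> delegateFailure) {
        this.delegateSuccess = delegateSuccess;
        this.delegateFailure = delegateFailure;
    }

    void onResponse(T response) {
        try {
            persist(response);             // stand-in for taskManager.persistResult(...)
            delegateSuccess.accept(response);
        } catch (RuntimeException e) {
            delegateFailure.accept(e);     // same fallback as the diff's catch block
        }
    }

    void onFailure(Throwable t) {
        delegateFailure.accept(t);
    }

    private void persist(T response) {
        System.out.println("persisted: " + response); // illustrative side effect only
    }
}
------------------------------------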
@@ -177,15 +177,7 @@ final class Bootstrap {
        // install SM after natives, shutdown hooks, etc.
        Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

        // We do not need to reload system properties here as we have already applied them in building the settings and
        // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
        // placeholder
        Settings nodeSettings = Settings.builder()
                .put(settings)
                .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
                .build();

        node = new Node(nodeSettings) {
        node = new Node(settings) {
            @Override
            protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
                BootstrapCheck.check(settings, boundTransportAddress);

@@ -193,13 +185,13 @@ final class Bootstrap {
        };
    }

    private static Environment initialSettings(boolean foreground, String pidFile) {
    private static Environment initialSettings(boolean foreground, String pidFile, Map<String, String> esSettings) {
        Terminal terminal = foreground ? Terminal.DEFAULT : null;
        Settings.Builder builder = Settings.builder();
        if (Strings.hasLength(pidFile)) {
            builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
        }
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal);
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
    }

    private void start() {

@@ -233,11 +225,13 @@ final class Bootstrap {
        // Set the system property before anything has a chance to trigger its use
        initLoggerPrefix();

        elasticsearchSettings(esSettings);
        // force the class initializer for BootstrapInfo to run before
        // the security manager is installed
        BootstrapInfo.init();

        INSTANCE = new Bootstrap();

        Environment environment = initialSettings(foreground, pidFile);
        Environment environment = initialSettings(foreground, pidFile, esSettings);
        Settings settings = environment.settings();
        LogConfigurator.configure(settings, true);
        checkForCustomConfFile();

@@ -295,13 +289,6 @@ final class Bootstrap {
        }
    }

    @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
    private static void elasticsearchSettings(Map<String, String> esSettings) {
        for (Map.Entry<String, String> esSetting : esSettings.entrySet()) {
            System.setProperty(esSetting.getKey(), esSetting.getValue());
        }
    }

    @SuppressForbidden(reason = "System#out")
    private static void closeSystOut() {
        System.out.close();

@@ -120,4 +120,8 @@ public final class BootstrapInfo {
        }
        return SYSTEM_PROPERTIES;
    }

    public static void init() {
    }

}

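The empty `init()` exists only to force BootstrapInfo's static initializer to run at a controlled point, before the security manager is installed (per the comment added in Bootstrap above). The idiom in isolation, with an illustrative class name:

------------------------------------
final class EagerInit {
    static {
        // One-time setup that must complete before some later restriction
        // kicks in; in BootstrapInfo's case, before the SecurityManager.
        System.out.println("class initializer ran");
    }

    // Calling this no-op is enough to trigger the static block exactly once.
    static void init() {
    }

    public static void main(String[] args) {
        EagerInit.init(); // forces <clinit> at a point we choose
    }
}
------------------------------------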
@@ -21,28 +21,25 @@ package org.elasticsearch.bootstrap;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.Build;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * This class starts elasticsearch.
 */
class Elasticsearch extends Command {
class Elasticsearch extends SettingCommand {

    private final OptionSpec<Void> versionOption;
    private final OptionSpec<Void> daemonizeOption;
    private final OptionSpec<String> pidfileOption;
    private final OptionSpec<KeyValuePair> propertyOption;

    // visible for testing
    Elasticsearch() {

@@ -56,7 +53,6 @@ class Elasticsearch extends Command {
        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
            "Creates a pid file in the specified path on start")
            .withRequiredArg();
        propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    /**

@@ -75,7 +71,7 @@ class Elasticsearch extends Command {
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
        if (options.nonOptionArguments().isEmpty() == false) {
            throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
        }

@@ -84,26 +80,15 @@ class Elasticsearch extends Command {
            throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");
        }
        terminal.println("Version: " + org.elasticsearch.Version.CURRENT
            + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
            + ", JVM: " + JvmInfo.jvmInfo().version());
                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
                + ", JVM: " + JvmInfo.jvmInfo().version());
            return;
        }

        final boolean daemonize = options.has(daemonizeOption);
        final String pidFile = pidfileOption.value(options);

        final Map<String, String> esSettings = new HashMap<>();
        for (final KeyValuePair kvp : propertyOption.values(options)) {
            if (!kvp.key.startsWith("es.")) {
                throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]");
            }
            if (kvp.value.isEmpty()) {
                throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty");
            }
            esSettings.put(kvp.key, kvp.value);
        }

        init(daemonize, pidFile, esSettings);
        init(daemonize, pidFile, settings);
    }

    void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {

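The `-E key=value` parsing that moved out of this class relies on jopt-simple's `KeyValuePair` converter. A standalone sketch of just that mechanism; the option name matches the diff, the rest is illustrative:

------------------------------------
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;

final class ESettingParsing {
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        OptionSpec<KeyValuePair> settingOption =
                parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);

        OptionSet options = parser.parse("-E", "path.home=/tmp/es", "-E", "cluster.name=dev");
        for (KeyValuePair kvp : settingOption.values(options)) {
            System.out.println(kvp.key + " -> " + kvp.value); // KeyValuePair splits on '='
        }
    }
}
------------------------------------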
@@ -244,7 +244,7 @@ final class Security {
        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
        addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
        addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
        addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
        addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink");
        // read-write dirs

@@ -19,15 +19,15 @@

package org.elasticsearch.cli;

import java.io.IOException;
import java.util.Arrays;

import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;

import java.io.IOException;
import java.util.Arrays;

/**
 * An action to execute within a cli.
 */

@@ -112,4 +112,5 @@ public abstract class Command {
     *
     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
    protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;

}

@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public abstract class SettingCommand extends Command {

    private final OptionSpec<KeyValuePair> settingOption;

    public SettingCommand(String description) {
        super(description);
        this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        final Map<String, String> settings = new HashMap<>();
        for (final KeyValuePair kvp : settingOption.values(options)) {
            if (kvp.value.isEmpty()) {
                throw new UserError(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty");
            }
            settings.put(kvp.key, kvp.value);
        }

        putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
        putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
        putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
        putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");

        execute(terminal, options, settings);
    }

    protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
        final String value = System.getProperty(key);
        if (value != null) {
            if (settings.containsKey(setting)) {
                final String message =
                        String.format(
                                Locale.ROOT,
                                "duplicate setting [%s] found via command-line [%s] and system property [%s]",
                                setting,
                                settings.get(setting),
                                value);
                throw new IllegalArgumentException(message);
            } else {
                settings.put(setting, value);
            }
        }
    }

    protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;

}

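A hedged sketch of what a concrete subclass looks like; `PrintSettingsCommand` is hypothetical, and only the `SettingCommand` API comes from the new file above. Subclasses inherit the -E parsing and the es.path.* system-property fallbacks, and see only the merged settings map:

------------------------------------
package org.elasticsearch.cli;

import java.util.Map;

import joptsimple.OptionSet;

// Hypothetical subclass for illustration only.
class PrintSettingsCommand extends SettingCommand {

    PrintSettingsCommand() {
        super("Prints the resolved settings");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
        for (Map.Entry<String, String> entry : settings.entrySet()) {
            terminal.println(entry.getKey() + "=" + entry.getValue());
        }
    }
}
------------------------------------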
@ -42,12 +42,6 @@ import org.elasticsearch.action.get.MultiGetResponse;
|
|||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.percolate.MultiPercolateRequest;
|
||||
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.MultiPercolateResponse;
|
||||
import org.elasticsearch.action.percolate.PercolateRequest;
|
||||
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.PercolateResponse;
|
||||
import org.elasticsearch.action.search.ClearScrollRequest;
|
||||
import org.elasticsearch.action.search.ClearScrollRequestBuilder;
|
||||
import org.elasticsearch.action.search.ClearScrollResponse;
|
||||
|
@ -419,36 +413,6 @@ public interface Client extends ElasticsearchClient, Releasable {
|
|||
*/
|
||||
MultiTermVectorsRequestBuilder prepareMultiTermVectors();
|
||||
|
||||
/**
|
||||
* Percolates a request returning the matches documents.
|
||||
*/
|
||||
ActionFuture<PercolateResponse> percolate(PercolateRequest request);
|
||||
|
||||
/**
|
||||
* Percolates a request returning the matches documents.
|
||||
*/
|
||||
void percolate(PercolateRequest request, ActionListener<PercolateResponse> listener);
|
||||
|
||||
/**
|
||||
* Percolates a request returning the matches documents.
|
||||
*/
|
||||
PercolateRequestBuilder preparePercolate();
|
||||
|
||||
/**
|
||||
* Performs multiple percolate requests.
|
||||
*/
|
||||
ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request);
|
||||
|
||||
/**
|
||||
* Performs multiple percolate requests.
|
||||
*/
|
||||
void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener);
|
||||
|
||||
/**
|
||||
* Performs multiple percolate requests.
|
||||
*/
|
||||
MultiPercolateRequestBuilder prepareMultiPercolate();
|
||||
|
||||
/**
|
||||
* Computes a score explanation for the specified request.
|
||||
*
|
||||
|
|
|
@@ -295,14 +295,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.percolate.MultiPercolateAction;
import org.elasticsearch.action.percolate.MultiPercolateRequest;
import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
import org.elasticsearch.action.percolate.MultiPercolateResponse;
import org.elasticsearch.action.percolate.PercolateAction;
import org.elasticsearch.action.percolate.PercolateRequest;
import org.elasticsearch.action.percolate.PercolateRequestBuilder;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollRequestBuilder;

@@ -623,36 +615,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new MultiTermVectorsRequestBuilder(this, MultiTermVectorsAction.INSTANCE);
    }

    @Override
    public ActionFuture<PercolateResponse> percolate(final PercolateRequest request) {
        return execute(PercolateAction.INSTANCE, request);
    }

    @Override
    public void percolate(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
        execute(PercolateAction.INSTANCE, request, listener);
    }

    @Override
    public PercolateRequestBuilder preparePercolate() {
        return new PercolateRequestBuilder(this, PercolateAction.INSTANCE);
    }

    @Override
    public MultiPercolateRequestBuilder prepareMultiPercolate() {
        return new MultiPercolateRequestBuilder(this, MultiPercolateAction.INSTANCE);
    }

    @Override
    public void multiPercolate(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
        execute(MultiPercolateAction.INSTANCE, request, listener);
    }

    @Override
    public ActionFuture<MultiPercolateResponse> multiPercolate(MultiPercolateRequest request) {
        return execute(MultiPercolateAction.INSTANCE, request);
    }

    @Override
    public ExplainRequestBuilder prepareExplain(String index, String type, String id) {
        return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id);
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.routing.DelayedAllocationService;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;

@@ -49,6 +50,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;

@@ -62,6 +64,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.tasks.TaskResultsService;

import java.util.Arrays;
import java.util.Collections;

@@ -79,6 +82,7 @@ public class ClusterModule extends AbstractModule {
        new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
    public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
        Collections.unmodifiableList(Arrays.asList(
            MaxRetryAllocationDecider.class,
            SameShardAllocationDecider.class,
            FilterAllocationDecider.class,
            ReplicaAfterPrimaryActiveAllocationDecider.class,

@@ -149,9 +153,11 @@ public class ClusterModule extends AbstractModule {
        bind(MetaDataIndexTemplateService.class).asEagerSingleton();
        bind(IndexNameExpressionResolver.class).asEagerSingleton();
        bind(RoutingService.class).asEagerSingleton();
        bind(DelayedAllocationService.class).asEagerSingleton();
        bind(ShardStateAction.class).asEagerSingleton();
        bind(NodeIndexDeletedAction.class).asEagerSingleton();
        bind(NodeMappingRefreshAction.class).asEagerSingleton();
        bind(MappingUpdatedAction.class).asEagerSingleton();
        bind(TaskResultsService.class).asEagerSingleton();
    }
}
@@ -42,7 +42,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
@@ -0,0 +1,225 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing;

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

/**
 * The {@link DelayedAllocationService} listens to cluster state changes and checks
 * if there are unassigned shards with delayed allocation (unassigned shards that have
 * the delay marker). These are shards that have become unassigned due to a node leaving
 * and which were assigned the delay marker based on the index delay setting
 * {@link UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING}
 * (see {@link AllocationService#deassociateDeadNodes(RoutingAllocation)}).
 * This class is responsible for choosing the next (closest) delay expiration of a
 * delayed shard to schedule a reroute to remove the delay marker.
 * The actual removal of the delay marker happens in
 * {@link AllocationService#removeDelayMarkers(RoutingAllocation)}, triggering yet
 * another cluster change event.
 */
public class DelayedAllocationService extends AbstractLifecycleComponent<DelayedAllocationService> implements ClusterStateListener {

    static final String CLUSTER_UPDATE_TASK_SOURCE = "delayed_allocation_reroute";

    final ThreadPool threadPool;
    private final ClusterService clusterService;
    private final AllocationService allocationService;

    AtomicReference<DelayedRerouteTask> delayedRerouteTask = new AtomicReference<>(); // package private to access from tests

    /**
     * represents a delayed scheduling of the reroute action that can be cancelled.
     */
    class DelayedRerouteTask extends ClusterStateUpdateTask {
        final TimeValue nextDelay; // delay until submitting the reroute command
        final long baseTimestampNanos; // timestamp (in nanos) upon which delay was calculated
        volatile ScheduledFuture future;
        final AtomicBoolean cancelScheduling = new AtomicBoolean();

        DelayedRerouteTask(TimeValue nextDelay, long baseTimestampNanos) {
            this.nextDelay = nextDelay;
            this.baseTimestampNanos = baseTimestampNanos;
        }

        public long scheduledTimeToRunInNanos() {
            return baseTimestampNanos + nextDelay.nanos();
        }

        public void cancelScheduling() {
            cancelScheduling.set(true);
            FutureUtils.cancel(future);
            removeIfSameTask(this);
        }

        public void schedule() {
            future = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
                @Override
                protected void doRun() throws Exception {
                    if (cancelScheduling.get()) {
                        return;
                    }
                    clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, DelayedRerouteTask.this);
                }

                @Override
                public void onFailure(Throwable t) {
                    logger.warn("failed to submit schedule/execute reroute post unassigned shard", t);
                    removeIfSameTask(DelayedRerouteTask.this);
                }
            });
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            removeIfSameTask(this);
            RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "assign delayed unassigned shards");
            if (routingResult.changed()) {
                return ClusterState.builder(currentState).routingResult(routingResult).build();
            } else {
                return currentState;
            }
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            if (oldState == newState) {
                // no state changed, check when we should remove the delay flag from the shards the next time.
                // if cluster state changed, we can leave the scheduling of the next delay up to the clusterChangedEvent
                // this should not be needed, but we want to be extra safe here
                scheduleIfNeeded(currentNanoTime(), newState);
            }
        }

        @Override
        public void onFailure(String source, Throwable t) {
            removeIfSameTask(this);
            logger.warn("failed to schedule/execute reroute post unassigned shard", t);
        }
    }

    @Inject
    public DelayedAllocationService(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                    AllocationService allocationService) {
        super(settings);
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.allocationService = allocationService;
        clusterService.addFirst(this);
    }

    @Override
    protected void doStart() {
    }

    @Override
    protected void doStop() {
    }

    @Override
    protected void doClose() {
        clusterService.remove(this);
        removeTaskAndCancel();
    }

    /** override this to control time based decisions during delayed allocation */
    protected long currentNanoTime() {
        return System.nanoTime();
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        long currentNanoTime = currentNanoTime();
        if (event.state().nodes().isLocalNodeElectedMaster()) {
            scheduleIfNeeded(currentNanoTime, event.state());
        }
    }

    private void removeTaskAndCancel() {
        DelayedRerouteTask existingTask = delayedRerouteTask.getAndSet(null);
        if (existingTask != null) {
            logger.trace("cancelling existing delayed reroute task");
            existingTask.cancelScheduling();
        }
    }

    private void removeIfSameTask(DelayedRerouteTask expectedTask) {
        delayedRerouteTask.compareAndSet(expectedTask, null);
    }

    /**
     * Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
     */
    private void scheduleIfNeeded(long currentNanoTime, ClusterState state) {
        assertClusterStateThread();
        long nextDelayNanos = UnassignedInfo.findNextDelayedAllocation(currentNanoTime, state);
        if (nextDelayNanos < 0) {
            logger.trace("no need to schedule reroute - no delayed unassigned shards");
            removeTaskAndCancel();
        } else {
            TimeValue nextDelay = TimeValue.timeValueNanos(nextDelayNanos);
            final boolean earlierRerouteNeeded;
            DelayedRerouteTask existingTask = delayedRerouteTask.get();
            DelayedRerouteTask newTask = new DelayedRerouteTask(nextDelay, currentNanoTime);
            if (existingTask == null) {
                earlierRerouteNeeded = true;
            } else if (newTask.scheduledTimeToRunInNanos() < existingTask.scheduledTimeToRunInNanos()) {
                // we need an earlier delayed reroute
                logger.trace("cancelling existing delayed reroute task as delayed reroute has to happen [{}] earlier",
                    TimeValue.timeValueNanos(existingTask.scheduledTimeToRunInNanos() - newTask.scheduledTimeToRunInNanos()));
                existingTask.cancelScheduling();
                earlierRerouteNeeded = true;
            } else {
                earlierRerouteNeeded = false;
            }

            if (earlierRerouteNeeded) {
                logger.info("scheduling reroute for delayed shards in [{}] ({} delayed shards)", nextDelay,
                    UnassignedInfo.getNumberOfDelayedUnassigned(state));
                DelayedRerouteTask currentTask = delayedRerouteTask.getAndSet(newTask);
                assert existingTask == currentTask || currentTask == null;
                newTask.schedule();
            } else {
                logger.trace("no need to reschedule delayed reroute - currently scheduled delayed reroute in [{}] is enough", nextDelay);
            }
        }
    }

    // protected so that it can be overridden (and disabled) by unit tests
    protected void assertClusterStateThread() {
        ClusterService.assertClusterStateThread();
    }
}
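The delay this service waits out comes from the per-index `index.unassigned.node_left.delayed_timeout` setting referenced in the class javadoc. A minimal sketch of raising it ahead of planned node maintenance (the index name and client setup are hypothetical):

    // Sketch: give shards of a departing node 5 minutes before replicas are reallocated.
    client.admin().indices().prepareUpdateSettings("my-index")
        .setSettings(Settings.builder()
            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "5m"))
        .get();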
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -331,15 +332,13 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

    public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) {
        ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
        // fill it in a randomized fashion
        for (int i = 0; i < activeShards.size(); i++) {
            ShardRouting shardRouting = activeShards.get(i);
        int seed = shuffler.nextSeed();
        for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
            if (nodeId.equals(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        for (int i = 0; i < allInitializingShards.size(); i++) {
            ShardRouting shardRouting = allInitializingShards.get(i);
        for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
            if (nodeId.equals(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }

@@ -347,26 +346,31 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
        return new PlainShardIterator(shardId, ordered);
    }

    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) {
        return onlyNodeSelectorActiveInitializingShardsIt(new String[] {nodeAttributes}, discoveryNodes);
    }

    /**
     * Returns shards based on the nodeAttributes given, such as node name, node attribute, or node IP.
     * Supports node specifications in cluster API
     */
    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttribute, DiscoveryNodes discoveryNodes) {
    public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) {
        ArrayList<ShardRouting> ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size());
        Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttribute));

        for (ShardRouting shardRouting : activeShards) {
        Set<String> selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttributes));
        int seed = shuffler.nextSeed();
        for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) {
            if (selectedNodes.contains(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        for (ShardRouting shardRouting : allInitializingShards) {
        for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) {
            if (selectedNodes.contains(shardRouting.currentNodeId())) {
                ordered.add(shardRouting);
            }
        }
        if (ordered.isEmpty()) {
            throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found");
            throw new IllegalArgumentException("no data nodes with criteria(s) [" +
                Strings.arrayToCommaDelimitedString(nodeAttributes) + "] found for shard: " + shardId());
        }
        return new PlainShardIterator(shardId, ordered);
    }
@@ -177,8 +177,8 @@ public class OperationRouting extends AbstractComponent {
                ensureNodeIdExists(nodes, nodeId);
                return indexShard.onlyNodeActiveInitializingShardsIt(nodeId);
            case ONLY_NODES:
                String nodeAttribute = preference.substring(Preference.ONLY_NODES.type().length() + 1);
                return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttribute, nodes);
                String nodeAttributes = preference.substring(Preference.ONLY_NODES.type().length() + 1);
                return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttributes.split(","), nodes);
            default:
                throw new IllegalArgumentException("unknown preference [" + preferenceType + "]");
        }
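The split on "," above is what lets a single `_only_nodes` preference carry several node specifications. A short sketch, assuming the `_only_nodes` prefix resolved by `Preference.ONLY_NODES` and hypothetical index and node names:

    // Sketch: restrict the search to shards hosted on either of two nodes.
    SearchResponse response = client.prepareSearch("my-index")
        .setPreference("_only_nodes:node-1,node-2")
        .get();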
@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -30,12 +29,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;

/**

@@ -50,27 +44,20 @@ import java.util.concurrent.atomic.AtomicBoolean;
 * actions.
 * </p>
 */
public class RoutingService extends AbstractLifecycleComponent<RoutingService> implements ClusterStateListener {
public class RoutingService extends AbstractLifecycleComponent<RoutingService> {

    private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute";

    final ThreadPool threadPool;
    private final ClusterService clusterService;
    private final AllocationService allocationService;

    private AtomicBoolean rerouting = new AtomicBoolean();
    private volatile long minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
    private volatile ScheduledFuture registeredNextDelayFuture;

    @Inject
    public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) {
    public RoutingService(Settings settings, ClusterService clusterService, AllocationService allocationService) {
        super(settings);
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.allocationService = allocationService;
        if (clusterService != null) {
            clusterService.addFirst(this);
        }
    }

    @Override

@@ -83,8 +70,6 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> implements ClusterStateListener {

    @Override
    protected void doClose() {
        FutureUtils.cancel(registeredNextDelayFuture);
        clusterService.remove(this);
    }

    public AllocationService getAllocationService() {

@@ -98,48 +83,6 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> implements ClusterStateListener {
        performReroute(reason);
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.state().nodes().isLocalNodeElectedMaster()) {
            // Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule.
            // If the minimum of the currently relevant delay settings is larger than something we scheduled in the past,
            // we are guaranteed that the planned schedule will happen before any of the current shard delays are expired.
            long minDelaySetting = UnassignedInfo.findSmallestDelayedAllocationSettingNanos(settings, event.state());
            if (minDelaySetting <= 0) {
                logger.trace("no need to schedule reroute - no delayed unassigned shards, minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos);
                minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
                FutureUtils.cancel(registeredNextDelayFuture);
            } else if (minDelaySetting < minDelaySettingAtLastSchedulingNanos) {
                FutureUtils.cancel(registeredNextDelayFuture);
                minDelaySettingAtLastSchedulingNanos = minDelaySetting;
                TimeValue nextDelay = TimeValue.timeValueNanos(UnassignedInfo.findNextDelayedAllocationIn(event.state()));
                assert nextDelay.nanos() > 0 : "next delay must be non 0 as minDelaySetting is [" + minDelaySetting + "]";
                logger.info("delaying allocation for [{}] unassigned shards, next check in [{}]",
                    UnassignedInfo.getNumberOfDelayedUnassigned(event.state()), nextDelay);
                registeredNextDelayFuture = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
                    @Override
                    protected void doRun() throws Exception {
                        minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
                        reroute("assign delayed unassigned shards");
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        logger.warn("failed to schedule/execute reroute post unassigned shard", t);
                        minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE;
                    }
                });
            } else {
                logger.trace("no need to schedule reroute - current schedule reroute is enough. minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos);
            }
        }
    }

    // visible for testing
    long getMinDelaySettingAtLastSchedulingNanos() {
        return this.minDelaySettingAtLastSchedulingNanos;
    }

    // visible for testing
    protected void performReroute(String reason) {
        try {
@@ -316,6 +316,7 @@ public final class ShardRouting implements Writeable, ToXContent {

    public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
        assert this.unassignedInfo != null : "can only update unassign info if they are already set";
        assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed";
        return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state,
            unassignedInfo, allocationId, expectedShardSize);
    }
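The second assertion reads as a boolean implication: `current.isDelayed() || !updated.isDelayed()` is equivalent to "updated delayed implies current delayed", so an update may keep or clear the delay marker but never introduce one. A minimal truth-table sketch with plain booleans, detached from this class:

    boolean currentDelayed = false;
    boolean updatedDelayed = true;
    // delayed -> delayed, delayed -> not delayed, not delayed -> not delayed: all pass
    // not delayed -> delayed is the only combination that trips the assertion
    assert currentDelayed || (updatedDelayed == false) : "cannot transition from non-delayed to delayed";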
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -43,12 +44,10 @@ import java.io.IOException;
public final class UnassignedInfo implements ToXContent, Writeable {

    public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
    private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);

    public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
        Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
        Setting.timeSetting("index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), Property.Dynamic,
            Property.IndexScope);

    /**
     * Reason why the shard is in unassigned state.
     * <p>
@@ -103,24 +102,29 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        /**
         * A better replica location is identified and causes the existing replica allocation to be cancelled.
         */
        REALLOCATED_REPLICA;
        REALLOCATED_REPLICA,
        /**
         * Unassigned as a result of a failed primary while the replica was initializing.
         */
        PRIMARY_FAILED;
    }

    private final Reason reason;
    private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
    private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
    private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
    private final boolean delayed; // if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING
    private final String message;
    private final Throwable failure;
    private final int failedAllocations;

    /**
     * creates an UnassingedInfo object based **current** time
     * creates an UnassignedInfo object based on **current** time
     *
     * @param reason the cause for making this shard unassigned. See {@link Reason} for more information.
     * @param message more information about cause.
     **/
    public UnassignedInfo(Reason reason, String message) {
        this(reason, message, null, System.nanoTime(), System.currentTimeMillis());
        this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false);
    }

    /**
@@ -129,49 +133,63 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     * @param failure the shard level failure that caused this shard to be unassigned, if exists.
     * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
     * @param unassignedTimeMillis the time of unassignment used for display in our reporting.
     * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.
     */
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) {
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, int failedAllocations,
                          long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed) {
        this.reason = reason;
        this.unassignedTimeMillis = unassignedTimeMillis;
        this.unassignedTimeNanos = unassignedTimeNanos;
        this.lastComputedLeftDelayNanos = 0L;
        this.delayed = delayed;
        this.message = message;
        this.failure = failure;
        this.failedAllocations = failedAllocations;
        assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) :
            "failedAllocations: " + failedAllocations + " for reason " + reason;
        assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
    }

    public UnassignedInfo(UnassignedInfo unassignedInfo, long newComputedLeftDelayNanos) {
        this.reason = unassignedInfo.reason;
        this.unassignedTimeMillis = unassignedInfo.unassignedTimeMillis;
        this.unassignedTimeNanos = unassignedInfo.unassignedTimeNanos;
        this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
        this.message = unassignedInfo.message;
        this.failure = unassignedInfo.failure;
        assert !(delayed && reason != Reason.NODE_LEFT) : "shard can only be delayed if it is unassigned due to a node leaving";
    }

    public UnassignedInfo(StreamInput in) throws IOException {
        this.reason = Reason.values()[(int) in.readByte()];
        this.unassignedTimeMillis = in.readLong();
        // As System.nanoTime() cannot be compared across different JVMs, reset it to now.
        // This means that in master failover situations, elapsed delay time is forgotten.
        // This means that in master fail-over situations, elapsed delay time is forgotten.
        this.unassignedTimeNanos = System.nanoTime();
        this.lastComputedLeftDelayNanos = 0L;
        this.delayed = in.readBoolean();
        this.message = in.readOptionalString();
        this.failure = in.readThrowable();
        this.failedAllocations = in.readVInt();
    }

    public void writeTo(StreamOutput out) throws IOException {
        out.writeByte((byte) reason.ordinal());
        out.writeLong(unassignedTimeMillis);
        // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
        out.writeBoolean(delayed);
        out.writeOptionalString(message);
        out.writeThrowable(failure);
        out.writeVInt(failedAllocations);
    }

    public UnassignedInfo readFrom(StreamInput in) throws IOException {
        return new UnassignedInfo(in);
    }

    /**
     * Returns the number of previously failed allocations of this shard.
     */
    public int getNumFailedAllocations() {
        return failedAllocations;
    }

    /**
     * Returns true if allocation of this shard is delayed due to {@link #INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING}
     */
    public boolean isDelayed() {
        return delayed;
    }

    /**
     * The reason why the shard is unassigned.
     */
@@ -224,50 +242,16 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    }

    /**
     * The allocation delay value in nano seconds associated with the index (defaulting to node settings if not set).
     */
    public long getAllocationDelayTimeoutSettingNanos(Settings settings, Settings indexSettings) {
        if (reason != Reason.NODE_LEFT) {
            return 0;
        }
        TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
        return Math.max(0L, delayTimeout.nanos());
    }

    /**
     * The delay in nanoseconds until this unassigned shard can be reassigned. This value is cached and might be slightly out-of-date.
     * See also the {@link #updateDelay(long, Settings, Settings)} method.
     */
    public long getLastComputedLeftDelayNanos() {
        return lastComputedLeftDelayNanos;
    }

    /**
     * Calculates the delay left based on current time (in nanoseconds) and index/node settings.
     * Calculates the delay left based on current time (in nanoseconds) and the delay defined by the index settings.
     * Only relevant if shard is effectively delayed (see {@link #isDelayed()})
     * Returns 0 if delay is negative
     *
     * @return calculated delay in nanoseconds
     */
    public long getRemainingDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
        final long delayTimeoutNanos = getAllocationDelayTimeoutSettingNanos(settings, indexSettings);
        if (delayTimeoutNanos == 0L) {
            return 0L;
        } else {
            assert nanoTimeNow >= unassignedTimeNanos;
            return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
        }
    }

    /**
     * Creates new UnassignedInfo object if delay needs updating.
     *
     * @return new Unassigned with updated delay, or this if no change in delay
     */
    public UnassignedInfo updateDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
        final long newComputedLeftDelayNanos = getRemainingDelay(nanoTimeNow, settings, indexSettings);
        if (lastComputedLeftDelayNanos == newComputedLeftDelayNanos) {
            return this;
        }
        return new UnassignedInfo(this, newComputedLeftDelayNanos);
    public long getRemainingDelay(final long nanoTimeNow, final Settings indexSettings) {
        long delayTimeoutNanos = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos();
        assert nanoTimeNow >= unassignedTimeNanos;
        return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
    }

    /**
@@ -276,56 +260,46 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    public static int getNumberOfDelayedUnassigned(ClusterState state) {
        int count = 0;
        for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
            if (shard.primary() == false) {
                long delay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
                if (delay > 0) {
                    count++;
                }
            if (shard.unassignedInfo().isDelayed()) {
                count++;
            }
        }
        return count;
    }

    /**
     * Finds the smallest delay expiration setting in nanos of all unassigned shards that are still delayed. Returns 0 if there are none.
     * Finds the next (closest) delay expiration of a delayed shard in nanoseconds based on current time.
     * Returns 0 if delay is negative.
     * Returns -1 if no delayed shard is found.
     */
    public static long findSmallestDelayedAllocationSettingNanos(Settings settings, ClusterState state) {
        long minDelaySetting = Long.MAX_VALUE;
        for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
            if (shard.primary() == false) {
                IndexMetaData indexMetaData = state.metaData().index(shard.getIndexName());
                boolean delayed = shard.unassignedInfo().getLastComputedLeftDelayNanos() > 0;
                long delayTimeoutSetting = shard.unassignedInfo().getAllocationDelayTimeoutSettingNanos(settings, indexMetaData.getSettings());
                if (delayed && delayTimeoutSetting > 0 && delayTimeoutSetting < minDelaySetting) {
                    minDelaySetting = delayTimeoutSetting;
    public static long findNextDelayedAllocation(long currentNanoTime, ClusterState state) {
        MetaData metaData = state.metaData();
        RoutingTable routingTable = state.routingTable();
        long nextDelayNanos = Long.MAX_VALUE;
        for (ShardRouting shard : routingTable.shardsWithState(ShardRoutingState.UNASSIGNED)) {
            UnassignedInfo unassignedInfo = shard.unassignedInfo();
            if (unassignedInfo.isDelayed()) {
                Settings indexSettings = metaData.index(shard.index()).getSettings();
                // calculate next time to schedule
                final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(currentNanoTime, indexSettings);
                if (newComputedLeftDelayNanos < nextDelayNanos) {
                    nextDelayNanos = newComputedLeftDelayNanos;
                }
            }
        }
        return minDelaySetting == Long.MAX_VALUE ? 0L : minDelaySetting;
    }

    /**
     * Finds the next (closest) delay expiration of an unassigned shard in nanoseconds. Returns 0 if there are none.
     */
    public static long findNextDelayedAllocationIn(ClusterState state) {
        long nextDelay = Long.MAX_VALUE;
        for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) {
            if (shard.primary() == false) {
                long nextShardDelay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
                if (nextShardDelay > 0 && nextShardDelay < nextDelay) {
                    nextDelay = nextShardDelay;
                }
            }
        }
        return nextDelay == Long.MAX_VALUE ? 0L : nextDelay;
        return nextDelayNanos == Long.MAX_VALUE ? -1L : nextDelayNanos;
    }

    public String shortSummary() {
        StringBuilder sb = new StringBuilder();
        sb.append("[reason=").append(reason).append("]");
        sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]");
        if (failedAllocations > 0) {
            sb.append(", failed_attempts[").append(failedAllocations).append("]");
        }
        sb.append(", delayed=").append(delayed);
        String details = getDetails();

        if (details != null) {
            sb.append(", details[").append(details).append("]");
        }
@@ -342,6 +316,10 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        builder.startObject("unassigned_info");
        builder.field("reason", reason);
        builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));
        if (failedAllocations > 0) {
            builder.field("failed_attempts", failedAllocations);
        }
        builder.field("delayed", delayed);
        String details = getDetails();
        if (details != null) {
            builder.field("details", details);

@@ -364,6 +342,12 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        if (unassignedTimeMillis != that.unassignedTimeMillis) {
            return false;
        }
        if (delayed != that.delayed) {
            return false;
        }
        if (failedAllocations != that.failedAllocations) {
            return false;
        }
        if (reason != that.reason) {
            return false;
        }

@@ -371,12 +355,13 @@ public final class UnassignedInfo implements ToXContent, Writeable {
            return false;
        }
        return !(failure != null ? !failure.equals(that.failure) : that.failure != null);

    }

    @Override
    public int hashCode() {
        int result = reason != null ? reason.hashCode() : 0;
        result = 31 * result + Boolean.hashCode(delayed);
        result = 31 * result + Integer.hashCode(failedAllocations);
        result = 31 * result + Long.hashCode(unassignedTimeMillis);
        result = 31 * result + (message != null ? message.hashCode() : 0);
        result = 31 * result + (failure != null ? failure.hashCode() : 0);
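A worked example of the `getRemainingDelay` arithmetic above, as a self-contained sketch with made-up values:

    // Sketch of the remaining-delay calculation with made-up values.
    long delayTimeoutNanos = TimeUnit.SECONDS.toNanos(60); // index.unassigned.node_left.delayed_timeout = 60s
    long unassignedTimeNanos = 0L;                         // nanoTime captured when the shard became unassigned
    long nanoTimeNow = TimeUnit.SECONDS.toNanos(45);       // 45 seconds later
    long remaining = Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
    // remaining == 15 seconds in nanos; once 60s have elapsed it clamps to 0,
    // at which point removeDelayMarkers may clear the delay flag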
@@ -53,6 +53,8 @@ import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;

/**
 * This service manages the node allocation of a cluster. For this reason the

@@ -90,7 +92,7 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo());
        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo(), currentNanoTime());
        boolean changed = applyStartedShards(routingNodes, startedShards);
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@@ -99,28 +101,27 @@ public class AllocationService extends AbstractComponent {
        if (withReroute) {
            reroute(allocation);
        }
        final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);

        String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
        logClusterHealthStateChange(
            new ClusterStateHealth(clusterState),
            new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
            "shards started [" + startedShardsAsString + "] ..."
        );
        return result;
        return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ...");
    }

    protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
        return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations());

    }

    protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) {
        return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations());

    }

    protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes,
                                                          RoutingExplanations explanations) {
    protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
        MetaData oldMetaData = allocation.metaData();
        RoutingTable oldRoutingTable = allocation.routingTable();
        RoutingNodes newRoutingNodes = allocation.routingNodes();
        final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build();
        MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable);
        assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
        logClusterHealthStateChange(
            new ClusterStateHealth(allocation.metaData(), allocation.routingTable()),
            new ClusterStateHealth(newMetaData, newRoutingTable),
            reason
        );
        return new RoutingAllocation.Result(true, newRoutingTable, newMetaData, explanations);
    }
@@ -216,28 +217,48 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo());
        long currentNanoTime = currentNanoTime();
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo(), currentNanoTime);
        boolean changed = false;
        // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
        List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
        orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
        for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
            UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
            final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
            changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
                System.nanoTime(), System.currentTimeMillis()));
                failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false));
        }
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
        }
        gatewayAllocator.applyFailedShards(allocation);
        reroute(allocation);
        final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
        String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
        logClusterHealthStateChange(
            new ClusterStateHealth(clusterState),
            new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
            "shards failed [" + failedShardsAsString + "] ..."
        );
        return result;
        return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
    }

    /**
     * Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed.
     */
    private boolean removeDelayMarkers(RoutingAllocation allocation) {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
        final MetaData metaData = allocation.metaData();
        boolean changed = false;
        while (unassignedIterator.hasNext()) {
            ShardRouting shardRouting = unassignedIterator.next();
            UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
            if (unassignedInfo.isDelayed()) {
                final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
                    metaData.getIndexSafe(shardRouting.index()).getSettings());
                if (newComputedLeftDelayNanos == 0) {
                    changed = true;
                    unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
                        unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false));
                }
            }
        }
        return changed;
    }

    /**
@@ -257,16 +278,13 @@ public class AllocationService extends AbstractComponent {
            .collect(Collectors.joining(", "));
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
        return reroute(clusterState, commands, false);
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {
    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // we don't shuffle the unassigned shards here, to try and get as close as possible to
        // a consistent result of the effect the commands have on the routing
        // this allows systems to dry run the commands, see the resulting cluster state, and act on it
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // we ignore disable allocation, because commands are explicit

@@ -277,13 +295,7 @@ public class AllocationService extends AbstractComponent {
        // the assumption is that commands will move / act on shards (or fail through exceptions)
        // so, there will always be shard "movements", so no need to check on reroute
        reroute(allocation);
        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations);
        logClusterHealthStateChange(
            new ClusterStateHealth(clusterState),
            new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
            "reroute commands"
        );
        return result;
        return buildResultAndLogHealthChange(allocation, "reroute commands", explanations);
    }

@@ -305,18 +317,13 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        allocation.debugDecision(debug);
        if (!reroute(allocation)) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
        }
        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
        logClusterHealthStateChange(
            new ClusterStateHealth(clusterState),
            new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
            reason
        );
        return result;
        return buildResultAndLogHealthChange(allocation, reason);
    }

    private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {

@@ -341,8 +348,7 @@ public class AllocationService extends AbstractComponent {

        // now allocate all the unassigned to available nodes
        if (allocation.routingNodes().unassigned().size() > 0) {
            updateLeftDelayOfUnassignedShards(allocation, settings);

            changed |= removeDelayMarkers(allocation);
            changed |= gatewayAllocator.allocateUnassigned(allocation);
        }

@@ -351,22 +357,6 @@ public class AllocationService extends AbstractComponent {
        return changed;
    }

    // public for testing
    public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
        final MetaData metaData = allocation.metaData();
        while (unassignedIterator.hasNext()) {
            ShardRouting shardRouting = unassignedIterator.next();
            final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
            UnassignedInfo previousUnassignedInfo = shardRouting.unassignedInfo();
            UnassignedInfo updatedUnassignedInfo = previousUnassignedInfo.updateDelay(allocation.getCurrentNanoTime(), settings,
                indexMetaData.getSettings());
            if (updatedUnassignedInfo != previousUnassignedInfo) { // reference equality!
                unassignedIterator.updateUnassignedInfo(updatedUnassignedInfo);
            }
        }
    }

    private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
        boolean changed = false;
        final RoutingNodes routingNodes = allocation.routingNodes();

@@ -436,8 +426,10 @@ public class AllocationService extends AbstractComponent {
            changed = true;
            // now, go over all the shards routing on the node, and fail them
            for (ShardRouting shardRouting : node.copyShards()) {
                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
                    allocation.getCurrentNanoTime(), System.currentTimeMillis());
                final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
                boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed);
                applyFailedShard(allocation, shardRouting, false, unassignedInfo);
            }
            // its a dead node, remove it, note, its important to remove it *after* we apply failed shard

@@ -457,8 +449,8 @@ public class AllocationService extends AbstractComponent {
        boolean changed = false;
        for (ShardRouting routing : replicas) {
            changed |= applyFailedShard(allocation, routing, false,
                new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
        }
        return changed;
    }
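The new `retryFailed` argument flows into `RoutingAllocation.isRetryFailed()` below, which lets deciders such as the `MaxRetryAllocationDecider` registered in `DEFAULT_ALLOCATION_DECIDERS` above re-admit shards whose allocation attempts were exhausted (note how the failed-shard path now increments `failedAllocations`). A sketch of driving it directly against the service, using the signature introduced in this diff (the service and state variables are assumed to be in scope):

    // Sketch: an empty command list plus retryFailed=true asks the allocator
    // to reconsider shards that already hit their max allocation retries.
    RoutingAllocation.Result result =
        allocationService.reroute(clusterState, new AllocationCommands(), false, true);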
@@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {

    private final List<FailedShard> failedShards;

    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
        super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
        this.failedShards = failedShards;
    }
@@ -56,7 +56,7 @@ public class RoutingAllocation {

    private final MetaData metaData;

    private RoutingExplanations explanations = new RoutingExplanations();
    private final RoutingExplanations explanations;

    /**
     * Creates a new {@link RoutingAllocation.Result}

@@ -65,9 +65,7 @@ public class RoutingAllocation {
     * @param metaData the {@link MetaData} this Result references
     */
    public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
        this.changed = changed;
        this.routingTable = routingTable;
        this.metaData = metaData;
        this(changed, routingTable, metaData, new RoutingExplanations());
    }

    /**

@@ -134,6 +132,8 @@ public class RoutingAllocation {

    private boolean ignoreDisable = false;

    private final boolean retryFailed;

    private boolean debugDecision = false;

    private boolean hasPendingAsyncFetch = false;

@@ -148,7 +148,7 @@ public class RoutingAllocation {
     * @param clusterState cluster state before rerouting
     * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
     */
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime) {
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
        this.deciders = deciders;
        this.routingNodes = routingNodes;
        this.metaData = clusterState.metaData();

@@ -156,6 +156,7 @@ public class RoutingAllocation {
        this.customs = clusterState.customs();
        this.clusterInfo = clusterInfo;
        this.currentNanoTime = currentNanoTime;
        this.retryFailed = retryFailed;
    }

    /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */

@@ -297,4 +298,8 @@ public class RoutingAllocation {
    public void setHasPendingAsyncFetch() {
        this.hasPendingAsyncFetch = true;
    }

    public boolean isRetryFailed() {
        return retryFailed;
    }
}
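Note how the reroute allocations now receive `currentNanoTime` from the caller instead of reading `System.nanoTime()` themselves, so a single timestamp is shared by every time-based decision in one allocation round and tests can supply a fixed clock. A sketch of the pattern (class and method names hypothetical):

--------------------------------
import java.util.function.LongSupplier;

// Injecting the clock keeps every decision in one allocation round aligned
// and lets tests pin time to a constant value.
class AllocationRound {
    private final long currentNanoTime;

    AllocationRound(LongSupplier nanoTimeSource) {
        // captured once, up front, never re-read during the round
        this.currentNanoTime = nanoTimeSource.getAsLong();
    }

    long remainingDelayNanos(long delayNanos, long unassignedSinceNanos) {
        return Math.max(0L, delayNanos - (currentNanoTime - unassignedSinceNanos));
    }
}

class AllocationRoundDemo {
    public static void main(String[] args) {
        AllocationRound production = new AllocationRound(System::nanoTime);
        AllocationRound test = new AllocationRound(() -> 42_000_000L); // fixed clock in tests
        System.out.println(test.remainingDelayNanos(100_000_000L, 0L)); // 58000000
    }
}
--------------------------------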
@@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {

    private final List<? extends ShardRouting> startedShards;

    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime());
    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
        super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
        this.startedShards = startedShards;
    }
@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;

@@ -228,4 +229,22 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
    protected void extraXContent(XContentBuilder builder) throws IOException {
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        AbstractAllocateAllocationCommand other = (AbstractAllocateAllocationCommand) obj;
        // Override equals and hashCode for testing
        return Objects.equals(index, other.index) &&
                Objects.equals(shardId, other.shardId) &&
                Objects.equals(node, other.node);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(index, shardId, node);
    }
}
@@ -125,7 +125,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
            // we need to move the unassigned info back to treat it as if it was index creation
            unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
                "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
                shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());
                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false);
        }

        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
@@ -136,6 +136,4 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation
        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
        return new RerouteExplanation(this, decision);
    }

}
@@ -22,13 +22,16 @@ package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

/**
 * This interface defines the basic methods of commands for allocation
 * A command to move shards in some way.
 *
 * Commands are registered in {@link NetworkModule}.
 */
public interface AllocationCommand extends NamedWriteable, ToXContent {
    interface Parser<T extends AllocationCommand> {
@@ -20,12 +20,12 @@
package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -33,12 +33,13 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * A simple {@link AllocationCommand} composite managing several
 * {@link AllocationCommand} implementations
 */
public class AllocationCommands {
public class AllocationCommands extends ToXContentToBytes {
    private final List<AllocationCommand> commands = new ArrayList<>();

    /**

@@ -171,21 +172,31 @@ public class AllocationCommands {
        return commands;
    }

    /**
     * Writes {@link AllocationCommands} to a {@link XContentBuilder}
     *
     * @param commands {@link AllocationCommands} to write
     * @param builder {@link XContentBuilder} to use
     * @param params Parameters to use for building
     * @throws IOException if something bad happens while building the content
     */
    public static void toXContent(AllocationCommands commands, XContentBuilder builder, ToXContent.Params params) throws IOException {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray("commands");
        for (AllocationCommand command : commands.commands) {
        for (AllocationCommand command : commands) {
            builder.startObject();
            builder.field(command.name(), command);
            builder.endObject();
        }
        builder.endArray();
        return builder;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        AllocationCommands other = (AllocationCommands) obj;
        // Override equals and hashCode for testing
        return Objects.equals(commands, other.commands);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hashCode(commands);
    }
}
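With `AllocationCommands` now extending `ToXContentToBytes`, a command list renders itself as the `commands` array that the reroute API consumes. Illustratively — field names following the public reroute API, index and node values invented — the output of `toXContent` for a move plus a cancel command would look like:

--------------------------------
{
  "commands" : [
    { "move"   : { "index" : "test", "shard" : 0, "from_node" : "node1", "to_node" : "node2" } },
    { "cancel" : { "index" : "test", "shard" : 0, "node" : "node1" } }
  ]
}
--------------------------------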
@@ -83,4 +83,18 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAlloc
    protected void extraXContent(XContentBuilder builder) throws IOException {
        builder.field(ACCEPT_DATA_LOSS_FIELD, acceptDataLoss);
    }

    @Override
    public boolean equals(Object obj) {
        if (false == super.equals(obj)) {
            return false;
        }
        BasePrimaryAllocationCommand other = (BasePrimaryAllocationCommand) obj;
        return acceptDataLoss == other.acceptDataLoss;
    }

    @Override
    public int hashCode() {
        return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
    }
}
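The same test-oriented equals/hashCode pattern recurs across the command classes; the subclass variant above chains through `super`. A compact self-contained sketch of the idiom (class names invented):

--------------------------------
import java.util.Objects;

// Base class compares its own fields; the subclass first defers to
// super.equals and then compares only what it adds.
class BaseCommand {
    final String index;
    BaseCommand(String index) { this.index = index; }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        return Objects.equals(index, ((BaseCommand) obj).index);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(index);
    }
}

class PrimaryCommand extends BaseCommand {
    final boolean acceptDataLoss;
    PrimaryCommand(String index, boolean acceptDataLoss) {
        super(index);
        this.acceptDataLoss = acceptDataLoss;
    }

    @Override
    public boolean equals(Object obj) {
        if (false == super.equals(obj)) { // getClass() check in super keeps this symmetric
            return false;
        }
        return acceptDataLoss == ((PrimaryCommand) obj).acceptDataLoss;
    }

    @Override
    public int hashCode() {
        // fold the extra field into the base hash, as in the diff above
        return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
    }
}
--------------------------------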
@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;

@@ -240,4 +241,23 @@ public class CancelAllocationCommand implements AllocationCommand {
        }
        return new CancelAllocationCommand(index, shardId, nodeId, allowPrimary);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        CancelAllocationCommand other = (CancelAllocationCommand) obj;
        // Override equals and hashCode for testing
        return Objects.equals(index, other.index) &&
                Objects.equals(shardId, other.shardId) &&
                Objects.equals(node, other.node) &&
                Objects.equals(allowPrimary, other.allowPrimary);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(index, shardId, node, allowPrimary);
    }
}
@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * A command that moves a shard from a specific node to another node.<br>

@@ -195,4 +196,23 @@ public class MoveAllocationCommand implements AllocationCommand {
        }
        return new MoveAllocationCommand(index, shardId, fromNode, toNode);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        MoveAllocationCommand other = (MoveAllocationCommand) obj;
        // Override equals and hashCode for testing
        return Objects.equals(index, other.index) &&
                Objects.equals(shardId, other.shardId) &&
                Objects.equals(fromNode, other.fromNode) &&
                Objects.equals(toNode, other.toNode);
    }

    @Override
    public int hashCode() {
        // Override equals and hashCode for testing
        return Objects.hash(index, shardId, fromNode, toNode);
    }
}
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

/**
 * An allocation decider that prevents shards from being allocated on any node if the shard's allocation has been retried N times without
 * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED, the shard will be ignored until
 * the setting for <tt>index.allocation.max_retries</tt> is raised. The default value is <tt>5</tt>.
 * Note: This allocation decider also allows allocation of repeatedly failing shards when the <tt>/_cluster/reroute?retry_failed=true</tt>
 * API is manually invoked. This allows single retries without raising the limits.
 *
 * @see RoutingAllocation#isRetryFailed()
 */
public class MaxRetryAllocationDecider extends AllocationDecider {

    public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting("index.allocation.max_retries", 5, 0,
        Setting.Property.Dynamic, Setting.Property.IndexScope);

    public static final String NAME = "max_retry";

    /**
     * Initializes a new {@link MaxRetryAllocationDecider}
     *
     * @param settings {@link Settings} used by this {@link AllocationDecider}
     */
    @Inject
    public MaxRetryAllocationDecider(Settings settings) {
        super(settings);
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
        UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
        if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
            final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
            final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());
            if (allocation.isRetryFailed()) { // manual allocation - retry
                // if we are called via the _reroute API we ignore the failure counter and try to allocate
                // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
                // enough to manually retry.
                return allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
                    + unassignedInfo.toString() + " - retrying once on manual allocation");
            } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
                return allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
                    + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry");
            }
        }
        return allocation.decision(Decision.YES, NAME, "shard has no previous failures");
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        return canAllocate(shardRouting, allocation);
    }
}
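A usage sketch for the new decider, grounded in the setting key and query parameter that appear in the code above (index name invented):

--------------------------------
PUT /my-index/_settings
{
  "index.allocation.max_retries": 10
}

POST /_cluster/reroute?retry_failed=true
--------------------------------

Raising the limit is rarely needed; a bare reroute with `retry_failed=true` grants one more attempt per failing shard without touching the setting.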
@@ -455,7 +455,7 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
    }

    /** asserts that the current thread is the cluster state update thread */
    public boolean assertClusterStateThread() {
    public static boolean assertClusterStateThread() {
        assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) :
            "not called from the cluster state update thread";
        return true;
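Making `assertClusterStateThread` static lets any caller assert the invariant without holding a `ClusterService` reference; because the method returns `true`, it can sit inside an `assert` statement and vanish entirely when assertions are disabled. A minimal sketch of the idiom (thread name illustrative):

--------------------------------
// The assert-method returns true so it can be used as `assert check();`
// — zero cost with -da, a thread-name check with -ea.
class ThreadAsserts {
    static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; // illustrative value

    static boolean assertClusterStateThread() {
        assert Thread.currentThread().getName().contains(UPDATE_THREAD_NAME) :
            "not called from the cluster state update thread";
        return true;
    }

    void mutateClusterState() {
        assert assertClusterStateThread(); // compiled away unless -ea is set
        // ... state mutation ...
    }
}
--------------------------------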
File diff suppressed because it is too large
@@ -21,6 +21,7 @@ package org.elasticsearch.common;

import java.io.IOException;
import java.util.Base64;
import java.util.Random;

class RandomBasedUUIDGenerator implements UUIDGenerator {

@@ -54,14 +55,6 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
         * We set only the MSB of the variant*/
        randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
        randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
        try {
            byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
            // we know the bytes are 16, and not a multi of 3, so remove the 2 padding chars that are added
            assert encoded[encoded.length - 1] == '=';
            assert encoded[encoded.length - 2] == '=';
            return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
        } catch (IOException e) {
            throw new IllegalStateException("should not be thrown");
        }
        return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
    }
}
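The hand-rolled Base64 class gives way to `java.util.Base64` (Java 8+): the URL-safe encoder with padding suppressed yields the same 22-character string for 16 random bytes that the old code produced by manually stripping the two `=` characters. A quick standalone check:

--------------------------------
import java.security.SecureRandom;
import java.util.Base64;

class UrlSafeUuidDemo {
    public static void main(String[] args) {
        byte[] randomBytes = new byte[16];
        new SecureRandom().nextBytes(randomBytes);
        // 16 bytes is not a multiple of 3, so a padded encoding would end in "==";
        // withoutPadding() drops those two chars, leaving 22 characters.
        String id = Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
        System.out.println(id.length());       // 22
        System.out.println(id.contains("=")); // false
    }
}
--------------------------------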
@@ -19,8 +19,7 @@

package org.elasticsearch.common;

import java.io.IOException;
import java.util.Base64;
import java.util.concurrent.atomic.AtomicInteger;

/** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but

@@ -80,15 +79,6 @@ class TimeBasedUUIDGenerator implements UUIDGenerator {

        assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length;

        byte[] encoded;
        try {
            encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE);
        } catch (IOException e) {
            throw new IllegalStateException("should not be thrown", e);
        }

        // We are a multiple of 3 bytes so we should not see any padding:
        assert encoded[encoded.length - 1] != '=';
        return new String(encoded, 0, encoded.length, Base64.PREFERRED_ENCODING);
        return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes);
    }
}
@@ -30,6 +30,8 @@ import java.util.List;
 */
public class BlobPath implements Iterable<String> {

    private static final String SEPARATOR = "/";

    private final List<String> paths;

    public BlobPath() {

@@ -60,15 +62,12 @@ public class BlobPath implements Iterable<String> {
        return new BlobPath(Collections.unmodifiableList(paths));
    }

    public String buildAsString(String separator) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < paths.size(); i++) {
            sb.append(paths.get(i));
            if (i < (paths.size() - 1)) {
                sb.append(separator);
            }
    public String buildAsString() {
        String p = String.join(SEPARATOR, paths);
        if (p.isEmpty()) {
            return p;
        }
        return sb.toString();
        return p + SEPARATOR;
    }

    @Override
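The rewritten `buildAsString` drops the configurable separator in favor of a fixed `/`, switches to `String.join`, and — unlike the old loop — appends a trailing separator to non-empty paths. A standalone sketch of the new semantics:

--------------------------------
import java.util.List;

class BlobPathDemo {
    private static final String SEPARATOR = "/";

    static String buildAsString(List<String> paths) {
        String p = String.join(SEPARATOR, paths);
        if (p.isEmpty()) {
            return p;
        }
        return p + SEPARATOR; // note the new trailing separator
    }

    public static void main(String[] args) {
        System.out.println(buildAsString(List.of()));               // ""
        System.out.println(buildAsString(List.of("indices")));      // "indices/"
        System.out.println(buildAsString(List.of("indices", "0"))); // "indices/0/"
    }
}
--------------------------------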
@@ -321,20 +321,15 @@ public class Joda {

    public static class EpochTimeParser implements DateTimeParser {

        private static final Pattern MILLI_SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,13}$");
        private static final Pattern SECOND_PRECISION_PATTERN = Pattern.compile("^-?\\d{1,10}$");

        private final boolean hasMilliSecondPrecision;
        private final Pattern pattern;

        public EpochTimeParser(boolean hasMilliSecondPrecision) {
            this.hasMilliSecondPrecision = hasMilliSecondPrecision;
            this.pattern = hasMilliSecondPrecision ? MILLI_SECOND_PRECISION_PATTERN : SECOND_PRECISION_PATTERN;
        }

        @Override
        public int estimateParsedLength() {
            return hasMilliSecondPrecision ? 13 : 10;
            return hasMilliSecondPrecision ? 19 : 16;
        }

        @Override

@@ -344,8 +339,7 @@ public class Joda {

        if ((isPositive && isTooLong) ||
            // timestamps have to have UTC timezone
            bucket.getZone() != DateTimeZone.UTC ||
            pattern.matcher(text).matches() == false) {
            bucket.getZone() != DateTimeZone.UTC) {
            return -1;
        }

@@ -378,7 +372,7 @@ public class Joda {

        @Override
        public int estimatePrintedLength() {
            return hasMilliSecondPrecision ? 13 : 10;
            return hasMilliSecondPrecision ? 19 : 16;
        }

        @Override
@@ -19,9 +19,10 @@

package org.elasticsearch.common.logging;

import org.apache.log4j.Java9Hack;
import org.apache.log4j.PropertyConfigurator;
import org.apache.lucene.util.Constants;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;

@@ -87,14 +88,17 @@ public class LogConfigurator {
        replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
        replacements.put("xml", "org.apache.log4j.XMLLayout");
        REPLACEMENTS = unmodifiableMap(replacements);

        if (Constants.JRE_IS_MINIMUM_JAVA9) {
            Java9Hack.fixLog4j();
        }
    }

    private static boolean loaded;

    /**
     * Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
     *
     * @param settings custom settings that should be applied
     * @param settings      custom settings that should be applied
     * @param resolveConfig controls whether the logging conf file should be read too or not.
     */
    public static void configure(Settings settings, boolean resolveConfig) {

@@ -109,7 +113,7 @@ public class LogConfigurator {
        if (resolveConfig) {
            resolveConfig(environment, settingsBuilder);
        }
        settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties());

        // add custom settings after config was added so that they are not overwritten by config
        settingsBuilder.put(settings);
        settingsBuilder.replacePropertyPlaceholders();
@@ -26,7 +26,7 @@ import org.apache.lucene.search.Scorer;
import org.elasticsearch.script.ExplainableSearchScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.SearchScript;

import java.io.IOException;

@@ -87,7 +87,7 @@ public class ScriptScoreFunction extends ScoreFunction {
        scorer.score = subQueryScore;
        double result = leafScript.runAsDouble();
        if (Double.isNaN(result)) {
            throw new ScriptException("script_score returned NaN");
            throw new GeneralScriptException("script_score returned NaN");
        }
        return result;
    }
@@ -19,11 +19,6 @@

package org.elasticsearch.common.network;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;

@@ -36,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommandReg
import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;

@@ -71,6 +65,12 @@ import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestore
import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;

@@ -137,19 +137,11 @@ import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
import org.elasticsearch.rest.action.main.RestMainAction;
import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
import org.elasticsearch.rest.action.percolate.RestPercolateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.search.RestClearScrollAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.suggest.RestSuggestAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteSearchTemplateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetSearchTemplateAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutSearchTemplateAction;
import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
import org.elasticsearch.rest.action.update.RestUpdateAction;

@@ -159,6 +151,9 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;

import java.util.Arrays;
import java.util.List;

/**
 * A module to handle registering and binding all network related classes.
 */

@@ -250,8 +245,6 @@ public class NetworkModule extends AbstractModule {
        RestMultiTermVectorsAction.class,
        RestBulkAction.class,
        RestUpdateAction.class,
        RestPercolateAction.class,
        RestMultiPercolateAction.class,

        RestSearchAction.class,
        RestSearchScrollAction.class,

@@ -404,7 +397,7 @@ public class NetworkModule extends AbstractModule {
     * @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because
     *                    it is the name under which the command's reader is registered.
     */
    public <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
    private <T extends AllocationCommand> void registerAllocationCommand(Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser,
                                                                          ParseField commandName) {
        allocationCommandRegistry.register(parser, commandName);
        namedWriteableRegistry.register(AllocationCommand.class, commandName.getPreferredName(), reader);
@@ -332,7 +332,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
    Environment.PATH_DATA_SETTING,
    Environment.PATH_HOME_SETTING,
    Environment.PATH_LOGS_SETTING,
    Environment.PATH_PLUGINS_SETTING,
    Environment.PATH_REPO_SETTING,
    Environment.PATH_SCRIPTS_SETTING,
    Environment.PATH_SHARED_DATA_SETTING,

@@ -375,7 +374,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
    BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
    ClusterName.CLUSTER_NAME_SETTING,
    Client.CLIENT_TYPE_SETTING_S,
    InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
    ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
    EsExecutors.PROCESSORS_SETTING,
    ThreadContext.DEFAULT_HEADERS_SETTING,
@@ -21,6 +21,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.gateway.PrimaryShardAllocator;

@@ -35,12 +36,10 @@ import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.IndicesRequestCache;

import java.util.Arrays;

@@ -59,6 +58,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
    public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);

    public static final Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY,
        IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,

@@ -126,7 +126,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
        Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
        PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
        MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
        MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
        MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,
@@ -122,7 +122,7 @@ public class Setting<T> extends ToXContentToBytes {
        this.defaultValue = defaultValue;
        this.parser = parser;
        if (properties == null) {
            throw new IllegalArgumentException("properties can not be null for setting [" + key + "]");
            throw new IllegalArgumentException("properties cannot be null for setting [" + key + "]");
        }
        if (properties.length == 0) {
            this.properties = EMPTY_PROPERTIES;

@@ -132,7 +132,7 @@ public class Setting<T> extends ToXContentToBytes {
    }

    /**
     * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}.
     * Creates a new Setting instance
     * @param key the settings key for this setting.
     * @param defaultValue a default value function that returns the default values string representation.
     * @param parser a parser that parses the string rep into a complex datatype.

@@ -165,7 +165,7 @@ public class Setting<T> extends ToXContentToBytes {
    }

    /**
     * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}.
     * Creates a new Setting instance
     * @param key the settings key for this setting.
     * @param fallbackSetting a setting whose value to fall back on if this setting is not defined
     * @param parser a parser that parses the string rep into a complex datatype.

@@ -537,6 +537,10 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
    }

    public static Setting<Boolean> boolSetting(String key, Function<Settings, String> defaultValueFn, Property... properties) {
        return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }
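The new `boolSetting` overload takes a default-value *function* rather than a constant, so a boolean setting can derive its default from other settings at resolution time. A self-contained sketch of that pattern in plain Java (class and key invented):

--------------------------------
import java.util.Map;
import java.util.function.Function;

// Sketch of a default-as-function setting: the default is computed from the
// full settings map at lookup time instead of being a fixed constant.
class BoolSetting {
    final String key;
    final Function<Map<String, String>, String> defaultValueFn;

    BoolSetting(String key, Function<Map<String, String>, String> defaultValueFn) {
        this.key = key;
        this.defaultValueFn = defaultValueFn;
    }

    boolean get(Map<String, String> settings) {
        String raw = settings.getOrDefault(key, defaultValueFn.apply(settings));
        return Boolean.parseBoolean(raw);
    }
}

class BoolSettingDemo {
    public static void main(String[] args) {
        // invented key: defaults to true only when a cache size is configured
        BoolSetting eager = new BoolSetting("index.cache.eager",
            s -> s.containsKey("index.cache.size") ? "true" : "false");
        System.out.println(eager.get(Map.of()));                          // false
        System.out.println(eager.get(Map.of("index.cache.size", "10%"))); // true
    }
}
--------------------------------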
@@ -58,9 +58,11 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;

@@ -942,89 +944,54 @@ public final class Settings implements ToXContent {
        return this;
    }

    /**
     * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
     *
     * @param prefix The prefix to filter property key by
     * @param properties The properties to put
     * @return The builder
     */
    public Builder putProperties(String prefix, Dictionary<Object, Object> properties) {
        for (Object property : Collections.list(properties.keys())) {
            String key = Objects.toString(property);
            String value = Objects.toString(properties.get(property));
            if (key.startsWith(prefix)) {
                map.put(key.substring(prefix.length()), value);
    public Builder putProperties(Map<String, String> esSettings, Predicate<String> keyPredicate, Function<String, String> keyFunction) {
        for (final Map.Entry<String, String> esSetting : esSettings.entrySet()) {
            final String key = esSetting.getKey();
            if (keyPredicate.test(key)) {
                map.put(keyFunction.apply(key), esSetting.getValue());
            }
        }
        return this;
    }

    /**
     * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
     *
     * @param prefix The prefix to filter property key by
     * @param properties The properties to put
     * @return The builder
     */
    public Builder putProperties(String prefix, Dictionary<Object, Object> properties, String ignorePrefix) {
        for (Object property : Collections.list(properties.keys())) {
            String key = Objects.toString(property);
            String value = Objects.toString(properties.get(property));
            if (key.startsWith(prefix)) {
                if (!key.startsWith(ignorePrefix)) {
                    map.put(key.substring(prefix.length()), value);
                }
            }
        }
        return this;
    }

    /**
     * Runs across all the settings set on this builder and replaces <tt>${...}</tt> elements in the
     * each setting value according to the following logic:
     * <p>
     * First, tries to resolve it against a System property ({@link System#getProperty(String)}), next,
     * tries and resolve it against an environment variable ({@link System#getenv(String)}), and last, tries
     * and replace it with another setting already set on this builder.
     * Runs across all the settings set on this builder and
     * replaces <tt>${...}</tt> elements in each setting with
     * another setting already set on this builder.
     */
    public Builder replacePropertyPlaceholders() {
        return replacePropertyPlaceholders(System::getenv);
    }

    // visible for testing
    Builder replacePropertyPlaceholders(Function<String, String> getenv) {
        PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
        PropertyPlaceholder.PlaceholderResolver placeholderResolver = new PropertyPlaceholder.PlaceholderResolver() {
            @Override
            public String resolvePlaceholder(String placeholderName) {
                if (placeholderName.startsWith("env.")) {
                    // explicit env var prefix
                    return System.getenv(placeholderName.substring("env.".length()));
                }
                String value = System.getProperty(placeholderName);
                if (value != null) {
                    return value;
                }
                value = System.getenv(placeholderName);
                if (value != null) {
                    return value;
                }
                return map.get(placeholderName);
            @Override
            public String resolvePlaceholder(String placeholderName) {
                final String value = getenv.apply(placeholderName);
                if (value != null) {
                    return value;
                }
                return map.get(placeholderName);
            }

            @Override
            public boolean shouldIgnoreMissing(String placeholderName) {
                // if its an explicit env var, we are ok with not having a value for it and treat it as optional
                if (placeholderName.startsWith("env.") || placeholderName.startsWith("prompt.")) {
                    return true;
                }
                return false;
            }

            @Override
            public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
                if (placeholderName.startsWith("prompt.")) {
                    return false;
                }
            @Override
            public boolean shouldIgnoreMissing(String placeholderName) {
                if (placeholderName.startsWith("prompt.")) {
                    return true;
                }
            };
                return false;
            }

            @Override
            public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
                if (placeholderName.startsWith("prompt.")) {
                    return false;
                }
                return true;
            }
        };
        for (Map.Entry<String, String> entry : new HashMap<>(map).entrySet()) {
            String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver);
            // if the value exists and has length, we should maintain it in the map
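The new `putProperties(Map, Predicate, Function)` replaces the two prefix-based overloads with an explicit key filter and key rewriter. A self-contained sketch of the call pattern — keep `es.`-prefixed entries and strip the prefix, mirroring the old behavior (values invented):

--------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;

class PutPropertiesDemo {
    static Map<String, String> putProperties(Map<String, String> esSettings,
                                             Predicate<String> keyPredicate,
                                             Function<String, String> keyFunction) {
        Map<String, String> map = new HashMap<>();
        for (Map.Entry<String, String> esSetting : esSettings.entrySet()) {
            final String key = esSetting.getKey();
            if (keyPredicate.test(key)) {                              // keep only matching keys
                map.put(keyFunction.apply(key), esSetting.getValue()); // rewrite the key
            }
        }
        return map;
    }

    public static void main(String[] args) {
        Map<String, String> sysProps = Map.of(
            "es.path.home", "/opt/es",   // invented values
            "java.version", "1.8");
        System.out.println(putProperties(sysProps,
            k -> k.startsWith("es."), k -> k.substring("es.".length())));
        // {path.home=/opt/es}
    }
}
--------------------------------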
@@ -781,7 +781,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
        try {
            generator.close();
        } catch (IOException e) {
            // ignore
            throw new IllegalStateException("failed to close the XContentBuilder", e);
        }
    }
@@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.cbor;

import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.bytes.BytesReference;

@@ -50,6 +51,8 @@ public class CborXContent implements XContent {
    static {
        cborFactory = new CBORFactory();
        cborFactory.configure(CBORFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
        // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method
        cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
        cborXContent = new CborXContent();
    }
@@ -73,6 +73,8 @@ public class JsonXContent implements XContent {
        jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
        jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
        jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
        // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method
        jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
        jsonXContent = new JsonXContent();
    }
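Disabling `AUTO_CLOSE_JSON_CONTENT` means Jackson's `close()` no longer silently appends missing `}`/`]`, which is what lets the generator's own `close()` (next hunk) detect and reject unbalanced output instead. A small demonstration with stock Jackson:

--------------------------------
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

import java.io.StringWriter;

class AutoCloseDemo {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        // With the feature ON (the default), close() would emit the missing "}".
        factory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);

        StringWriter out = new StringWriter();
        JsonGenerator gen = factory.createGenerator(out);
        gen.writeStartObject();
        gen.writeStringField("field", "value");
        gen.close(); // object left unclosed on purpose

        System.out.println(out); // {"field":"value"   <- no closing brace appended
    }
}
--------------------------------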
@@ -100,7 +100,7 @@ public class JsonXContentGenerator implements XContentGenerator {

    @Override
    public final void usePrettyPrint() {
        generator.setPrettyPrinter(new DefaultPrettyPrinter().withObjectIndenter(INDENTER));
        generator.setPrettyPrinter(new DefaultPrettyPrinter().withObjectIndenter(INDENTER).withArrayIndenter(INDENTER));
        prettyPrint = true;
    }

@@ -389,6 +389,10 @@ public class JsonXContentGenerator implements XContentGenerator {
        if (generator.isClosed()) {
            return;
        }
        JsonStreamContext context = generator.getOutputContext();
        if ((context != null) && (context.inRoot() == false)) {
            throw new IOException("unclosed object or array found");
        }
        if (writeLineFeedAtEnd) {
            flush();
            generator.writeRaw(LF);
@@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent.smile;

import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.fasterxml.jackson.dataformat.smile.SmileGenerator;
import org.elasticsearch.common.bytes.BytesReference;

@@ -51,6 +52,8 @@ public class SmileXContent implements XContent {
        smileFactory = new SmileFactory();
        smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets
        smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
        // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method
        smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
        smileXContent = new SmileXContent();
    }
@@ -33,6 +33,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;

import java.util.ArrayList;

@@ -53,6 +54,7 @@ public class NodeJoinController extends AbstractComponent {

    final ClusterService clusterService;
    final RoutingService routingService;
    final ElectMasterService electMaster;
    final DiscoverySettings discoverySettings;
    final AtomicBoolean accumulateJoins = new AtomicBoolean(false);

@@ -62,10 +64,11 @@ public class NodeJoinController extends AbstractComponent {

    protected final Map<DiscoveryNode, List<MembershipAction.JoinCallback>> pendingJoinRequests = new HashMap<>();

    public NodeJoinController(ClusterService clusterService, RoutingService routingService, DiscoverySettings discoverySettings, Settings settings) {
    public NodeJoinController(ClusterService clusterService, RoutingService routingService, ElectMasterService electMaster, DiscoverySettings discoverySettings, Settings settings) {
        super(settings);
        this.clusterService = clusterService;
        this.routingService = routingService;
        this.electMaster = electMaster;
        this.discoverySettings = discoverySettings;
    }

@@ -86,7 +89,7 @@ public class NodeJoinController extends AbstractComponent {
        assert accumulateJoins.get() : "waitToBeElectedAsMaster is called while we are not accumulating joins";

        final CountDownLatch done = new CountDownLatch(1);
        final ElectionContext newContext = new ElectionContext(callback, requiredMasterJoins, clusterService) {
        final ElectionContext newContext = new ElectionContext(callback, requiredMasterJoins) {
            @Override
            void onClose() {
                if (electionContext.compareAndSet(this, null)) {

@@ -304,16 +307,14 @@ public class NodeJoinController extends AbstractComponent {
    static abstract class ElectionContext implements ElectionCallback {
        private final ElectionCallback callback;
        private final int requiredMasterJoins;
        private final ClusterService clusterService;

        /** set to true after enough joins have been seen and a cluster update task is submitted to become master */
        final AtomicBoolean pendingSetAsMasterTask = new AtomicBoolean();
        final AtomicBoolean closed = new AtomicBoolean();

        ElectionContext(ElectionCallback callback, int requiredMasterJoins, ClusterService clusterService) {
        ElectionContext(ElectionCallback callback, int requiredMasterJoins) {
            this.callback = callback;
            this.requiredMasterJoins = requiredMasterJoins;
            this.clusterService = clusterService;
        }

        abstract void onClose();

@@ -321,7 +322,7 @@ public class NodeJoinController extends AbstractComponent {
        @Override
        public void onElectedAsMaster(ClusterState state) {
            assert pendingSetAsMasterTask.get() : "onElectedAsMaster called but pendingSetAsMasterTask is not set";
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            assert state.nodes().isLocalNodeElectedMaster() : "onElectedAsMaster called but local node is not master";
            if (closed.compareAndSet(false, true)) {
                try {

@@ -334,7 +335,7 @@ public class NodeJoinController extends AbstractComponent {

        @Override
        public void onFailure(Throwable t) {
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            if (closed.compareAndSet(false, true)) {
                try {
                    onClose();

@@ -343,10 +344,6 @@ public class NodeJoinController extends AbstractComponent {
                }
            }
        }

        private void assertClusterStateThread() {
            assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread();
        }
    }

@@ -449,6 +446,8 @@ public class NodeJoinController extends AbstractComponent {
                    logger.error("unexpected error during [{}]", e, source);
                }
            }

            NodeJoinController.this.electMaster.logMinimumMasterNodesWarningIfNecessary(oldState, newState);
        }
    }
}
@@ -215,7 +215,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
        nodesFD.setLocalNode(clusterService.localNode());
        joinThreadControl.start();
        pingService.start();
        this.nodeJoinController = new NodeJoinController(clusterService, routingService, discoverySettings, settings);
        this.nodeJoinController = new NodeJoinController(clusterService, routingService, electMaster, discoverySettings, settings);
    }

    @Override

@@ -617,6 +617,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                electMaster.logMinimumMasterNodesWarningIfNecessary(oldState, newState);
            }
        });
    }

@@ -1144,14 +1145,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

        /** cleans any running joining thread and calls {@link #rejoin} */
        public ClusterState stopRunningThreadAndRejoin(ClusterState clusterState, String reason) {
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            currentJoinThread.set(null);
            return rejoin(clusterState, reason);
        }

        /** starts a new joining thread if there is no currently active one and join thread controlling is started */
        public void startNewThreadIfNotRunning() {
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            if (joinThreadActive()) {
                return;
            }

@@ -1184,7 +1185,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
         * If the given thread is not the currently running join thread, the command is ignored.
         */
        public void markThreadAsDoneAndStartNew(Thread joinThread) {
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            if (!markThreadAsDone(joinThread)) {
                return;
            }

@@ -1193,7 +1194,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

        /** marks the given joinThread as completed. Returns false if the supplied thread is not the currently active join thread */
        public boolean markThreadAsDone(Thread joinThread) {
            assertClusterStateThread();
            ClusterService.assertClusterStateThread();
            return currentJoinThread.compareAndSet(joinThread, null);
        }

@@ -1209,9 +1210,5 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
            running.set(true);
        }

        private void assertClusterStateThread() {
            assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread();
        }
    }
}
@@ -22,6 +22,7 @@ package org.elasticsearch.discovery.zen.elect;
import com.carrotsearch.hppc.ObjectContainer;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;

@@ -80,6 +81,27 @@ public class ElectMasterService extends AbstractComponent {
        return count >= minimumMasterNodes;
    }

    public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
        int count = 0;
        for (DiscoveryNode node : nodes) {
            if (node.isMasterNode()) {
                count++;
            }
        }
        return count > 1 && minimumMasterNodes <= count / 2;
    }

    public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, ClusterState newState) {
        // check if min_master_nodes setting is too low and log warning
        if (hasTooManyMasterNodes(oldState.nodes()) == false && hasTooManyMasterNodes(newState.nodes())) {
            logger.warn("value for setting \""
                + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()
                + "\" is too low. This can result in data loss! Please set it to at least a quorum of master-eligible nodes "
                + "(current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])",
                minimumMasterNodes(), newState.getNodes().getMasterNodes().size());
        }
    }

    /**
     * Returns the given nodes sorted by likelihood of being elected as master, most likely first.
     * Non-master nodes are not removed but are rather put at the end
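The warning condition `count > 1 && minimumMasterNodes <= count / 2` flags a `min_master_nodes` value that no longer reaches a quorum: with 4 master-eligible nodes a quorum is 3, so a setting of 2 satisfies `2 <= 4 / 2` and triggers the warning. A compact check of the arithmetic:

--------------------------------
class QuorumCheck {
    // mirrors hasTooManyMasterNodes: true when min_master_nodes is at or below half
    static boolean tooLow(int masterEligibleCount, int minimumMasterNodes) {
        return masterEligibleCount > 1 && minimumMasterNodes <= masterEligibleCount / 2;
    }

    public static void main(String[] args) {
        System.out.println(tooLow(4, 2)); // true  — quorum of 4 is 3
        System.out.println(tooLow(4, 3)); // false
        System.out.println(tooLow(3, 2)); // false — 2 is a quorum of 3
    }
}
--------------------------------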
@@ -53,7 +53,6 @@ public class Environment {
    public static final Setting<List<String>> PATH_DATA_SETTING =
        Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
    public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope);
    public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", Property.NodeScope);
    public static final Setting<List<String>> PATH_REPO_SETTING =
        Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
    public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);

@@ -128,11 +127,7 @@ public class Environment {
        scriptsFile = configFile.resolve("scripts");
    }

    if (PATH_PLUGINS_SETTING.exists(settings)) {
        pluginsFile = PathUtils.get(cleanPath(PATH_PLUGINS_SETTING.get(settings)));
    } else {
        pluginsFile = homeFile.resolve("plugins");
    }
    pluginsFile = homeFile.resolve("plugins");

    List<String> dataPaths = PATH_DATA_SETTING.get(settings);
    if (dataPaths.isEmpty() == false) {
@@ -62,6 +62,24 @@ public class GatewayAllocator extends AbstractComponent {
        this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction);
    }

    /**
     * Returns true if the given shard has an async fetch pending
     */
    public boolean hasFetchPending(ShardId shardId, boolean primary) {
        if (primary) {
            AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch = asyncFetchStarted.get(shardId);
            if (fetch != null) {
                return fetch.getNumberOfInFlightFetches() > 0;
            }
        } else {
            AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch = asyncFetchStore.get(shardId);
            if (fetch != null) {
                return fetch.getNumberOfInFlightFetches() > 0;
            }
        }
        return false;
    }

    public void setReallocation(final ClusterService clusterService, final RoutingService routingService) {
        this.routingService = routingService;
        clusterService.add(new ClusterStateListener() {
@ -108,7 +108,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
|
|||
currentNode, nodeWithHighestMatch);
|
||||
it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
|
||||
"existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]",
|
||||
null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
|
||||
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
@@ -179,7 +179,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                 }
             } else if (matchingNodes.hasAnyData() == false) {
                 // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
-                changed |= ignoreUnassignedIfDelayed(unassignedIterator, shard);
+                ignoreUnassignedIfDelayed(unassignedIterator, shard);
             }
         }
         return changed;
@@ -195,21 +195,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
      *
      * @param unassignedIterator iterator over unassigned shards
      * @param shard the shard which might be delayed
-     * @return true iff allocation is delayed for this shard
      */
-    public boolean ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard) {
-        // calculate delay and store it in UnassignedInfo to be used by RoutingService
-        long delay = shard.unassignedInfo().getLastComputedLeftDelayNanos();
-        if (delay > 0) {
-            logger.debug("[{}][{}]: delaying allocation of [{}] for [{}]", shard.index(), shard.id(), shard, TimeValue.timeValueNanos(delay));
+    public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard) {
+        if (shard.unassignedInfo().isDelayed()) {
+            logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
             /**
              * mark it as changed, since we want to kick a publishing to schedule future allocation,
              * see {@link org.elasticsearch.cluster.routing.RoutingService#clusterChanged(ClusterChangedEvent)}).
              */
             unassignedIterator.removeAndIgnore();
-            return true;
         }
-        return false;
     }

     /**
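The delay decision now reads a flag precomputed on `UnassignedInfo` instead of recomputing the remaining nanos inside the allocator. A sketch of the new contract, reusing only calls visible in the hunk above (`unassignedIterator` and `shard` assumed in scope):

--------------------------------
// The method is void now: "changed" is signalled by removeAndIgnore()
// taking the shard out of this allocation round, and RoutingService
// schedules a future reroute once the delay expires.
if (shard.unassignedInfo().isDelayed()) {
    unassignedIterator.removeAndIgnore();
}
// No boolean is returned any more; callers such as the hasAnyData()
// branch above invoke the method purely for its side effect.
--------------------------------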
@@ -50,7 +50,6 @@ import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.IndexEventListener;
@@ -151,11 +150,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         this.indexStore = indexStore;
         indexFieldData.setListener(new FieldDataCacheListener(this));
         this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
-        PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext);
         this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool,
-            bitsetFilterCache.createListener(threadPool),
-            percolatorQueryCache.createListener(threadPool));
-        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache);
+            bitsetFilterCache.createListener(threadPool));
+        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache);
         this.engineFactory = engineFactory;
         // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
         this.searcherWrapper = wrapperFactory.newWrapper(this);
@@ -239,8 +236,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                 }
             }
         } finally {
-            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask,
-                cache().getPercolatorQueryCache());
+            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
         }
     }
 }
@@ -443,7 +439,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         return new QueryShardContext(
             indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(),
             similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(),
-            nodeServicesProvider.getClient(), indexCache.getPercolatorQueryCache(), indexReader,
+            nodeServicesProvider.getClient(), indexReader,
             nodeServicesProvider.getClusterService().state()
         );
     }
@@ -88,6 +88,7 @@ public interface CharMatcher {
             case Character.CURRENCY_SYMBOL:
             case Character.MATH_SYMBOL:
             case Character.OTHER_SYMBOL:
+            case Character.MODIFIER_SYMBOL:
                 return true;
             default:
                 return false;
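The added `MODIFIER_SYMBOL` case means characters such as `^` (U+005E) and backtick (U+0060) now count as symbols. A standalone check of the underlying `Character` categories, runnable with the plain JDK:

--------------------------------
public class ModifierSymbolDemo {
    public static void main(String[] args) {
        // '^' and '`' have type MODIFIER_SYMBOL; '$' is CURRENCY_SYMBOL and
        // '+' is MATH_SYMBOL, which were already matched before this change.
        for (char c : new char[] {'^', '`', '$', '+'}) {
            boolean modifier = Character.getType(c) == Character.MODIFIER_SYMBOL;
            System.out.println(c + " modifier-symbol? " + modifier);
        }
    }
}
--------------------------------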
@@ -33,13 +33,11 @@ import org.apache.lucene.analysis.util.CharArraySet;
 public final class FingerprintAnalyzer extends Analyzer {
     private final char separator;
     private final int maxOutputSize;
-    private final boolean preserveOriginal;
     private final CharArraySet stopWords;

-    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize, boolean preserveOriginal) {
+    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) {
         this.separator = separator;
         this.maxOutputSize = maxOutputSize;
-        this.preserveOriginal = preserveOriginal;
         this.stopWords = stopWords;
     }

@@ -48,7 +46,7 @@ public final class FingerprintAnalyzer extends Analyzer {
         final Tokenizer tokenizer = new StandardTokenizer();
         TokenStream stream = tokenizer;
         stream = new LowerCaseFilter(stream);
-        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
+        stream = new ASCIIFoldingFilter(stream, false);
         stream = new StopFilter(stream, stopWords);
         stream = new FingerprintFilter(stream, maxOutputSize, separator);
         return new TokenStreamComponents(tokenizer, stream);
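With `preserveOriginal` gone, the analyzer always folds to plain ASCII. A small sketch of the three-argument constructor in use, assuming `FingerprintAnalyzer` is importable from Elasticsearch's analysis package and Lucene is on the classpath; the max output size of 255 mirrors the factory default:

--------------------------------
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;

public class FingerprintDemo {
    public static void main(String[] args) throws Exception {
        // Three-argument constructor after this change; ASCII folding now
        // always discards the original (preserveOriginal is fixed to false).
        Analyzer analyzer = new FingerprintAnalyzer(CharArraySet.EMPTY_SET, ' ', 255);
        try (TokenStream ts = analyzer.tokenStream("field", "Zoo zoo café CAFE")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                // Tokens are lowercased, folded, deduplicated, sorted, and
                // joined, so this should print one token: "cafe zoo".
                System.out.println(term.toString());
            }
            ts.end();
        }
    }
}
--------------------------------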
@@ -34,10 +34,8 @@ import org.elasticsearch.index.IndexSettings;
 public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {

     public static ParseField MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.MAX_OUTPUT_SIZE;
-    public static ParseField PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.PRESERVE_ORIGINAL;

     public static int DEFAULT_MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.DEFAULT_MAX_OUTPUT_SIZE;
-    public static boolean DEFAULT_PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.DEFAULT_PRESERVE_ORIGINAL;
     public static CharArraySet DEFAULT_STOP_WORDS = CharArraySet.EMPTY_SET;

     private final FingerprintAnalyzer analyzer;
@@ -47,10 +45,9 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<A

         char separator = FingerprintTokenFilterFactory.parseSeparator(settings);
         int maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(),DEFAULT_MAX_OUTPUT_SIZE);
-        boolean preserveOriginal = settings.getAsBoolean(PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
         CharArraySet stopWords = Analysis.parseStopWords(env, settings, DEFAULT_STOP_WORDS);

-        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize, preserveOriginal);
+        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize);
     }

     @Override
@@ -34,7 +34,6 @@ import java.util.Set;
 /**
  *
  */
-@SuppressWarnings("deprecation")
 public class StopTokenFilterFactory extends AbstractTokenFilterFactory {

     private final CharArraySet stopWords;
Some files were not shown because too many files have changed in this diff.