Merge branch 'master' into feature/rank-eval

Christoph Büscher 2017-06-29 15:07:45 +02:00
commit 2708bcc6ed
695 changed files with 20795 additions and 7689 deletions

View File

@ -34,6 +34,10 @@ apply plugin: 'com.github.johnrengelman.shadow'
// have the shadow plugin provide the runShadow task
apply plugin: 'application'
// Not published so no need to assemble
tasks.remove(assemble)
build.dependsOn.remove('assemble')
archivesBaseName = 'elasticsearch-benchmarks'
mainClassName = 'org.openjdk.jmh.Main'

View File

@ -152,10 +152,28 @@ task verifyVersions {
}
}
/*
* When adding backcompat behavior that spans major versions, temporarily
* disabling the backcompat tests is necessary. This flag controls
* the enabled state of every bwc task. It should be set back to true
* after the backport of the backcompat code is complete.
*/
allprojects {
ext.bwc_tests_enabled = true
}
task verifyBwcTestsEnabled {
doLast {
if (project.bwc_tests_enabled == false) {
throw new GradleException('Bwc tests are disabled. They must be re-enabled after completing backcompat behavior backporting.')
}
}
}
task branchConsistency {
description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.'
group 'Verification'
dependsOn verifyVersions
dependsOn verifyVersions, verifyBwcTestsEnabled
}
subprojects {
@ -402,3 +420,17 @@ task run(type: Run) {
group = 'Verification'
impliesSubProjects = true
}
/* Remove assemble on all qa projects because we don't need to publish
* artifacts for them. */
gradle.projectsEvaluated {
subprojects {
if (project.path.startsWith(':qa')) {
Task assemble = project.tasks.findByName('assemble')
if (assemble) {
project.tasks.remove(assemble)
project.build.dependsOn.remove('assemble')
}
}
}
}

View File

@ -394,8 +394,11 @@ class BuildPlugin implements Plugin<Project> {
project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom t ->
// place the pom next to the jar it is for
t.destination = new File(project.buildDir, "distributions/${project.archivesBaseName}-${project.version}.pom")
// build poms with assemble
project.assemble.dependsOn(t)
// build poms with assemble (if the assemble task exists)
Task assemble = project.tasks.findByName('assemble')
if (assemble) {
assemble.dependsOn(t)
}
}
}
}

View File

@ -32,6 +32,9 @@ public class DocsTestPlugin extends RestTestPlugin {
public void apply(Project project) {
project.pluginManager.apply('elasticsearch.standalone-rest-test')
super.apply(project)
// Docs are published separately so no need to assemble
project.tasks.remove(project.assemble)
project.build.dependsOn.remove('assemble')
Map<String, String> defaultSubstitutions = [
/* These match up with the asciidoc syntax for substitutions but
* the values may differ. In particular {version} needs to resolve

View File

@ -127,6 +127,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
*/
Set<String> unconvertedCandidates = new HashSet<>()
/**
* The last non-TESTRESPONSE snippet.
*/
Snippet previousTest
/**
* Called each time a snippet is encountered. Tracks the snippets and
* calls buildTest to actually build the test.
@ -142,6 +147,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
if (snippet.testSetup) {
setup(snippet)
previousTest = snippet
return
}
if (snippet.testResponse) {
@ -150,6 +156,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
if (snippet.test || snippet.console) {
test(snippet)
previousTest = snippet
return
}
// Must be an unmarked snippet....
@ -158,7 +165,18 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
private void test(Snippet test) {
setupCurrent(test)
if (false == test.continued) {
if (test.continued) {
/* Catch some difficult-to-debug errors with // TEST[continued]
* and throw a helpful error message. */
if (previousTest == null || previousTest.path != test.path) {
throw new InvalidUserDataException("// TEST[continued] " +
"cannot be on first snippet in a file: $test")
}
if (previousTest != null && previousTest.testSetup) {
throw new InvalidUserDataException("// TEST[continued] " +
"cannot immediately follow // TESTSETUP: $test")
}
} else {
current.println('---')
current.println("\"line_$test.start\":")
/* The Elasticsearch test runner doesn't support the warnings

View File

@ -19,6 +19,7 @@
package org.elasticsearch.gradle.test
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.Version
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
@ -143,7 +144,7 @@ class NodeInfo {
args.add("${esScript}")
}
env = [ 'JAVA_HOME' : project.javaHome ]
env = ['JAVA_HOME': project.javaHome]
args.addAll("-E", "node.portsfile=true")
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
@ -158,7 +159,11 @@ class NodeInfo {
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
args.addAll("-E", "path.conf=${confDir}")
if (Version.fromString(nodeVersion).major == 5) {
args.addAll("-E", "path.conf=${confDir}")
} else {
args.addAll("--path.conf", "${confDir}")
}
if (!System.properties.containsKey("tests.es.path.data")) {
args.addAll("-E", "path.data=${-> dataDir.toString()}")
}

View File

@ -13,12 +13,14 @@
<!-- JNA requires the no-argument constructor on JNAKernel32Library.SizeT to be public-->
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNAKernel32Library.java" checks="RedundantModifier" />
<!-- the constructors on some local classes in these tests must be public-->
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="RedundantModifier" />
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when we the
files start to pass. -->
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]HeapBufferedAsyncResponseConsumerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]vectorhighlight[/\\]CustomFieldQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]Action.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
@ -34,7 +36,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]reroute[/\\]TransportClusterRerouteAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]ClusterUpdateSettingsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]SettingsUpdater.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]TransportClusterSearchShardsAction.java" checks="LineLength" />
@ -144,7 +145,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchPhaseController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]DelegatingActionListener.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptions.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ToXContentToBytes.java" checks="LineLength" />
@ -254,8 +254,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]Discovery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoverySettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscovery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]ESFileStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]AsyncShardFetch.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayAllocator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />
@ -272,7 +270,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]ShingleTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]StemmerOverrideTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]compound[/\\]HyphenationCompoundWordTokenFilterFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]cache[/\\]bitset[/\\]BitsetFilterCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]codec[/\\]PerFieldMappingPostingFormatCodec.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ElasticsearchConcurrentMergeScheduler.java" checks="LineLength" />
@ -343,7 +340,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]IndexingMemoryController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]IndicesService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltCacheFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]analysis[/\\]PreBuiltTokenFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]breaker[/\\]HierarchyCircuitBreakerService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCache.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]fielddata[/\\]cache[/\\]IndicesFieldDataCacheListener.java" checks="LineLength" />
@ -378,7 +374,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filters[/\\]FiltersAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]filters[/\\]InternalFilters.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]geogrid[/\\]GeoHashGridAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]histogram[/\\]HistogramAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]MissingAggregator.java" checks="LineLength" />
@ -410,7 +405,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafFieldsLookup.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]QueryPhase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescorer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
@ -544,7 +538,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]CidrsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]DistanceUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]FuzzinessTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArraysTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]LongObjectHashMapTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutorsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedExecutorsTests.java" checks="LineLength" />
@ -571,7 +564,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLogTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicySettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLogTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]NGramTokenizerFactoryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PatternCaptureTokenFilterTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PreBuiltAnalyzerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]commongrams[/\\]CommonGramsTokenFilterFactoryTests.java" checks="LineLength" />
@ -579,7 +571,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]AbstractFieldDataTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]AbstractStringFieldDataTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]BinaryDVFieldDataTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]FieldDataCacheTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]IndexFieldDataServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]ParentChildFieldDataTests.java" checks="LineLength" />
@ -611,7 +602,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RoutingFieldMapperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]SourceFieldMapperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]TokenCountFieldMapperIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]TypeFieldMapperTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]UpdateMappingOnClusterIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]UpdateMappingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilderTests.java" checks="LineLength" />
@ -679,7 +669,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]FileScriptTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistryTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModesTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueModeTests.java" checks="LineLength" />
@ -702,7 +691,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]TopHitsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]ValueCountIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]ExtendedStatsBucketIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]moving[/\\]avg[/\\]MovAvgIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]serialdiff[/\\]SerialDiffIT.java" checks="LineLength" />
@ -744,13 +732,11 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]versioning[/\\]SimpleVersioningIT.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionScriptEngine.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]StoredExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]reindex[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]reindex[/\\]TransportUpdateByQueryAction.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuCollationTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuFoldingTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IndexableBinaryStringTools.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]JapaneseStopTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]KuromojiAnalysisTests.java" checks="LineLength" />
@ -793,4 +779,4 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
</suppressions>
</suppressions>

View File

@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha3
lucene = 7.0.0-snapshot-a0aef2f
lucene = 7.0.0-snapshot-ad2cb77
# optional dependencies
spatial4j = 0.6
@ -25,7 +25,7 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# When updating mocksocket, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
mocksocket = 1.1
mocksocket = 1.2
# benchmark dependencies
jmh = 1.17.3

View File

@ -37,6 +37,10 @@ apply plugin: 'application'
group = 'org.elasticsearch.client'
// Not published so no need to assemble
tasks.remove(assemble)
build.dependsOn.remove('assemble')
archivesBaseName = 'client-benchmarks'
mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain'

View File

@ -27,9 +27,12 @@ esplugin {
classname 'org.elasticsearch.plugin.noop.NoopPlugin'
}
// Not published so no need to assemble
tasks.remove(assemble)
build.dependsOn.remove('assemble')
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
// no unit tests
test.enabled = false
integTest.enabled = false

View File

@ -49,8 +49,7 @@ import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
import org.elasticsearch.join.aggregations.ParsedChildren;
import org.elasticsearch.plugins.spi.NamedXContentProvider;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.Aggregation;
@ -92,8 +91,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.matrix.stats.ParsedMatrixStats;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg;
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
@ -142,11 +139,13 @@ import org.elasticsearch.search.suggest.phrase.PhraseSuggestion;
import org.elasticsearch.search.suggest.term.TermSuggestion;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -180,8 +179,9 @@ public class RestHighLevelClient {
*/
protected RestHighLevelClient(RestClient restClient, List<NamedXContentRegistry.Entry> namedXContentEntries) {
this.client = Objects.requireNonNull(restClient);
this.registry = new NamedXContentRegistry(Stream.of(getDefaultNamedXContents().stream(), namedXContentEntries.stream())
.flatMap(Function.identity()).collect(toList()));
this.registry = new NamedXContentRegistry(
Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream())
.flatMap(Function.identity()).collect(toList()));
}
/**
@ -395,6 +395,10 @@ public class RestHighLevelClient {
try {
return responseConverter.apply(e.getResponse());
} catch (Exception innerException) {
// The exception is ignored as we now try to parse the response as an error.
// This covers cases like get, where a 404 can either be a valid document-not-found response
// or an error for which parsing is completely different. We try to consider the 404 response
// as a valid one first; if parsing of the response breaks, we fall back to parsing it as an error.
throw parseResponseException(e);
}
}
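The comment above boils down to a small, reusable fallback pattern. A generic sketch with placeholder types R and T (these are not the client's actual signatures, just an illustration of the control flow):

import java.util.function.Function;

final class LenientParse {
    // Try to read the raw response as a valid result first (e.g. a 404 from
    // get can be a legitimate "not found"); only if that parsing breaks,
    // re-parse the same response as an error and throw it instead.
    static <R, T> T parse(R rawResponse,
                          Function<R, T> asResult,
                          Function<R, ? extends RuntimeException> asError) {
        try {
            return asResult.apply(rawResponse);
        } catch (Exception parseFailure) {
            throw asError.apply(rawResponse);
        }
    }
}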
@ -566,8 +570,6 @@ public class RestHighLevelClient {
map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c));
map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c));
map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c));
map.put(ChildrenAggregationBuilder.NAME, (p, c) -> ParsedChildren.fromXContent(p, (String) c));
map.put(MatrixStatsAggregationBuilder.NAME, (p, c) -> ParsedMatrixStats.fromXContent(p, (String) c));
List<NamedXContentRegistry.Entry> entries = map.entrySet().stream()
.map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
.collect(Collectors.toList());
@ -579,4 +581,15 @@ public class RestHighLevelClient {
(parser, context) -> CompletionSuggestion.fromXContent(parser, (String)context)));
return entries;
}
/**
* Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins.
*/
static List<NamedXContentRegistry.Entry> getProvidedNamedXContents() {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) {
entries.addAll(service.getNamedXContentParsers());
}
return entries;
}
}
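getProvidedNamedXContents() relies on the standard Java ServiceLoader mechanism: a module ships an implementation of NamedXContentProvider plus a META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider file naming it, and the client discovers it at runtime. A sketch of the provider side for the matrix-stats parser that this commit moves out of the default list (the class name and package are illustrative assumptions; the SPI interface and entry types come from the change itself):

import java.util.Collections;
import java.util.List;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.plugins.spi.NamedXContentProvider;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.matrix.stats.ParsedMatrixStats;

public class MatrixStatsNamedXContentProvider implements NamedXContentProvider {
    @Override
    public List<NamedXContentRegistry.Entry> getNamedXContentParsers() {
        // One entry: parse "matrix_stats" aggregations in search responses.
        ContextParser<Object, Aggregation> parser =
                (p, name) -> ParsedMatrixStats.fromXContent(p, (String) name);
        return Collections.singletonList(new NamedXContentRegistry.Entry(
                Aggregation.class, new ParseField(MatrixStatsAggregationBuilder.NAME), parser));
    }
}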

View File

@ -56,10 +56,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
@ -69,6 +71,7 @@ import org.mockito.internal.matchers.VarargMatcher;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@ -613,9 +616,9 @@ public class RestHighLevelClientTests extends ESTestCase {
assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
}
public void testNamedXContents() {
public void testDefaultNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getDefaultNamedXContents();
assertEquals(45, namedXContents.size());
assertEquals(43, namedXContents.size());
Map<Class<?>, Integer> categories = new HashMap<>();
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
Integer counter = categories.putIfAbsent(namedXContent.categoryClass, 1);
@ -624,10 +627,28 @@ public class RestHighLevelClientTests extends ESTestCase {
}
}
assertEquals(2, categories.size());
assertEquals(Integer.valueOf(42), categories.get(Aggregation.class));
assertEquals(Integer.valueOf(40), categories.get(Aggregation.class));
assertEquals(Integer.valueOf(3), categories.get(Suggest.Suggestion.class));
}
public void testProvidedNamedXContents() {
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
assertEquals(2, namedXContents.size());
Map<Class<?>, Integer> categories = new HashMap<>();
List<String> names = new ArrayList<>();
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
names.add(namedXContent.name.getPreferredName());
Integer counter = categories.putIfAbsent(namedXContent.categoryClass, 1);
if (counter != null) {
categories.put(namedXContent.categoryClass, counter + 1);
}
}
assertEquals(1, categories.size());
assertEquals(Integer.valueOf(2), categories.get(Aggregation.class));
assertTrue(names.contains(ChildrenAggregationBuilder.NAME));
assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME));
}
private static class TrackingActionListener implements ActionListener<Integer> {
private final AtomicInteger statusCode = new AtomicInteger(-1);
private final AtomicReference<Exception> exception = new AtomicReference<>();

View File

@ -276,19 +276,20 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
}
public void testSearchWithParentJoin() throws IOException {
final String indexName = "child_example";
StringEntity parentMapping = new StringEntity("{\n" +
" \"mappings\": {\n" +
" \"answer\" : {\n" +
" \"_parent\" : {\n" +
" \"type\" : \"question\"\n" +
" \"qa\" : {\n" +
" \"properties\" : {\n" +
" \"qa_join_field\" : {\n" +
" \"type\" : \"join\",\n" +
" \"relations\" : { \"question\" : \"answer\" }\n" +
" }\n" +
" }\n" +
" }\n" +
" },\n" +
" \"settings\": {\n" +
" \"index.mapping.single_type\": false" +
" }\n" +
" }" +
"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/child_example", Collections.emptyMap(), parentMapping);
client().performRequest("PUT", "/" + indexName, Collections.emptyMap(), parentMapping);
StringEntity questionDoc = new StringEntity("{\n" +
" \"body\": \"<p>I have Windows 2003 server and i bought a new Windows 2008 server...\",\n" +
" \"title\": \"Whats the best way to file transfer my site from server to a newer one?\",\n" +
@ -296,9 +297,10 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
" \"windows-server-2003\",\n" +
" \"windows-server-2008\",\n" +
" \"file-transfer\"\n" +
" ]\n" +
" ],\n" +
" \"qa_join_field\" : \"question\"\n" +
"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/child_example/question/1", Collections.emptyMap(), questionDoc);
client().performRequest("PUT", "/" + indexName + "/qa/1", Collections.emptyMap(), questionDoc);
StringEntity answerDoc1 = new StringEntity("{\n" +
" \"owner\": {\n" +
" \"location\": \"Norfolk, United Kingdom\",\n" +
@ -306,9 +308,13 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
" \"id\": 48\n" +
" },\n" +
" \"body\": \"<p>Unfortunately you're pretty much limited to FTP...\",\n" +
" \"qa_join_field\" : {\n" +
" \"name\" : \"answer\",\n" +
" \"parent\" : \"1\"\n" +
" },\n" +
" \"creation_date\": \"2009-05-04T13:45:37.030\"\n" +
"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "child_example/answer/1", Collections.singletonMap("parent", "1"), answerDoc1);
client().performRequest("PUT", "/" + indexName + "/qa/2", Collections.singletonMap("routing", "1"), answerDoc1);
StringEntity answerDoc2 = new StringEntity("{\n" +
" \"owner\": {\n" +
" \"location\": \"Norfolk, United Kingdom\",\n" +
@ -316,9 +322,13 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
" \"id\": 49\n" +
" },\n" +
" \"body\": \"<p>Use Linux...\",\n" +
" \"qa_join_field\" : {\n" +
" \"name\" : \"answer\",\n" +
" \"parent\" : \"1\"\n" +
" },\n" +
" \"creation_date\": \"2009-05-05T13:45:37.030\"\n" +
"}", ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/child_example/answer/2", Collections.singletonMap("parent", "1"), answerDoc2);
client().performRequest("PUT", "/" + indexName + "/qa/3", Collections.singletonMap("routing", "1"), answerDoc2);
client().performRequest("POST", "/_refresh");
TermsAggregationBuilder leafTermAgg = new TermsAggregationBuilder("top-names", ValueType.STRING)
@ -328,7 +338,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
.size(10).subAggregation(childrenAgg);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.size(0).aggregation(termsAgg);
SearchRequest searchRequest = new SearchRequest("child_example");
SearchRequest searchRequest = new SearchRequest(indexName);
searchRequest.source(searchSourceBuilder);
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);

View File

@ -553,7 +553,7 @@ public class RestClient implements Closeable {
return httpRequest;
}
private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
Objects.requireNonNull(path, "path must not be null");
try {
String fullPath;

View File

@ -23,6 +23,9 @@ import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import java.net.URI;
import java.util.Collections;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
@ -77,6 +80,22 @@ public class RestClientTests extends RestClientTestCase {
}
}
public void testBuildUriLeavesPathUntouched() {
{
URI uri = RestClient.buildUri("/foo$bar", "/index/type/id", Collections.<String, String>emptyMap());
assertEquals("/foo$bar/index/type/id", uri.getPath());
}
{
URI uri = RestClient.buildUri(null, "/foo$bar/ty/pe/i/d", Collections.<String, String>emptyMap());
assertEquals("/foo$bar/ty/pe/i/d", uri.getPath());
}
{
URI uri = RestClient.buildUri(null, "/index/type/id", Collections.singletonMap("foo$bar", "x/y/z"));
assertEquals("/index/type/id", uri.getPath());
assertEquals("foo$bar=x/y/z", uri.getQuery());
}
}
private static RestClient createRestClient() {
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null);

View File

@ -1 +0,0 @@
5e191674c50c9d99c9838da52cbf67c411998f4e

View File

@ -0,0 +1 @@
00d3260223eac0405a82eeeb8439de0e5eb5f888

View File

@ -1 +0,0 @@
45bc34ab640d5d1a7491b523631b902f20db5384

View File

@ -0,0 +1 @@
3a698989219afd9150738899bc849075c102881b

View File

@ -1 +0,0 @@
b44d86e9077443c3ba4918a85603734461c6b448

View File

@ -0,0 +1 @@
bb636d31949418943454dbe2d72b9b66cd743f9f

View File

@ -1 +0,0 @@
409b616d40e2041a02890b2dc477ed845e3121e9

View File

@ -0,0 +1 @@
720252d786273edcc48b2ae7b380bc229fe8930c

View File

@ -1 +0,0 @@
cfac105541315e2ca54955f681b410a7aa3bbb9d

View File

@ -0,0 +1 @@
735178c26f3eb361c30657beeec9e57bd5548d58

View File

@ -1 +0,0 @@
993c1331130dd26c632b964fd8caac259bb9f3fc

View File

@ -0,0 +1 @@
de5e5cd9b00be4d005d0e51c74084be6c07b0bbd

View File

@ -1 +0,0 @@
ec1460a28850410112a6349a7fff27df31242295

View File

@ -0,0 +1 @@
796ca5e5a9af3cc21f50156fa7e614338ec15ceb

View File

@ -1 +0,0 @@
57d342dbe68cf05361ccfda6bb76f2410cac900b

View File

@ -0,0 +1 @@
7ba802083c4c97a07d9487c2b26ee39e4f8e3c7e

View File

@ -1 +0,0 @@
5ed10847b6a2353ac66decd5a2ee1a1d34353049

View File

@ -0,0 +1 @@
d66adfdb3f330b726420db5f8db21b17a0d9991d

View File

@ -1 +0,0 @@
23ce6c2ea59287d8fe4fe31f466e9a58a1efe7b5

View File

@ -0,0 +1 @@
569c6362cb87858fc282fd786ba0fda0c44f0a8b

View File

@ -1 +0,0 @@
78bda71c8e65428927136f81112a031aa9cd04d4

View File

@ -0,0 +1 @@
0ba62e91082910b1057027b8912395da670105d0

View File

@ -1 +0,0 @@
1e7ea95e6197176015b13551c7496be4867ede45

View File

@ -0,0 +1 @@
968e678dc4a236bbc8e4c2eb66f5702ea48aae10

View File

@ -1 +0,0 @@
5ae4ecd6c478456395ae9a3f954b8afc13629bb9

View File

@ -0,0 +1 @@
579670cc27104fdbd627959b7982a99eab1d16d1

View File

@ -1 +0,0 @@
d5d1a81fc290b9660a49557f848dc2a3c4f2048b

View File

@ -0,0 +1 @@
53f3fc06ed3357dc75d7b050172520aa86d41010

View File

@ -1 +0,0 @@
d77cdd8f2782062a3b4c319c64f0fa4d804aafed

View File

@ -0,0 +1 @@
5281aa095f4f46580ea2008ffd040733096d0246

View File

@ -78,6 +78,10 @@ public class Version implements Comparable<Version> {
public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
public static final int V_5_4_1_ID = 5040199;
public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
public static final int V_5_4_2_ID = 5040299;
public static final Version V_5_4_2 = new Version(V_5_4_2_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
public static final int V_5_4_3_ID = 5040399;
public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
public static final int V_5_5_0_ID = 5050099;
public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
public static final int V_5_6_0_ID = 5060099;
@ -116,6 +120,10 @@ public class Version implements Comparable<Version> {
return V_5_6_0;
case V_5_5_0_ID:
return V_5_5_0;
case V_5_4_3_ID:
return V_5_4_3;
case V_5_4_2_ID:
return V_5_4_2;
case V_5_4_1_ID:
return V_5_4_1;
case V_5_4_0_ID:
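The id constants above follow the scheme used throughout Version: major, minor, revision, and build packed into a single integer, with build 99 marking a GA release, so 5.4.2 becomes 5040299. A minimal sketch of the encoding (the method name is illustrative):

final class VersionIds {
    // major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    // build 99 denotes a GA release, so versionId(5, 4, 2, 99) == 5040299,
    // matching V_5_4_2_ID above.
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
}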

View File

@ -43,8 +43,6 @@ import java.net.URLEncoder;
import java.util.Locale;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
/**
* A base class for the response of a write operation that involves a single doc
@ -351,17 +349,15 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
context.setSeqNo(parser.longValue());
} else if (_PRIMARY_TERM.equals(currentFieldName)) {
context.setPrimaryTerm(parser.longValue());
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (_SHARDS.equals(currentFieldName)) {
context.setShardInfo(ShardInfo.fromXContent(parser));
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
parser.skipChildren(); // skip potential inner objects for forward compatibility
}
} else {
throwUnknownToken(token, parser.getTokenLocation());
} else if (token == XContentParser.Token.START_ARRAY) {
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
}
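skipChildren() fast-forwards the parser over an entire unknown object or array instead of throwing, so an older reader tolerates fields introduced by a newer writer. The same technique, sketched standalone with jackson-core (an assumption for illustration; the commit itself uses Elasticsearch's XContentParser wrapper):

import java.io.IOException;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

final class LenientReader {
    // Reads only the field we know ("_seq_no") and skips over any unknown
    // object or array value; unknown scalars are simply ignored.
    static long readSeqNo(String json) throws IOException {
        long seqNo = -1;
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken(); // START_OBJECT
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken(); // advance to the field's value
                if ("_seq_no".equals(field)) {
                    seqNo = parser.getLongValue();
                } else if (parser.getCurrentToken().isStructStart()) {
                    parser.skipChildren(); // forward compatibility: skip it
                }
            }
        }
        return seqNo;
    }
}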

View File

@ -37,6 +37,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
public static final String ALL_SNAPSHOTS = "_all";
public static final String CURRENT_SNAPSHOT = "_current";
public static final boolean DEFAULT_VERBOSE_MODE = true;
private String repository;
@ -44,7 +45,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
private boolean ignoreUnavailable;
private boolean verbose = true;
private boolean verbose = DEFAULT_VERBOSE_MODE;
public GetSnapshotsRequest() {
}

View File

@ -49,6 +49,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.CustomAnalyzerProvider;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
@ -183,13 +184,14 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
Tuple<String, TokenizerFactory> tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers,
analysisRegistry, environment);
TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories);
List<CharFilterFactory> charFilterFactoryList = parseCharFilterFactories(request, indexSettings, analysisRegistry, environment);
CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories);
List<TokenFilterFactory> tokenFilterFactoryList = parseTokenFilterFactories(request, indexSettings, analysisRegistry,
environment, tokenizerFactory, charFilterFactoryList);
analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), charFilterFactories, tokenFilterFactories);
analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(),
charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]),
tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()]));
closeAnalyzer = true;
} else if (analyzer == null) {
if (indexAnalyzers == null) {
@ -462,12 +464,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return extendedAttributes;
}
private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
private static List<CharFilterFactory> parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment) throws IOException {
List<CharFilterFactory> charFilterFactoryList = new ArrayList<>();
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
for (int i = 0; i < request.charFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition charFilter = request.charFilters().get(i);
List<AnalyzeRequest.NameOrDefinition> charFilters = request.charFilters();
for (AnalyzeRequest.NameOrDefinition charFilter : charFilters) {
CharFilterFactory charFilterFactory;
// parse anonymous settings
if (charFilter.definition != null) {
Settings settings = getAnonymousSettings(charFilter.definition);
@ -481,7 +484,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]");
}
// Need to set anonymous "name" of char_filter
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
charFilterFactory = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter", settings);
} else {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory;
if (indexSettings == null) {
@ -489,31 +492,34 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
charFilterFactory = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, indexSettings);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(indexSettings, environment, charFilter.name,
charFilterFactory = charFilterFactoryFactory.get(indexSettings, environment, charFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_CHAR_FILTER + "." + charFilter.name));
}
}
if (charFilterFactories[i] == null) {
if (charFilterFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
charFilterFactoryList.add(charFilterFactory);
}
}
return charFilterFactories;
return charFilterFactoryList;
}
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
private static List<TokenFilterFactory> parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, Tuple<String, TokenizerFactory> tokenizerFactory,
List<CharFilterFactory> charFilterFactoryList) throws IOException {
List<TokenFilterFactory> tokenFilterFactoryList = new ArrayList<>();
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
for (int i = 0; i < request.tokenFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition tokenFilter = request.tokenFilters().get(i);
List<AnalyzeRequest.NameOrDefinition> tokenFilters = request.tokenFilters();
for (AnalyzeRequest.NameOrDefinition tokenFilter : tokenFilters) {
TokenFilterFactory tokenFilterFactory;
// parse anonymous settings
if (tokenFilter.definition != null) {
Settings settings = getAnonymousSettings(tokenFilter.definition);
@ -527,7 +533,11 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]");
}
// Need to set anonymous "name" of tokenfilter
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", settings);
tokenFilterFactory = CustomAnalyzerProvider.checkAndApplySynonymFilter(tokenFilterFactory, tokenizerFactory.v1(), tokenizerFactory.v2(), tokenFilterFactoryList,
charFilterFactoryList, environment);
} else {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory;
if (indexSettings == null) {
@ -535,23 +545,26 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
tokenFilterFactory = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, indexSettings);
if (tokenFilterFactoryFactory == null) {
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name));
Settings settings = AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name);
tokenFilterFactory = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name, settings);
tokenFilterFactory = CustomAnalyzerProvider.checkAndApplySynonymFilter(tokenFilterFactory, tokenizerFactory.v1(), tokenizerFactory.v2(), tokenFilterFactoryList,
charFilterFactoryList, environment);
}
}
if (tokenFilterFactories[i] == null) {
if (tokenFilterFactory == null) {
throw new IllegalArgumentException("failed to find or create token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactoryList.add(tokenFilterFactory);
}
}
return tokenFilterFactories;
return tokenFilterFactoryList;
}
private static Tuple<String, TokenizerFactory> parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalzyers,

View File

@ -38,7 +38,7 @@ public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest>
private String[] indices;
// Delete index should work by default on both open and closed indices.
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true);
public DeleteIndexRequest() {
}

View File

@ -32,7 +32,7 @@ public class DeleteIndexTemplateResponse extends AcknowledgedResponse {
DeleteIndexTemplateResponse() {
}
DeleteIndexTemplateResponse(boolean acknowledged) {
protected DeleteIndexTemplateResponse(boolean acknowledged) {
super(acknowledged);
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.VersionConflictEngineException;
/**
* A struct-like holder for a bulk item's response, result, and the resulting
@ -39,4 +40,9 @@ class BulkItemResultHolder {
this.operationResult = operationResult;
this.replicaRequest = replicaRequest;
}
public boolean isVersionConflict() {
return operationResult == null ? false :
operationResult.getFailure() instanceof VersionConflictEngineException;
}
}

View File

@ -27,13 +27,13 @@ public interface MappingUpdatePerformer {
/**
* Update the mappings on the master.
*/
void updateMappings(Mapping update, ShardId shardId, String type) throws Exception;
void updateMappings(Mapping update, ShardId shardId, String type);
/**
* Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be
* retried on the primary due to the mappings not being present yet, or a different exception if
* updating the mappings on the master failed.
*/
void verifyMappings(Mapping update, ShardId shardId) throws Exception;
void verifyMappings(Mapping update, ShardId shardId);
}

View File

@ -33,6 +33,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
@ -50,7 +51,6 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.get.GetResult;
@ -66,7 +66,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Map;
import java.util.function.LongSupplier;
@ -265,131 +264,150 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
return ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException;
}
/**
* Creates a new bulk item result from the given requests and result of performing the update operation on the shard.
*/
static BulkItemResultHolder processUpdateResponse(final UpdateRequest updateRequest, final String concreteIndex,
final Engine.Result result, final UpdateHelper.Result translate,
final IndexShard primary, final int bulkReqId) throws Exception {
assert result.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "failed result should not have a sequence number";
Engine.Operation.TYPE opType = result.getOperationType();
final UpdateResponse updateResponse;
final BulkItemRequest replicaRequest;
// enrich update response and set translated update (index/delete) request for replica execution in bulk items
if (opType == Engine.Operation.TYPE.INDEX) {
assert result instanceof Engine.IndexResult : result.getClass();
final IndexRequest updateIndexRequest = translate.action();
final IndexResponse indexResponse = new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(),
result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated());
updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(),
indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getPrimaryTerm(), indexResponse.getVersion(),
indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
final BytesReference indexSourceAsBytes = updateIndexRequest.source();
final Tuple<XContentType, Map<String, Object>> sourceAndContent =
XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType());
updateResponse.setGetResult(UpdateHelper.extractGetResult(updateRequest, concreteIndex,
indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
// set translated request as replica request
replicaRequest = new BulkItemRequest(bulkReqId, updateIndexRequest);
} else if (opType == Engine.Operation.TYPE.DELETE) {
assert result instanceof Engine.DeleteResult : result.getClass();
final DeleteRequest updateDeleteRequest = translate.action();
final DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), updateDeleteRequest.type(), updateDeleteRequest.id(),
result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound());
updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(),
deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), deleteResponse.getPrimaryTerm(),
deleteResponse.getVersion(), deleteResponse.getResult());
final GetResult getResult = UpdateHelper.extractGetResult(updateRequest, concreteIndex, deleteResponse.getVersion(),
translate.updatedSourceAsMap(), translate.updateSourceContentType(), null);
updateResponse.setGetResult(getResult);
// set translated request as replica request
replicaRequest = new BulkItemRequest(bulkReqId, updateDeleteRequest);
} else {
throw new IllegalArgumentException("unknown operation type: " + opType);
}
return new BulkItemResultHolder(updateResponse, result, replicaRequest);
}
/**
* Executes update request once, delegating to an index or delete operation after translation.
* NOOP updates are indicated by returning a <code>null</code> operation in {@link BulkItemResultHolder}
*/
static BulkItemResultHolder executeUpdateRequestOnce(UpdateRequest updateRequest, IndexShard primary,
IndexMetaData metaData, String concreteIndex,
UpdateHelper updateHelper, LongSupplier nowInMillis,
BulkItemRequest primaryItemRequest, int bulkReqId,
final MappingUpdatePerformer mappingUpdater) throws Exception {
final UpdateHelper.Result translate;
// translate update request
try {
translate = updateHelper.prepare(updateRequest, primary, nowInMillis);
} catch (Exception failure) {
// we may fail to translate an update into an index or delete operation
// we use an index result to communicate the failure while translating the update request
final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO);
return new BulkItemResultHolder(null, result, primaryItemRequest);
}
final Engine.Result result;
// execute translated update request
switch (translate.getResponseResult()) {
case CREATED:
case UPDATED:
IndexRequest indexRequest = translate.action();
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, concreteIndex);
result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
break;
case DELETED:
DeleteRequest deleteRequest = translate.action();
result = executeDeleteRequestOnPrimary(deleteRequest, primary, mappingUpdater);
break;
case NOOP:
primary.noopUpdate(updateRequest.type());
result = null;
break;
default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
}
if (result == null) {
// this is a noop operation
final UpdateResponse updateResponse = translate.action();
return new BulkItemResultHolder(updateResponse, result, primaryItemRequest);
} else if (result.hasFailure()) {
// There was a result, and the result was a failure
return new BulkItemResultHolder(null, result, primaryItemRequest);
} else {
// It was successful, we need to construct the response and return it
return processUpdateResponse(updateRequest, concreteIndex, result, translate, primary, bulkReqId);
}
}
/**
* Executes update request, delegating to an index or delete operation after translation,
* handles retries on version conflict and constructs update response
* NOTE: reassigns bulk item request at <code>requestIndex</code> for replicas to
* execute translated update request (NOOP update is an exception). NOOP updates are
* indicated by returning a <code>null</code> operation in {@link BulkItemResultHolder}
* */
* NOOP updates are indicated by returning a <code>null</code> operation
* in {@link BulkItemResultHolder}
*/
private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary,
IndexMetaData metaData, BulkShardRequest request,
int requestIndex, UpdateHelper updateHelper,
LongSupplier nowInMillis,
final MappingUpdatePerformer mappingUpdater) throws Exception {
Engine.Result result = null;
UpdateResponse updateResponse = null;
BulkItemRequest replicaRequest = request.items()[requestIndex];
int maxAttempts = updateRequest.retryOnConflict();
for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) {
final UpdateHelper.Result translate;
// translate update request
try {
translate = updateHelper.prepare(updateRequest, primary, nowInMillis);
} catch (Exception failure) {
// we may fail to translate an update into an index or delete operation
// we use an index result to communicate the failure while translating the update request
result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO);
break; // out of retry loop
}
// execute translated update request
switch (translate.getResponseResult()) {
case CREATED:
case UPDATED:
IndexRequest indexRequest = translate.action();
MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
indexRequest.process(mappingMd, request.index());
result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater);
break;
case DELETED:
DeleteRequest deleteRequest = translate.action();
result = executeDeleteRequestOnPrimary(deleteRequest, primary, mappingUpdater);
break;
case NOOP:
primary.noopUpdate(updateRequest.type());
break;
default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult());
}
if (result == null) {
// this is a noop operation
updateResponse = translate.action();
break; // out of retry loop
} else if (result.hasFailure() == false) {
// enrich update response and
// set translated update (index/delete) request for replica execution in bulk items
switch (result.getOperationType()) {
case INDEX:
assert result instanceof Engine.IndexResult : result.getClass();
IndexRequest updateIndexRequest = translate.action();
final IndexResponse indexResponse = new IndexResponse(
primary.shardId(),
updateIndexRequest.type(),
updateIndexRequest.id(),
result.getSeqNo(),
primary.getPrimaryTerm(),
result.getVersion(),
((Engine.IndexResult) result).isCreated());
BytesReference indexSourceAsBytes = updateIndexRequest.source();
updateResponse = new UpdateResponse(
indexResponse.getShardInfo(),
indexResponse.getShardId(),
indexResponse.getType(),
indexResponse.getId(),
indexResponse.getSeqNo(),
indexResponse.getPrimaryTerm(),
indexResponse.getVersion(),
indexResponse.getResult());
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent =
XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType());
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(),
indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}
// set translated request as replica request
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest);
break;
case DELETE:
assert result instanceof Engine.DeleteResult : result.getClass();
DeleteRequest updateDeleteRequest = translate.action();
DeleteResponse deleteResponse = new DeleteResponse(
primary.shardId(),
updateDeleteRequest.type(),
updateDeleteRequest.id(),
result.getSeqNo(),
primary.getPrimaryTerm(),
result.getVersion(),
((Engine.DeleteResult) result).isFound());
updateResponse = new UpdateResponse(
deleteResponse.getShardInfo(),
deleteResponse.getShardId(),
deleteResponse.getType(),
deleteResponse.getId(),
deleteResponse.getSeqNo(),
deleteResponse.getPrimaryTerm(),
deleteResponse.getVersion(),
deleteResponse.getResult());
final GetResult getResult = updateHelper.extractGetResult(
updateRequest,
request.index(),
deleteResponse.getVersion(),
translate.updatedSourceAsMap(),
translate.updateSourceContentType(),
null);
updateResponse.setGetResult(getResult);
// set translated request as replica request
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
break;
}
assert result.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO;
// successful operation
break; // out of retry loop
} else if (result.getFailure() instanceof VersionConflictEngineException == false) {
// not a version conflict exception
break; // out of retry loop
BulkItemRequest primaryItemRequest = request.items()[requestIndex];
assert primaryItemRequest.request() == updateRequest
: "expected bulk item request to contain the original update request, got: " +
primaryItemRequest.request() + " and " + updateRequest;
BulkItemResultHolder holder = null;
// There must be at least one attempt
int maxAttempts = Math.max(1, updateRequest.retryOnConflict());
for (int attemptCount = 0; attemptCount < maxAttempts; attemptCount++) {
holder = executeUpdateRequestOnce(updateRequest, primary, metaData, request.index(), updateHelper,
nowInMillis, primaryItemRequest, request.items()[requestIndex].id(), mappingUpdater);
// It was either a successful request, or it was a non-conflict failure
if (holder.isVersionConflict() == false) {
return holder;
}
}
return new BulkItemResultHolder(updateResponse, result, replicaRequest);
// We ran out of tries and haven't returned a valid bulk item response, so return the last one generated
return holder;
}
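The retry loop above boils down to a small reusable shape. A hedged sketch, ignoring checked exceptions for brevity; executeOnce and isConflict stand in for executeUpdateRequestOnce and BulkItemResultHolder.isVersionConflict, and the helper name is illustrative:

import java.util.function.Predicate;
import java.util.function.Supplier;

// Generic retry-on-conflict shape: run at least once, stop early on any
// non-conflict outcome, and surface the last conflict if attempts run out.
static <T> T retryOnConflict(int retryOnConflict, Supplier<T> executeOnce, Predicate<T> isConflict) {
    T last = null;
    int maxAttempts = Math.max(1, retryOnConflict); // there must be at least one attempt
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        last = executeOnce.get();
        if (isConflict.test(last) == false) {
            return last; // either a success or a non-conflict failure
        }
    }
    return last; // out of attempts: report the last conflict
}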
/** Modes for executing item request on replica depending on corresponding primary execution result */
@ -455,20 +473,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
switch (replicaItemExecutionMode(item, i)) {
case NORMAL:
final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult =
executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, primaryTerm, replica);
break;
case DELETE:
operationResult =
executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, primaryTerm, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
+ docWriteRequest.opType().getLowercase());
}
operationResult = performOpOnReplica(primaryResponse, docWriteRequest, primaryTerm, replica);
assert operationResult != null : "operation result must never be null when primary response has no failure";
location = syncOperationResultOrThrow(operationResult, location);
break;
@ -477,12 +482,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
case FAILURE:
final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure();
assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned";
operationResult = executeFailureNoOpOnReplica(failure, primaryTerm, replica);
operationResult = replica.markSeqNoAsNoop(failure.getSeqNo(), primaryTerm, failure.getMessage());
assert operationResult != null : "operation result must never be null when primary response has no failure";
location = syncOperationResultOrThrow(operationResult, location);
break;
default:
throw new IllegalStateException("illegal replica item execution mode for: " + item.request());
throw new IllegalStateException("illegal replica item execution mode for: " + docWriteRequest);
}
} catch (Exception e) {
// if it's not an ignorable replica failure, we need to make sure to bubble up the failure
@ -495,195 +500,75 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
return location;
}
/** Syncs operation result to the translog or throws a shard not available failure */
private static Translog.Location syncOperationResultOrThrow(final Engine.Result operationResult,
final Translog.Location currentLocation) throws Exception {
final Translog.Location location;
if (operationResult.hasFailure()) {
// check if any transient write operation failures should be bubbled up
Exception failure = operationResult.getFailure();
assert failure instanceof MapperParsingException : "expected a mapper parsing failure, got " + failure;
if (!TransportActions.isShardNotAvailableException(failure)) {
throw failure;
} else {
location = currentLocation;
}
} else {
location = locationToSync(currentLocation, operationResult.getTranslogLocation());
private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse, DocWriteRequest docWriteRequest,
long primaryTerm, IndexShard replica) throws Exception {
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
final IndexRequest indexRequest = (IndexRequest) docWriteRequest;
final ShardId shardId = replica.shardId();
final SourceToParse sourceToParse =
SourceToParse.source(shardId.getIndexName(),
indexRequest.type(), indexRequest.id(), indexRequest.source(), indexRequest.getContentType())
.routing(indexRequest.routing()).parent(indexRequest.parent());
return replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryTerm, primaryResponse.getVersion(),
indexRequest.versionType().versionTypeForReplicationAndRecovery(), indexRequest.getAutoGeneratedTimestamp(),
indexRequest.isRetry(), sourceToParse, update -> {
throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
"Mappings are not available on the replica yet, triggered update: " + update);
});
case DELETE:
DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest;
return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryTerm, primaryResponse.getVersion(),
deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery(),
update -> {
throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
"Mappings are not available on the replica yet, triggered update: " + update);
});
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
+ docWriteRequest.opType().getLowercase());
}
return location;
}
private static Translog.Location locationToSync(Translog.Location current,
Translog.Location next) {
/* here we are moving forward in the translog with each operation. Under the hood this might
* cross translog files which is ok since from the user perspective the translog is like a
* tape where only the highest location needs to be fsynced in order to sync all previous
* locations even though they are not in the same file. When the translog rolls over files
* the previous file is fsynced after closing, if needed. */
assert next != null : "next operation can't be null";
assert current == null || current.compareTo(next) < 0 :
"translog locations are not increasing";
return next;
}
/**
* Execute the given {@link IndexRequest} on a replica shard, throwing a
* {@link RetryOnReplicaException} if the operation needs to be re-tried.
*/
private static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request,
long primaryTerm, IndexShard replica) throws IOException {
final Engine.Index operation;
try {
operation = prepareIndexOperationOnReplica(primaryResponse, request, primaryTerm, replica);
} catch (MapperParsingException e) {
return new Engine.IndexResult(e, primaryResponse.getVersion(), primaryResponse.getSeqNo());
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
final ShardId shardId = replica.shardId();
throw new RetryOnReplicaException(shardId,
"Mappings are not available on the replica yet, triggered update: " + update);
}
return replica.index(operation);
}
/** Utility method to prepare an index operation on replica shards */
static Engine.Index prepareIndexOperationOnReplica(
DocWriteResponse primaryResponse,
IndexRequest request,
long primaryTerm,
IndexShard replica) {
final ShardId shardId = replica.shardId();
final long version = primaryResponse.getVersion();
final long seqNo = primaryResponse.getSeqNo();
final SourceToParse sourceToParse =
SourceToParse.source(shardId.getIndexName(),
request.type(), request.id(), request.source(), request.getContentType())
.routing(request.routing()).parent(request.parent());
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
assert versionType.validateVersionForWrites(version);
return replica.prepareIndexOnReplica(sourceToParse, seqNo, primaryTerm, version, versionType,
request.getAutoGeneratedTimestamp(), request.isRetry());
}
/** Utility method to prepare an index operation on primary shards */
private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
final SourceToParse sourceToParse =
SourceToParse.source(request.index(), request.type(),
request.id(), request.source(), request.getContentType())
.routing(request.routing()).parent(request.parent());
return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(),
request.getAutoGeneratedTimestamp(), request.isRetry());
}
/** Executes index operation on primary shard after updates mapping if dynamic mappings are found */
static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
MappingUpdatePerformer mappingUpdater) throws Exception {
// Update the mappings if parsing the documents includes new dynamic updates
final Engine.Index preUpdateOperation;
final Mapping mappingUpdate;
final boolean mappingUpdateNeeded;
final SourceToParse sourceToParse =
SourceToParse.source(request.index(), request.type(), request.id(), request.source(), request.getContentType())
.routing(request.routing()).parent(request.parent());
try {
preUpdateOperation = prepareIndexOperationOnPrimary(request, primary);
mappingUpdate = preUpdateOperation.parsedDoc().dynamicMappingsUpdate();
mappingUpdateNeeded = mappingUpdate != null;
if (mappingUpdateNeeded) {
mappingUpdater.updateMappings(mappingUpdate, primary.shardId(), request.type());
}
} catch (MapperParsingException | IllegalArgumentException failure) {
return new Engine.IndexResult(failure, request.version());
// if a mapping update is required to index this request, issue a mapping update on the master, and abort the
// current indexing operation so that it can be retried with the updated mapping from the master
// The early abort uses the RetryOnPrimaryException, but any other exception would be fine as well.
return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
request.getAutoGeneratedTimestamp(), request.isRetry(), update -> {
mappingUpdater.updateMappings(update, primary.shardId(), sourceToParse.type());
throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated");
});
} catch (ReplicationOperation.RetryOnPrimaryException e) {
return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
request.getAutoGeneratedTimestamp(), request.isRetry(), update -> mappingUpdater.verifyMappings(update, primary.shardId()));
}
// Verify that there are no more mappings that need to be applied. If there are failures, a
// ReplicationOperation.RetryOnPrimaryException is thrown.
final Engine.Index operation;
if (mappingUpdateNeeded) {
try {
operation = prepareIndexOperationOnPrimary(request, primary);
mappingUpdater.verifyMappings(operation.parsedDoc().dynamicMappingsUpdate(), primary.shardId());
} catch (MapperParsingException | IllegalStateException e) {
// there was an error in parsing the document that was not because
// of pending mapping updates, so return a failure for the result
return new Engine.IndexResult(e, request.version());
}
} else {
// There was no mapping update, the operation is the same as the pre-update version.
operation = preUpdateOperation;
}
return primary.index(operation);
}
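The same update-then-retry choreography (push the dynamic mapping update to the master, abort with RetryOnPrimaryException, re-apply, then verify nothing is left) also drives the delete path below. A hedged sketch of that shape; ShardWrite and applyWithMappingRetry are illustrative names, not part of this change:

// Hypothetical functional interface over applyIndexOperationOnPrimary/applyDeleteOperationOnPrimary.
@FunctionalInterface
interface ShardWrite {
    Engine.Result apply(java.util.function.Consumer<Mapping> onMappingUpdate) throws Exception;
}

// First pass: ship any dynamic mapping update to the master and abort. Second pass:
// re-apply; verifyMappings throws RetryOnPrimaryException if updates are still pending.
static Engine.Result applyWithMappingRetry(ShardId shardId, String type, ShardWrite write,
                                           MappingUpdatePerformer mappingUpdater) throws Exception {
    try {
        return write.apply(update -> {
            mappingUpdater.updateMappings(update, shardId, type);
            throw new ReplicationOperation.RetryOnPrimaryException(shardId, "Mapping updated");
        });
    } catch (ReplicationOperation.RetryOnPrimaryException e) {
        return write.apply(update -> mappingUpdater.verifyMappings(update, shardId));
    }
}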
private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary,
final MappingUpdatePerformer mappingUpdater) throws Exception {
boolean mappingUpdateNeeded = false;
if (primary.indexSettings().isSingleType()) {
// When there is a single type, the unique identifier is only composed of the _id,
// so there is no way to differentiate foo#1 from bar#1. This is especially an issue
// if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
// _type in the uid it might look like we are reindexing the same document, which
// would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
// In order to work around this issue, we make deletions create types. This way, we
// fail if index and delete operations do not use the same type.
try {
Mapping update = primary.mapperService().documentMapperWithAutoCreate(request.type()).getMapping();
if (update != null) {
mappingUpdateNeeded = true;
MappingUpdatePerformer mappingUpdater) throws Exception {
try {
return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(),
update -> {
mappingUpdater.updateMappings(update, primary.shardId(), request.type());
}
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.DeleteResult(e, request.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO, false);
}
throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated");
});
} catch (ReplicationOperation.RetryOnPrimaryException e) {
return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(),
update -> mappingUpdater.verifyMappings(update, primary.shardId()));
}
if (mappingUpdateNeeded) {
Mapping update = primary.mapperService().documentMapperWithAutoCreate(request.type()).getMapping();
mappingUpdater.verifyMappings(update, primary.shardId());
}
final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType());
return primary.delete(delete);
}
private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request,
final long primaryTerm, IndexShard replica) throws Exception {
if (replica.indexSettings().isSingleType()) {
// We need to wait for the replica to have the mappings
Mapping update;
try {
update = replica.mapperService().documentMapperWithAutoCreate(request.type()).getMapping();
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.DeleteResult(e, request.version(), primaryResponse.getSeqNo(), false);
}
if (update != null) {
final ShardId shardId = replica.shardId();
throw new RetryOnReplicaException(shardId,
"Mappings are not available on the replica yet, triggered update: " + update);
}
}
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
final long version = primaryResponse.getVersion();
assert versionType.validateVersionForWrites(version);
final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
primaryResponse.getSeqNo(), primaryTerm, version, versionType);
return replica.delete(delete);
}
private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, long primaryTerm,
IndexShard replica) throws IOException {
final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOpOnReplica(
primaryFailure.getSeqNo(), primaryTerm, primaryFailure.getMessage());
return replica.markSeqNoAsNoOp(noOp);
}
class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {
public void updateMappings(final Mapping update, final ShardId shardId,
final String type) throws Exception {
public void updateMappings(final Mapping update, final ShardId shardId, final String type) {
if (update != null) {
// can throw a timeout exception when updating mappings or an ISE when attempting to
// update default mappings; both are bubbled up
@ -691,8 +576,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
}
}
public void verifyMappings(Mapping update,
final ShardId shardId) throws Exception {
public void verifyMappings(final Mapping update, final ShardId shardId) {
if (update != null) {
throw new ReplicationOperation.RetryOnPrimaryException(shardId,
"Dynamic mappings are not available on the node that holds the primary yet");

View File

@ -21,18 +21,20 @@ package org.elasticsearch.action.get;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.get.GetField;
import org.elasticsearch.index.get.GetResult;
import java.io.IOException;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
@ -42,7 +44,7 @@ import java.util.Objects;
* @see GetRequest
* @see org.elasticsearch.client.Client#get(GetRequest)
*/
public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContentObject {
public class GetResponse extends ActionResponse implements Iterable<DocumentField>, ToXContentObject {
GetResult getResult;
@ -136,11 +138,11 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.getSource();
}
public Map<String, GetField> getFields() {
public Map<String, DocumentField> getFields() {
return getResult.getFields();
}
public GetField getField(String name) {
public DocumentField getField(String name) {
return getResult.field(name);
}
@ -149,7 +151,7 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
*/
@Deprecated
@Override
public Iterator<GetField> iterator() {
public Iterator<DocumentField> iterator() {
return getResult.iterator();
}
@ -158,8 +160,32 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.toXContent(builder, params);
}
/**
* This method can be used to parse a {@link GetResponse} object when it has been printed out
* as xcontent using the {@link #toXContent(XContentBuilder, Params)} method.
* <p>
* For forward compatibility reasons this method might not fail if it tries to parse a field it
* doesn't know. But before returning the result it will check that enough information was
* parsed to return a valid {@link GetResponse} instance, and throw a {@link ParsingException}
* otherwise. This is the case when we get a 404 back, which can be parsed as a normal
* {@link GetResponse} with found set to false, or as an elasticsearch exception. The caller
* of this method needs a way to figure out whether we got back a valid get response, which
* can be done by catching ParsingException.
*
* @param parser {@link XContentParser} to parse the response from
* @return a {@link GetResponse}
* @throws IOException if an I/O exception occurs during the parsing
*/
public static GetResponse fromXContent(XContentParser parser) throws IOException {
GetResult getResult = GetResult.fromXContent(parser);
// At this stage we ensure that we parsed enough information to return
// a valid GetResponse instance. If it's not the case, we throw an
// exception so that callers know it and can handle it correctly.
if (getResult.getIndex() == null && getResult.getType() == null && getResult.getId() == null) {
throw new ParsingException(parser.getTokenLocation(),
String.format(Locale.ROOT, "Missing required fields [%s,%s,%s]", GetResult._INDEX, GetResult._TYPE, GetResult._ID));
}
return new GetResponse(getResult);
}
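A hedged usage sketch; createJsonParser is a hypothetical helper that wraps a raw response body in an XContentParser, since constructing one is outside the scope of this diff:

try (XContentParser parser = createJsonParser(responseBody)) { // hypothetical helper
    GetResponse response = GetResponse.fromXContent(parser);
    if (response.isExists()) {
        Map<String, Object> source = response.getSource();
        // use the document source
    }
} catch (ParsingException e) {
    // not a valid get response: e.g. a 404 that carried an exception body
    // rather than a regular "found": false document
}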

View File

@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.resync;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import java.util.List;
public final class ResyncReplicationRequest extends ReplicatedWriteRequest<ResyncReplicationRequest> {
private List<Translog.Operation> operations;
ResyncReplicationRequest() {
super();
}
public ResyncReplicationRequest(ShardId shardId, List<Translog.Operation> operations) {
super(shardId);
this.operations = operations;
}
public List<Translog.Operation> getOperations() {
return operations;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
operations = in.readList(Translog.Operation::readType);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeList(operations);
}
@Override
public String toString() {
return "TransportResyncReplicationAction.Request{" +
"shardId=" + shardId +
", timeout=" + timeout +
", index='" + index + '\'' +
", ops=" + operations.size() +
"}";
}
}
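A hedged serialization round-trip, of the kind a same-package unit test might perform; shardId and operations stand for any valid values, and BytesStreamOutput is the usual in-memory StreamOutput:

ResyncReplicationRequest original = new ResyncReplicationRequest(shardId, operations);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);                                          // writes the operations via writeList
ResyncReplicationRequest copy = new ResyncReplicationRequest(); // package-private constructor
copy.readFrom(out.bytes().streamInput());                       // reads back via Translog.Operation::readType
assert copy.getOperations().size() == original.getOperations().size();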

View File

@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.resync;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
public final class ResyncReplicationResponse extends ReplicationResponse implements WriteResponse {
@Override
public void setForcedRefresh(boolean forcedRefresh) {
// ignore
}
}

View File

@ -0,0 +1,175 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.resync;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
public class TransportResyncReplicationAction extends TransportWriteAction<ResyncReplicationRequest,
ResyncReplicationRequest, ResyncReplicationResponse> implements PrimaryReplicaSyncer.SyncAction {
public static String ACTION_NAME = "indices:admin/seq_no/resync";
@Inject
public TransportResyncReplicationAction(Settings settings, TransportService transportService,
ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
ShardStateAction shardStateAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.BULK);
}
@Override
protected void registerRequestHandlers(String actionName, TransportService transportService, Supplier<ResyncReplicationRequest> request,
Supplier<ResyncReplicationRequest> replicaRequest, String executor) {
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
// we should never reject resync because of thread pool capacity on primary
transportService.registerRequestHandler(transportPrimaryAction,
() -> new ConcreteShardRequest<>(request),
executor, true, true,
new PrimaryOperationTransportHandler());
transportService.registerRequestHandler(transportReplicaAction,
() -> new ConcreteReplicaRequest<>(replicaRequest),
executor, true, true,
new ReplicaOperationTransportHandler());
}
@Override
protected ResyncReplicationResponse newResponseInstance() {
return new ResyncReplicationResponse();
}
@Override
protected ReplicationOperation.Replicas newReplicasProxy() {
// We treat the resync as best-effort for now and don't mark unavailable shard copies as stale.
return new ReplicasProxy();
}
@Override
protected void sendReplicaRequest(
final ConcreteReplicaRequest<ResyncReplicationRequest> replicaRequest,
final DiscoveryNode node,
final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
super.sendReplicaRequest(replicaRequest, node, listener);
} else {
listener.onResponse(new ReplicaResponse(replicaRequest.getTargetAllocationID(), SequenceNumbersService.UNASSIGNED_SEQ_NO));
}
}
@Override
protected WritePrimaryResult<ResyncReplicationRequest, ResyncReplicationResponse> shardOperationOnPrimary(
ResyncReplicationRequest request, IndexShard primary) throws Exception {
final ResyncReplicationRequest replicaRequest = performOnPrimary(request, primary);
return new WritePrimaryResult<>(replicaRequest, new ResyncReplicationResponse(), null, null, primary, logger);
}
public static ResyncReplicationRequest performOnPrimary(ResyncReplicationRequest request, IndexShard primary) {
return request;
}
@Override
protected WriteReplicaResult shardOperationOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception {
Translog.Location location = performOnReplica(request, replica);
return new WriteReplicaResult(request, location, null, replica, logger);
}
public static Translog.Location performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception {
Translog.Location location = null;
for (Translog.Operation operation : request.getOperations()) {
try {
final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA,
update -> {
throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
"Mappings are not available on the replica yet, triggered update: " + update);
});
location = syncOperationResultOrThrow(operationResult, location);
} catch (Exception e) {
// if it's not a failure to be ignored, let it bubble up
if (!TransportActions.isShardNotAvailableException(e)) {
throw e;
}
}
}
return location;
}
@Override
public void sync(ResyncReplicationRequest request, Task parentTask, String primaryAllocationId,
ActionListener<ResyncReplicationResponse> listener) {
// skip reroute phase
transportService.sendChildRequest(
clusterService.localNode(),
transportPrimaryAction,
new ConcreteShardRequest<>(request, primaryAllocationId),
parentTask,
transportOptions,
new TransportResponseHandler<ResyncReplicationResponse>() {
@Override
public ResyncReplicationResponse newInstance() {
return newResponseInstance();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(ResyncReplicationResponse response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
final Throwable cause = exp.unwrapCause();
if (TransportActions.isShardNotAvailableException(cause)) {
logger.trace("primary became unavailable during resync, ignoring", exp);
} else {
listener.onFailure(exp);
}
}
});
}
}

View File

@ -71,7 +71,6 @@ public class SearchTransportService extends AbstractComponent {
public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]";
public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]";
public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]";
public static final String QUERY_FETCH_ACTION_NAME = "indices:data/read/search[phase/query+fetch]";
public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]";
public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]";
public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";
@ -117,26 +116,11 @@ public class SearchTransportService extends AbstractComponent {
public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
final SearchActionListener<SearchPhaseResult> listener) {
// we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request
// this used to be the QUERY_AND_FETCH which doesn't exists anymore.
// this used to be the QUERY_AND_FETCH which doesn't exist anymore.
final boolean fetchDocuments = request.numberOfShards() == 1;
Supplier<SearchPhaseResult> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
if (connection.getVersion().before(Version.V_5_3_0) && fetchDocuments) {
// this is a BWC layer for pre 5.3 indices
if (request.scroll() != null) {
/**
* This is needed for nodes pre 5.3 when the single shard optimization is used.
* These nodes will set the last emitted doc only if the removed `query_and_fetch` search type is set
* in the request. See {@link SearchType}.
*/
request.searchType(SearchType.QUERY_AND_FETCH);
}
// TODO this BWC layer can be removed once this is back-ported to 5.3
transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task,
new ActionListenerResponseHandler<>(listener, supplier));
} else {
transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
new ActionListenerResponseHandler<>(listener, supplier));
}
transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
new ActionListenerResponseHandler<>(listener, supplier));
}
public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task,
@ -353,20 +337,6 @@ public class SearchTransportService extends AbstractComponent {
});
TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new);
// this is for BWC with 5.3 until the QUERY_AND_FETCH removal change has been back-ported to 5.x
// in 5.3 we will only execute a `indices:data/read/search[phase/query+fetch]` if the node is pre 5.3
// such that we can remove this after the back-port.
transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
assert request.numberOfShards() == 1 : "expected single shard request but got: " + request.numberOfShards();
SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task);
channel.sendResponse(result);
}
});
TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_ACTION_NAME, QueryFetchSearchResult::new);
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
new TaskAwareTransportRequestHandler<InternalScrollSearchRequest>() {
@Override

View File

@ -33,7 +33,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -184,7 +183,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
if (remoteClusterIndices.isEmpty()) {
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteClusterIndices, Collections.emptyList(),
(clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener);
} else {
remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(),
@ -193,7 +192,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators,
executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteClusterIndices, remoteShardIterators,
clusterNodeLookup, clusterState, remoteAliasFilters, listener);
}, listener::onFailure));
}
@ -249,16 +248,16 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}
private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, OriginalIndices localIndices,
List<SearchShardIterator> remoteShardIterators, BiFunction<String, String, DiscoveryNode> remoteConnections,
ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
ActionListener<SearchResponse> listener) {
Map<String, OriginalIndices> remoteClusterIndices, List<SearchShardIterator> remoteShardIterators,
BiFunction<String, String, DiscoveryNode> remoteConnections, ClusterState clusterState,
Map<String, AliasFilter> remoteAliasMap, ActionListener<SearchResponse> listener) {
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all APIs will deal with now in the same way instead
// of just the _search API
final Index[] indices;
if (localIndices.indices().length == 0 && remoteShardIterators.size() > 0) {
if (localIndices.indices().length == 0 && remoteClusterIndices.isEmpty() == false) {
indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified
} else {
indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),

View File

@ -40,7 +40,6 @@ import java.util.Arrays;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
/**
* Base class for write action responses.
@ -187,8 +186,8 @@ public class ReplicationResponse extends ActionResponse {
total = parser.intValue();
} else if (SUCCESSFUL.equals(currentFieldName)) {
successful = parser.intValue();
} else if (FAILED.equals(currentFieldName) == false) {
throwUnknownField(currentFieldName, parser.getTokenLocation());
} else {
parser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (FAILURES.equals(currentFieldName)) {
@ -197,8 +196,10 @@ public class ReplicationResponse extends ActionResponse {
failuresList.add(Failure.fromXContent(parser));
}
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
} else if (token == XContentParser.Token.START_OBJECT) {
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
}
Failure[] failures = EMPTY;
@ -365,15 +366,15 @@ public class ReplicationResponse extends ActionResponse {
status = RestStatus.valueOf(parser.text());
} else if (PRIMARY.equals(currentFieldName)) {
primary = parser.booleanValue();
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (REASON.equals(currentFieldName)) {
reason = ElasticsearchException.fromXContent(parser);
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
parser.skipChildren(); // skip potential inner objects for forward compatibility
}
} else if (token == XContentParser.Token.START_ARRAY) {
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
}
return new Failure(new ShardId(shardIndex, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), nodeId, reason, status, primary);
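Both hunks above swap throwUnknownField for parser.skipChildren(), so responses from newer nodes that add fields no longer break older parsers. A minimal sketch of the idiom; the "total" field is just an example, and known nested fields (such as "reason" above) would get their own branch before the fallthrough:

int total = 0;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
    if (token == XContentParser.Token.FIELD_NAME) {
        currentFieldName = parser.currentName();
    } else if (token.isValue()) {
        if ("total".equals(currentFieldName)) {
            total = parser.intValue();
        } // unknown scalar fields are simply ignored
    } else if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.START_ARRAY) {
        parser.skipChildren(); // skip unknown nested structures for forward compatibility
    }
}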

View File

@ -95,17 +95,17 @@ public abstract class TransportReplicationAction<
Response extends ReplicationResponse
> extends TransportAction<Request, Response> {
private final TransportService transportService;
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final ShardStateAction shardStateAction;
private final IndicesService indicesService;
private final TransportRequestOptions transportOptions;
private final String executor;
protected final IndicesService indicesService;
protected final TransportRequestOptions transportOptions;
protected final String executor;
// package private for testing
private final String transportReplicaAction;
private final String transportPrimaryAction;
private final ReplicationOperation.Replicas replicasProxy;
protected final String transportReplicaAction;
protected final String transportPrimaryAction;
protected final ReplicationOperation.Replicas replicasProxy;
protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,
@ -122,6 +122,15 @@ public abstract class TransportReplicationAction<
this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
registerRequestHandlers(actionName, transportService, request, replicaRequest, executor);
this.transportOptions = transportOptions();
this.replicasProxy = newReplicasProxy();
}
protected void registerRequestHandlers(String actionName, TransportService transportService, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor) {
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
transportService.registerRequestHandler(transportPrimaryAction, () -> new ConcreteShardRequest<>(request), executor,
new PrimaryOperationTransportHandler());
@ -130,10 +139,6 @@ public abstract class TransportReplicationAction<
() -> new ConcreteReplicaRequest<>(replicaRequest),
executor, true, true,
new ReplicaOperationTransportHandler());
this.transportOptions = transportOptions();
this.replicasProxy = newReplicasProxy();
}
@Override
@ -178,7 +183,7 @@ public abstract class TransportReplicationAction<
/**
* Synchronously execute the specified replica operation. This is done under a permit from
* {@link IndexShard#acquireReplicaOperationPermit(long, ActionListener, String)}.
* {@link IndexShard#acquireReplicaOperationPermit(long, long, ActionListener, String)}.
*
* @param shardRequest the request to the replica shard
* @param replica the replica shard to perform the operation on
@ -217,7 +222,12 @@ public abstract class TransportReplicationAction<
|| TransportActions.isShardNotAvailableException(e);
}
class OperationTransportHandler implements TransportRequestHandler<Request> {
protected class OperationTransportHandler implements TransportRequestHandler<Request> {
public OperationTransportHandler() {
}
@Override
public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
execute(task, request, new ActionListener<Response>() {
@ -250,7 +260,12 @@ public abstract class TransportReplicationAction<
}
}
class PrimaryOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<Request>> {
protected class PrimaryOperationTransportHandler implements TransportRequestHandler<ConcreteShardRequest<Request>> {
public PrimaryOperationTransportHandler() {
}
@Override
public void messageReceived(final ConcreteShardRequest<Request> request, final TransportChannel channel) throws Exception {
throw new UnsupportedOperationException("the task parameter is required for this operation");
@ -314,7 +329,6 @@ public abstract class TransportReplicationAction<
});
} else {
setPhase(replicationTask, "primary");
final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex());
final ActionListener<Response> listener = createResponseListener(primaryShardReference);
createReplicatedOperation(request,
ActionListener.wrap(result -> result.respond(listener), listener::onFailure),
@ -437,7 +451,7 @@ public abstract class TransportReplicationAction<
}
}
class ReplicaOperationTransportHandler implements TransportRequestHandler<ConcreteReplicaRequest<ReplicaRequest>> {
public class ReplicaOperationTransportHandler implements TransportRequestHandler<ConcreteReplicaRequest<ReplicaRequest>> {
@Override
public void messageReceived(
@ -507,7 +521,6 @@ public abstract class TransportReplicationAction<
@Override
public void onResponse(Releasable releasable) {
try {
replica.updateGlobalCheckpointOnReplica(globalCheckpoint);
final ReplicaResult replicaResult = shardOperationOnReplica(request, replica);
releasable.close(); // release shard operation lock before responding to caller
final TransportReplicationAction.ReplicaResponse response =
@ -582,7 +595,7 @@ public abstract class TransportReplicationAction<
throw new ShardNotFoundException(this.replica.shardId(), "expected aID [{}] but found [{}]", targetAllocationID,
actualAllocationId);
}
replica.acquireReplicaOperationPermit(request.primaryTerm, this, executor);
replica.acquireReplicaOperationPermit(request.primaryTerm, globalCheckpoint, this, executor);
}
/**
@ -1049,7 +1062,11 @@ public abstract class TransportReplicationAction<
* shards. It also encapsulates the logic required for failing the replica
* if deemed necessary as well as marking it as stale when needed.
*/
class ReplicasProxy implements ReplicationOperation.Replicas<ReplicaRequest> {
protected class ReplicasProxy implements ReplicationOperation.Replicas<ReplicaRequest> {
public ReplicasProxy() {
}
@Override
public void performOn(
@ -1112,13 +1129,13 @@ public abstract class TransportReplicationAction<
private R request;
ConcreteShardRequest(Supplier<R> requestSupplier) {
public ConcreteShardRequest(Supplier<R> requestSupplier) {
request = requestSupplier.get();
// null now, but will be populated by reading from the streams
targetAllocationID = null;
}
ConcreteShardRequest(R request, String targetAllocationID) {
public ConcreteShardRequest(R request, String targetAllocationID) {
Objects.requireNonNull(request);
Objects.requireNonNull(targetAllocationID);
this.request = request;

View File

@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@ -32,6 +33,11 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
@ -43,6 +49,7 @@ import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@ -67,6 +74,37 @@ public abstract class TransportWriteAction<
indexNameExpressionResolver, request, replicaRequest, executor);
}
/** Syncs operation result to the translog or throws a shard not available failure */
protected static Location syncOperationResultOrThrow(final Engine.Result operationResult,
final Location currentLocation) throws Exception {
final Location location;
if (operationResult.hasFailure()) {
// check if any transient write operation failures should be bubbled up
Exception failure = operationResult.getFailure();
assert failure instanceof MapperParsingException : "expected a mapper parsing failure, got " + failure;
if (!TransportActions.isShardNotAvailableException(failure)) {
throw failure;
} else {
location = currentLocation;
}
} else {
location = locationToSync(currentLocation, operationResult.getTranslogLocation());
}
return location;
}
protected static Location locationToSync(Location current, Location next) {
/* Here we are moving forward in the translog with each operation. Under the hood this might
* cross translog files, which is ok since from the user's perspective the translog is like a
* tape where only the highest location needs to be fsynced in order to sync all previous
* locations even though they are not in the same file. When the translog rolls over to a new
* file, the previous file is fsynced after closing if needed. */
assert next != null : "next operation can't be null";
assert current == null || current.compareTo(next) < 0 :
"translog locations are not increasing";
return next;
}
@Override
protected ReplicationOperation.Replicas newReplicasProxy() {
return new WriteActionReplicasProxy();
@ -356,8 +394,8 @@ public abstract class TransportWriteAction<
createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
}
public ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer<Exception> onPrimaryDemoted,
final Consumer<Exception> onIgnoredFailure) {
private ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer<Exception> onPrimaryDemoted,
final Consumer<Exception> onIgnoredFailure) {
return new ShardStateAction.Listener() {
@Override
public void onSuccess() {
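The locationToSync contract above treats the translog as a tape: locations are totally ordered even across file rollovers, so fsyncing only the highest location covers everything before it. A minimal, self-contained sketch of that invariant; Loc here is a hypothetical stand-in for Translog.Location, not the real class.

import java.util.Arrays;
import java.util.List;

final class TranslogLocationSketch {
    // Hypothetical location: (generation, offset), ordered like a position on a tape.
    static final class Loc implements Comparable<Loc> {
        final long generation;
        final long offset;
        Loc(long generation, long offset) { this.generation = generation; this.offset = offset; }
        @Override
        public int compareTo(Loc o) {
            int cmp = Long.compare(generation, o.generation);
            return cmp != 0 ? cmp : Long.compare(offset, o.offset);
        }
        @Override
        public String toString() { return generation + "/" + offset; }
    }

    // Mirrors locationToSync: every operation must move the tape strictly forward.
    static Loc locationToSync(Loc current, Loc next) {
        assert next != null : "next operation can't be null";
        assert current == null || current.compareTo(next) < 0 : "translog locations are not increasing";
        return next;
    }

    public static void main(String[] args) {
        List<Loc> ops = Arrays.asList(new Loc(1, 0), new Loc(1, 42), new Loc(2, 0));
        Loc current = null;
        for (Loc op : ops) {
            current = locationToSync(current, op);
        }
        // Syncing the highest location implicitly covers all earlier ones.
        System.out.println("fsync up to " + current);
    }
}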

View File

@ -44,9 +44,6 @@ public class BaseTasksResponse extends ActionResponse {
private List<TaskOperationFailure> taskFailures;
private List<FailedNodeException> nodeFailures;
public BaseTasksResponse() {
}
public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures) {
this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures));
this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures));

View File

@ -184,7 +184,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
(request.fields() != null && request.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent =
XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType());
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {
update.setGetResult(null);
}
@ -201,7 +201,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
bulkAction.execute(toSingleItemBulkRequest(indexRequest), wrapBulkResponse(
ActionListener.<IndexResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult());
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
@ -212,7 +212,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
bulkAction.execute(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse(
ActionListener.<DeleteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult());
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
@ -38,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.DocumentMissingException;
import org.elasticsearch.index.engine.DocumentSourceMissingException;
import org.elasticsearch.index.get.GetField;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
@ -314,8 +314,9 @@ public class UpdateHelper extends AbstractComponent {
* Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in an update response.
* For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in an update response
*/
public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map<String, Object> source,
XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
public static GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version,
final Map<String, Object> source, XContentType sourceContentType,
@Nullable final BytesReference sourceAsBytes) {
if ((request.fields() == null || request.fields().length == 0) &&
(request.fetchSource() == null || request.fetchSource().fetchSource() == false)) {
return null;
@ -323,7 +324,7 @@ public class UpdateHelper extends AbstractComponent {
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setSource(source);
boolean sourceRequested = false;
Map<String, GetField> fields = null;
Map<String, DocumentField> fields = null;
if (request.fields() != null && request.fields().length > 0) {
for (String field : request.fields()) {
if (field.equals("_source")) {
@ -335,12 +336,12 @@ public class UpdateHelper extends AbstractComponent {
if (fields == null) {
fields = new HashMap<>(2);
}
GetField getField = fields.get(field);
if (getField == null) {
getField = new GetField(field, new ArrayList<>(2));
fields.put(field, getField);
DocumentField documentField = fields.get(field);
if (documentField == null) {
documentField = new DocumentField(field, new ArrayList<>(2));
fields.put(field, documentField);
}
getField.getValues().add(value);
documentField.getValues().add(value);
}
}
}

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.index.analysis.compound;
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;
@ -38,7 +38,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok
protected final boolean onlyLongestMatch;
protected final CharArraySet wordList;
public AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
protected AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);

View File

@ -238,9 +238,12 @@ final class Bootstrap {
return keystore;
}
private static Environment createEnvironment(boolean foreground, Path pidFile,
SecureSettings secureSettings, Settings initialSettings) {
private static Environment createEnvironment(
final boolean foreground,
final Path pidFile,
final SecureSettings secureSettings,
final Settings initialSettings,
final Path configPath) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
Settings.Builder builder = Settings.builder();
if (pidFile != null) {
@ -250,7 +253,7 @@ final class Bootstrap {
if (secureSettings != null) {
builder.setSecureSettings(secureSettings);
}
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap());
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap(), configPath);
}
private void start() throws NodeValidationException {
@ -266,13 +269,6 @@ final class Bootstrap {
}
}
/** Set the system property before anything has a chance to trigger its use */
// TODO: why? is it just a bad default somewhere? or is it some BS around 'but the client' garbage <-- my guess
@SuppressForbidden(reason = "sets logger prefix on initialization")
static void initLoggerPrefix() {
System.setProperty("es.logger.prefix", "");
}
/**
* This method is invoked by {@link Elasticsearch#main(String[])} to startup elasticsearch.
*/
@ -281,9 +277,6 @@ final class Bootstrap {
final Path pidFile,
final boolean quiet,
final Environment initialEnv) throws BootstrapException, NodeValidationException, UserException {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
// force the class initializer for BootstrapInfo to run before
// the security manager is installed
BootstrapInfo.init();
@ -291,7 +284,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
final SecureSettings keystore = loadSecureSettings(initialEnv);
Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings());
final Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
try {
LogConfigurator.configure(environment);
} catch (IOException e) {

View File

@ -24,6 +24,7 @@ import com.sun.jna.Native;
import com.sun.jna.NativeLong;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.WString;
import com.sun.jna.win32.StdCallLibrary;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
@ -223,6 +224,17 @@ final class JNAKernel32Library {
*/
native boolean CloseHandle(Pointer handle);
/**
* Retrieves the short path form of the specified path. See
* <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa364989.aspx">{@code GetShortPathName}</a>.
*
* @param lpszLongPath the path string
* @param lpszShortPath a buffer to receive the short name
* @param cchBuffer the size of the buffer
* @return the length of the string copied into {@code lpszShortPath}, otherwise zero for failure
*/
native int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer);
/**
* Creates or opens a new job object
*

View File

@ -21,6 +21,7 @@ package org.elasticsearch.bootstrap;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.WString;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.Loggers;
@ -194,6 +195,35 @@ class JNANatives {
}
}
/**
* Retrieves the short path form of the specified path.
*
* @param path the path
* @return the short path name (or the original path if getting the short path name fails for any reason)
*/
static String getShortPathName(String path) {
assert Constants.WINDOWS;
try {
final WString longPath = new WString("\\\\?\\" + path);
// first we get the length of the buffer needed
final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0);
if (length == 0) {
logger.warn("failed to get short path name: {}", Native.getLastError());
return path;
}
final char[] shortPath = new char[length];
// knowing the length of the buffer, now we get the short name
if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) > 0) {
return Native.toString(shortPath);
} else {
logger.warn("failed to get short path name: {}", Native.getLastError());
return path;
}
} catch (final UnsatisfiedLinkError e) {
return path;
}
}
static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
// The console Ctrl handler is necessary on Windows platforms only.
if (Constants.WINDOWS) {
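getShortPathName above follows the classic Win32 two-call pattern: call once with a null buffer to learn the required length, then call again with a buffer of that size, falling back to the input on any failure. A sketch of just the pattern, with a fake native function standing in for GetShortPathNameW so no JNA dependency is needed; the 8.3 path in main is made up for illustration.

import java.util.function.BiFunction;

final class TwoCallBufferSketch {
    // Models a Win32-style call: (buffer, size) -> chars written, the required
    // length when buffer is null, or 0 on failure (like GetShortPathNameW).
    static String shortName(BiFunction<char[], Integer, Integer> nativeFn, String fallback) {
        int length = nativeFn.apply(null, 0);         // first call: ask for the length
        if (length == 0) {
            return fallback;                          // native failure: keep the original path
        }
        char[] buffer = new char[length];
        int written = nativeFn.apply(buffer, length); // second call: fill the buffer
        return written > 0 ? new String(buffer, 0, written) : fallback;
    }

    public static void main(String[] args) {
        String longPath = "C:\\Program Files\\Elasticsearch";
        String fake = "C:\\PROGRA~1\\ELASTI~1";       // made-up 8.3 form, for illustration only
        String result = shortName((buf, size) -> {
            if (buf == null) {
                return fake.length();
            }
            fake.getChars(0, fake.length(), buf, 0);
            return fake.length();
        }, longPath);
        System.out.println(result);
    }
}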

View File

@ -76,6 +76,20 @@ final class Natives {
JNANatives.tryVirtualLock();
}
/**
* Retrieves the short path form of the specified path.
*
* @param path the path
* @return the short path name (or the original path if getting the short path name fails for any reason)
*/
static String getShortPathName(final String path) {
if (!JNA_AVAILABLE) {
logger.warn("cannot obtain short path for [{}] because JNA is not avilable", path);
return path;
}
return JNANatives.getShortPathName(path);
}
static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
if (!JNA_AVAILABLE) {
logger.warn("cannot register console handler because JNA is not available");

View File

@ -256,7 +256,7 @@ final class Security {
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
addPath(policy, "path.conf'", environment.configFile(), "read,readlink");
// read-write dirs
addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete");
@ -280,26 +280,6 @@ final class Security {
throw new IllegalStateException("unable to access [" + path + "]", e);
}
}
/*
* If path.data and default.path.data are set, we need read access to the paths in default.path.data to check for the existence of
* index directories there that could have arisen from a bug in the handling of simultaneous configuration of path.data and
* default.path.data that was introduced in Elasticsearch 5.3.0.
*
* If path.data is not set then default.path.data would take precedence in setting the data paths for the environment and
* permissions would have been granted above.
*
* If path.data is not set and default.path.data is not set, then we would fallback to the default data directory under
* Elasticsearch home and again permissions would have been granted above.
*
* If path.data is set and default.path.data is not set, there is nothing to do here.
*/
if (Environment.PATH_DATA_SETTING.exists(environment.settings())
&& Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())) {
for (final String path : Environment.DEFAULT_PATH_DATA_SETTING.get(environment.settings())) {
// write permissions are not needed here, we are not going to be writing to any paths here
addPath(policy, Environment.DEFAULT_PATH_DATA_SETTING.getKey(), getPath(path), "read,readlink");
}
}
for (Path path : environment.repoFiles()) {
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}
@ -309,11 +289,6 @@ final class Security {
}
}
@SuppressForbidden(reason = "read path that is not configured in environment")
private static Path getPath(final String path) {
return PathUtils.get(path);
}
/**
* Add dynamic {@link SocketPermission}s based on HTTP and transport settings.
*
@ -427,27 +402,6 @@ final class Security {
policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions));
}
/**
* Add access to a directory iff it exists already
* @param policy current policy to add permissions to
* @param configurationName the configuration name associated with the path (for error messages only)
* @param path the path itself
* @param permissions set of file permissions to grant to the path
*/
static void addPathIfExists(Permissions policy, String configurationName, Path path, String permissions) {
if (Files.isDirectory(path)) {
// add each path twice: once for itself, again for files underneath it
policy.add(new FilePermission(path.toString(), permissions));
policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions));
try {
path.getFileSystem().provider().checkAccess(path.toRealPath(), AccessMode.READ);
} catch (IOException e) {
throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e);
}
}
}
/**
* Ensures configured directory {@code path} exists.
* @throws IOException if {@code path} exists, but is not a directory, not accessible, or broken symbolic link.

View File

@ -19,6 +19,7 @@
package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Platforms;
@ -99,7 +100,22 @@ final class Spawner implements Closeable {
private Process spawnNativePluginController(
final Path spawnPath,
final Path tmpPath) throws IOException {
final ProcessBuilder pb = new ProcessBuilder(spawnPath.toString());
final String command;
if (Constants.WINDOWS) {
/*
* We have to get the short path name or starting the process could fail due to max path limitations. The underlying issue here
* is that starting the process on Windows ultimately involves the use of CreateProcessW. CreateProcessW has a limitation that
* if its first argument (the application name) is null, then its second argument (the command line for the process to start) is
* restricted in length to 260 characters (cf. https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425.aspx). Since
* this is exactly how the JDK starts the process on Windows (cf.
* http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/windows/native/java/lang/ProcessImpl_md.c#l319), this
* limitation is in force. As such, we use the short name to avoid any such problems.
*/
command = Natives.getShortPathName(spawnPath.toString());
} else {
command = spawnPath.toString();
}
final ProcessBuilder pb = new ProcessBuilder(command);
// the only environment variable passed on is the path to the temporary directory
pb.environment().clear();
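The surrounding code starts the native controller with an emptied environment so nothing from the parent process leaks in. A runnable sketch of that ProcessBuilder usage under the same assumption; the TMPDIR variable name and the /bin/true command are illustrative choices, not taken from the diff context shown here.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

final class SpawnSketch {
    static Process spawn(Path command, Path tmpDir) throws IOException {
        ProcessBuilder pb = new ProcessBuilder(command.toString());
        pb.environment().clear();                          // start from an empty environment
        pb.environment().put("TMPDIR", tmpDir.toString()); // pass only the temporary directory
        return pb.start();
    }

    public static void main(String[] args) throws IOException {
        Process p = spawn(Paths.get("/bin/true"), Paths.get("/tmp"));
        System.out.println("spawned, alive: " + p.isAlive());
    }
}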

View File

@ -242,7 +242,6 @@ final class SystemCallFilter {
static {
Map<String,Arch> m = new HashMap<>();
m.put("amd64", new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317));
m.put("i386", new Arch(0x40000003, 0xFFFFFFFF, 2, 190, 11, 358, 354));
ARCHITECTURES = Collections.unmodifiableMap(m);
}

View File

@ -22,10 +22,14 @@ package org.elasticsearch.cli;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
@ -34,10 +38,13 @@ import java.util.Map;
public abstract class EnvironmentAwareCommand extends Command {
private final OptionSpec<KeyValuePair> settingOption;
private final OptionSpec<String> pathConfOption;
public EnvironmentAwareCommand(String description) {
super(description);
this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
this.pathConfOption =
parser.acceptsAll(Arrays.asList("c", "path.conf"), "Configure config path").withRequiredArg().ofType(String.class);
}
@Override
@ -59,17 +66,22 @@ public abstract class EnvironmentAwareCommand extends Command {
settings.put(kvp.key, kvp.value);
}
putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
execute(terminal, options, createEnv(terminal, settings));
final String pathConf = pathConfOption.value(options);
execute(terminal, options, createEnv(terminal, settings, getConfigPath(pathConf)));
}
@SuppressForbidden(reason = "need path to construct environment")
private static Path getConfigPath(final String pathConf) {
return pathConf == null ? null : Paths.get(pathConf);
}
/** Create an {@link Environment} for the command to use. Overrideable for tests. */
protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
protected Environment createEnv(Terminal terminal, Map<String, String> settings, Path configPath) {
return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings, configPath);
}
/** Ensure the given setting exists, reading it from system properties if not already set. */

View File

@ -97,7 +97,7 @@ public abstract class TransportClient extends AbstractClient {
.put(InternalSettingsPreparer.prepareSettings(settings))
.put(NetworkService.NETWORK_SERVER.getKey(), false)
.put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, plugins);
return new PluginsService(settingsBuilder.build(), null, null, null, plugins);
}
protected static Collection<Class<? extends Plugin>> addPlugins(Collection<Class<? extends Plugin>> collection,

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.action.index;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
@ -34,8 +35,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
import java.util.concurrent.TimeoutException;
/**
* Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated
* in the cluster state meta data (and broadcast to all members).
@ -77,7 +76,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception {
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
@ -86,9 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
throw new ElasticsearchTimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}
}
}

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.Diff;
@ -259,6 +258,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
Setting.Property.Dynamic,
Setting.Property.IndexScope);
/**
* an internal index format description, allowing us to find out if this index is upgraded or needs upgrading
*/
private static final String INDEX_FORMAT = "index.format";
public static final Setting<Integer> INDEX_FORMAT_SETTING =
Setting.intSetting(INDEX_FORMAT, 0, Setting.Property.IndexScope, Setting.Property.Final);
public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations";
static final String KEY_VERSION = "version";
static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards";
@ -1051,6 +1057,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
}
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards);
@ -1318,7 +1325,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
* @param sourceIndexMetadata the metadata of the source index
* @param targetNumberOfShards the total number of shards in the target index
* @return the routing factor for a shrunk index with the given number of target shards.
* @throws IllegalArgumentException if the number of source shards is greater than the number of target shards or if the source shards
* @throws IllegalArgumentException if the number of source shards is less than the number of target shards or if the source shards
* are not divisible by the number of target shards.
*/
public static int getRoutingFactor(IndexMetaData sourceIndexMetadata, int targetNumberOfShards) {
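The contract documented on getRoutingFactor reduces to integer arithmetic: the source shard count must be a multiple of the target shard count and at least as large, and the factor is their quotient. A sketch of just that check, detached from IndexMetaData:

final class RoutingFactorSketch {
    static int routingFactor(int sourceShards, int targetShards) {
        if (sourceShards < targetShards || sourceShards % targetShards != 0) {
            throw new IllegalArgumentException(
                    "cannot shrink " + sourceShards + " shards into " + targetShards);
        }
        return sourceShards / targetShards;
    }

    public static void main(String[] args) {
        // 8 source shards shrink into 2 target shards: each target covers 4 source shards.
        System.out.println(routingFactor(8, 2));
    }
}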

View File

@ -29,8 +29,6 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
@ -58,8 +56,6 @@ public class IndexNameExpressionResolver extends AbstractComponent {
private final List<ExpressionResolver> expressionResolvers;
private final DateMathExpressionResolver dateMathExpressionResolver;
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(IndexNameExpressionResolver.class));
public IndexNameExpressionResolver(Settings settings) {
super(settings);
@ -592,7 +588,6 @@ public class IndexNameExpressionResolver extends AbstractComponent {
private Set<String> innerResolve(Context context, List<String> expressions, IndicesOptions options, MetaData metaData) {
Set<String> result = null;
boolean wildcardSeen = false;
boolean plusSeen = false;
for (int i = 0; i < expressions.size(); i++) {
String expression = expressions.get(i);
if (aliasOrIndexExists(metaData, expression)) {
@ -605,14 +600,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
throw infe(expression);
}
boolean add = true;
if (expression.charAt(0) == '+') {
// if its the first, add empty result set
plusSeen = true;
if (i == 0) {
result = new HashSet<>();
}
expression = expression.substring(1);
} else if (expression.charAt(0) == '-') {
if (expression.charAt(0) == '-') {
// if there is a negation without a wildcard being previously seen, add it verbatim,
// otherwise return the expression
if (wildcardSeen) {
@ -655,9 +643,6 @@ public class IndexNameExpressionResolver extends AbstractComponent {
wildcardSeen = true;
}
}
if (plusSeen) {
DEPRECATION_LOGGER.deprecated("support for '+' as part of index expressions is deprecated");
}
return result;
}

View File

@ -387,6 +387,14 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
throws IOException {
builder.startObject(indexTemplateMetaData.name());
toInnerXContent(indexTemplateMetaData, builder, params);
builder.endObject();
}
public static void toInnerXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params)
throws IOException {
builder.field("order", indexTemplateMetaData.order());
if (indexTemplateMetaData.version() != null) {
builder.field("version", indexTemplateMetaData.version());
@ -430,8 +438,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();
builder.endObject();
}
public static IndexTemplateMetaData fromXContent(XContentParser parser, String templateName) throws IOException {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diff;
@ -170,7 +171,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
/**
* Converts the serialized compressed form of the mappings into a parsed map.
*/
public Map<String, Object> sourceAsMap() throws IOException {
public Map<String, Object> sourceAsMap() throws ElasticsearchParseException {
Map<String, Object> mapping = XContentHelper.convertToMap(source.compressedReference(), true).v2();
if (mapping.size() == 1 && mapping.containsKey(type())) {
// the type name is the root value, reduce it
@ -182,7 +183,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
/**
* Converts the serialized compressed form of the mappings into a parsed map.
*/
public Map<String, Object> getSourceAsMap() throws IOException {
public Map<String, Object> getSourceAsMap() throws ElasticsearchParseException {
return sourceAsMap();
}

View File

@ -91,6 +91,7 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import java.util.stream.IntStream;
import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS;
@ -340,19 +341,44 @@ public class MetaDataCreateIndexService extends AbstractComponent {
indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
final Index shrinkFromIndex = request.shrinkFrom();
int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());;
if (shrinkFromIndex != null) {
prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex,
request.index());
IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
final IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index());
final int routingNumShards;
if (shrinkFromIndex == null) {
routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
} else {
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
routingNumShards = sourceMetaData.getRoutingNumShards();
}
tmpImdBuilder.setRoutingNumShards(routingNumShards);
if (shrinkFromIndex != null) {
prepareShrinkIndexSettings(
currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index());
}
final Settings actualIndexSettings = indexSettingsBuilder.build();
tmpImdBuilder.settings(actualIndexSettings);
if (shrinkFromIndex != null) {
/*
* We need to arrange that the primary term on all the shards in the shrunken index is at least as large as
* the maximum primary term on all the shards in the source index. This ensures that we have correct
* document-level semantics regarding sequence numbers in the shrunken index.
*/
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
final long primaryTerm =
IntStream
.range(0, sourceMetaData.getNumberOfShards())
.mapToLong(sourceMetaData::primaryTerm)
.max()
.getAsLong();
for (int shardId = 0; shardId < tmpImdBuilder.numberOfShards(); shardId++) {
tmpImdBuilder.primaryTerm(shardId, primaryTerm);
}
}
Settings actualIndexSettings = indexSettingsBuilder.build();
IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index())
.setRoutingNumShards(routingNumShards);
// Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
final IndexMetaData tmpImd = tmpImdBuilder.build();
ActiveShardCount waitForActiveShards = request.waitForActiveShards();
if (waitForActiveShards == ActiveShardCount.DEFAULT) {
waitForActiveShards = tmpImd.getWaitForActiveShards();
@ -408,6 +434,11 @@ public class MetaDataCreateIndexService extends AbstractComponent {
final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index())
.settings(actualIndexSettings)
.setRoutingNumShards(routingNumShards);
for (int shardId = 0; shardId < tmpImd.getNumberOfShards(); shardId++) {
indexMetaDataBuilder.primaryTerm(shardId, tmpImd.primaryTerm(shardId));
}
for (MappingMetaData mappingMd : mappingsMetaData.values()) {
indexMetaDataBuilder.putMapping(mappingMd);
}
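The shrink branch above seeds every shard of the shrunken index with the maximum primary term found across the source shards, keeping sequence-number semantics correct. The computation in isolation, assuming the source terms are already at hand:

import java.util.stream.LongStream;

final class ShrinkPrimaryTermSketch {
    static long maxPrimaryTerm(long[] sourcePrimaryTerms) {
        // Every target shard starts at (at least) the largest source primary term.
        return LongStream.of(sourcePrimaryTerms).max().getAsLong();
    }

    public static void main(String[] args) {
        System.out.println(maxPrimaryTerm(new long[] {3, 7, 5})); // prints 7
    }
}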

View File

@ -0,0 +1,268 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.UnaryOperator;
import static java.util.Collections.singletonMap;
/**
* Upgrades Templates on behalf of installed {@link Plugin}s when a node joins the cluster
*/
public class TemplateUpgradeService extends AbstractComponent implements ClusterStateListener {
private final UnaryOperator<Map<String, IndexTemplateMetaData>> indexTemplateMetaDataUpgraders;
public final ClusterService clusterService;
public final ThreadPool threadPool;
public final Client client;
private final AtomicInteger updatesInProgress = new AtomicInteger();
private ImmutableOpenMap<String, IndexTemplateMetaData> lastTemplateMetaData;
public TemplateUpgradeService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool,
Collection<UnaryOperator<Map<String, IndexTemplateMetaData>>> indexTemplateMetaDataUpgraders) {
super(settings);
this.client = client;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.indexTemplateMetaDataUpgraders = templates -> {
Map<String, IndexTemplateMetaData> upgradedTemplates = new HashMap<>(templates);
for (UnaryOperator<Map<String, IndexTemplateMetaData>> upgrader : indexTemplateMetaDataUpgraders) {
upgradedTemplates = upgrader.apply(upgradedTemplates);
}
return upgradedTemplates;
};
clusterService.addListener(this);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
ClusterState state = event.state();
if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
// wait until the gateway has recovered from disk, otherwise we may think the index templates are missing
// while they actually do exist
return;
}
if (updatesInProgress.get() > 0) {
// we are already running some updates - skip this cluster state update
return;
}
ImmutableOpenMap<String, IndexTemplateMetaData> templates = state.getMetaData().getTemplates();
if (templates == lastTemplateMetaData) {
// we already checked these sets of templates - no reason to check it again
// we can do identity check here because due to cluster state diffs the actual map will not change
// if there were no changes
return;
}
if (shouldLocalNodeUpdateTemplates(state.nodes()) == false) {
return;
}
lastTemplateMetaData = templates;
Optional<Tuple<Map<String, BytesReference>, Set<String>>> changes = calculateTemplateChanges(templates);
if (changes.isPresent()) {
logger.info("Starting template upgrade to version {}, {} templates will be updated and {} will be removed",
Version.CURRENT,
changes.get().v1().size(),
changes.get().v2().size());
if (updatesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size())) {
threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2()));
}
}
}
/**
* Checks if the current node should update the templates
*
* If the master has the newest version in the cluster, it will be the dedicated template updater.
* Otherwise the node with the highest id among the nodes with the highest version should update the templates.
*/
boolean shouldLocalNodeUpdateTemplates(DiscoveryNodes nodes) {
DiscoveryNode localNode = nodes.getLocalNode();
// Only data and master nodes should update the template
if (localNode.isDataNode() || localNode.isMasterNode()) {
DiscoveryNode masterNode = nodes.getMasterNode();
if (masterNode == null) {
return false;
}
Version maxVersion = nodes.getLargestNonClientNodeVersion();
if (maxVersion.equals(masterNode.getVersion())) {
// If the master has the latest version - we will allow it to handle the update
return nodes.isLocalNodeElectedMaster();
} else {
if (maxVersion.equals(localNode.getVersion()) == false) {
// The local node doesn't have the latest version - not going to update
return false;
}
for (ObjectCursor<DiscoveryNode> node : nodes.getMasterAndDataNodes().values()) {
if (node.value.getVersion().equals(maxVersion) && node.value.getId().compareTo(localNode.getId()) > 0) {
// We have a node with a higher id than mine - it should update
return false;
}
}
// We have the highest version and highest id - we should perform the update
return true;
}
} else {
return false;
}
}
void updateTemplates(Map<String, BytesReference> changes, Set<String> deletions) {
for (Map.Entry<String, BytesReference> change : changes.entrySet()) {
PutIndexTemplateRequest request =
new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON);
request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
client.admin().indices().putTemplate(request, new ActionListener<PutIndexTemplateResponse>() {
@Override
public void onResponse(PutIndexTemplateResponse response) {
if (updatesInProgress.decrementAndGet() == 0) {
logger.info("Finished upgrading templates to version {}", Version.CURRENT);
}
if (response.isAcknowledged() == false) {
logger.warn("Error updating template [{}], request was not acknowledged", change.getKey());
}
}
@Override
public void onFailure(Exception e) {
if (updatesInProgress.decrementAndGet() == 0) {
logger.info("Templates were upgraded to version {}", Version.CURRENT);
}
logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e);
}
});
}
for (String template : deletions) {
DeleteIndexTemplateRequest request = new DeleteIndexTemplateRequest(template);
request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
client.admin().indices().deleteTemplate(request, new ActionListener<DeleteIndexTemplateResponse>() {
@Override
public void onResponse(DeleteIndexTemplateResponse response) {
updatesInProgress.decrementAndGet();
if (response.isAcknowledged() == false) {
logger.warn("Error deleting template [{}], request was not acknowledged", template);
}
}
@Override
public void onFailure(Exception e) {
updatesInProgress.decrementAndGet();
if (e instanceof IndexTemplateMissingException == false) {
// we might attempt to delete the same template from different nodes, so it's ok if the template doesn't exist;
// otherwise we need to warn
logger.warn(new ParameterizedMessage("Error deleting template [{}]", template), e);
}
}
});
}
}
int getUpdatesInProgress() {
return updatesInProgress.get();
}
Optional<Tuple<Map<String, BytesReference>, Set<String>>> calculateTemplateChanges(
ImmutableOpenMap<String, IndexTemplateMetaData> templates) {
// collect current templates
Map<String, IndexTemplateMetaData> existingMap = new HashMap<>();
for (ObjectObjectCursor<String, IndexTemplateMetaData> customCursor : templates) {
existingMap.put(customCursor.key, customCursor.value);
}
// upgrade the existing templates
Map<String, IndexTemplateMetaData> upgradedMap = indexTemplateMetaDataUpgraders.apply(existingMap);
if (upgradedMap.equals(existingMap) == false) {
Set<String> deletes = new HashSet<>();
Map<String, BytesReference> changes = new HashMap<>();
// remove templates if needed
existingMap.keySet().forEach(s -> {
if (upgradedMap.containsKey(s) == false) {
deletes.add(s);
}
});
upgradedMap.forEach((key, value) -> {
if (value.equals(existingMap.get(key)) == false) {
changes.put(key, toBytesReference(value));
}
});
return Optional.of(new Tuple<>(changes, deletes));
}
return Optional.empty();
}
private static final ToXContent.Params PARAMS = new ToXContent.MapParams(singletonMap("reduce_mappings", "true"));
private BytesReference toBytesReference(IndexTemplateMetaData templateMetaData) {
try {
return XContentHelper.toXContent((builder, params) -> {
IndexTemplateMetaData.Builder.toInnerXContent(templateMetaData, builder, params);
return builder;
}, XContentType.JSON, PARAMS, false);
} catch (IOException ex) {
throw new IllegalStateException("Cannot serialize template [" + templateMetaData.getName() + "]", ex);
}
}
}
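shouldLocalNodeUpdateTemplates above elects exactly one updater: the master if it runs the newest version, otherwise the master-or-data node with the highest id among those on the newest version. A simplified model of that election, with integer versions standing in for Version:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

final class TemplateUpdaterElectionSketch {
    static final class Node {
        final String id;
        final int version;
        Node(String id, int version) { this.id = id; this.version = version; }
    }

    static boolean shouldUpdate(Node local, Node master, List<Node> masterAndDataNodes) {
        int maxVersion = masterAndDataNodes.stream().mapToInt(n -> n.version).max().orElse(0);
        if (master.version == maxVersion) {
            // An up-to-date master is the dedicated updater.
            return local.id.equals(master.id);
        }
        if (local.version != maxVersion) {
            return false; // only nodes on the newest version may update
        }
        // Tie-break among the newest nodes: the highest node id wins.
        Optional<Node> winner = masterAndDataNodes.stream()
                .filter(n -> n.version == maxVersion)
                .max(Comparator.comparing((Node n) -> n.id));
        return winner.isPresent() && winner.get().id.equals(local.id);
    }
}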

View File

@ -56,13 +56,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
private final String masterNodeId;
private final String localNodeId;
private final Version minNonClientNodeVersion;
private final Version maxNonClientNodeVersion;
private final Version maxNodeVersion;
private final Version minNodeVersion;
private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes,
ImmutableOpenMap<String, DiscoveryNode> masterNodes, ImmutableOpenMap<String, DiscoveryNode> ingestNodes,
String masterNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNodeVersion,
Version minNodeVersion) {
String masterNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNonClientNodeVersion,
Version maxNodeVersion, Version minNodeVersion) {
this.nodes = nodes;
this.dataNodes = dataNodes;
this.masterNodes = masterNodes;
@ -70,6 +71,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
this.masterNodeId = masterNodeId;
this.localNodeId = localNodeId;
this.minNonClientNodeVersion = minNonClientNodeVersion;
this.maxNonClientNodeVersion = maxNonClientNodeVersion;
this.minNodeVersion = minNodeVersion;
this.maxNodeVersion = maxNodeVersion;
}
@ -234,12 +236,25 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
/**
* Returns the version of the node with the oldest version in the cluster that is not a client node
*
* If there are no non-client nodes, Version.CURRENT will be returned.
*
* @return the oldest version in the cluster
*/
public Version getSmallestNonClientNodeVersion() {
return minNonClientNodeVersion;
}
/**
* Returns the version of the node with the youngest version in the cluster that is not a client node.
*
* If there are no non-client nodes, Version.CURRENT will be returned.
*
* @return the youngest version in the cluster
*/
public Version getLargestNonClientNodeVersion() {
return maxNonClientNodeVersion;
}
/**
* Returns the version of the node with the oldest version in the cluster.
*
@ -252,7 +267,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
/**
* Returns the version of the node with the youngest version in the cluster
*
* @return the oldest version in the cluster
* @return the youngest version in the cluster
*/
public Version getMaxNodeVersion() {
return maxNodeVersion;
@ -654,15 +669,25 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
ImmutableOpenMap.Builder<String, DiscoveryNode> ingestNodesBuilder = ImmutableOpenMap.builder();
Version minNodeVersion = Version.CURRENT;
Version maxNodeVersion = Version.CURRENT;
Version minNonClientNodeVersion = Version.CURRENT;
// The node this is being built on might not be a master or a data node, so we cannot assume
// that a node with the current version is part of the cluster.
Version minNonClientNodeVersion = null;
Version maxNonClientNodeVersion = null;
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
if (nodeEntry.value.isDataNode()) {
dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion());
}
if (nodeEntry.value.isMasterNode()) {
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion());
}
if (nodeEntry.value.isDataNode() || nodeEntry.value.isMasterNode()) {
if (minNonClientNodeVersion == null) {
minNonClientNodeVersion = nodeEntry.value.getVersion();
maxNonClientNodeVersion = nodeEntry.value.getVersion();
} else {
minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion());
maxNonClientNodeVersion = Version.max(maxNonClientNodeVersion, nodeEntry.value.getVersion());
}
}
if (nodeEntry.value.isIngestNode()) {
ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value);
@ -673,7 +698,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return new DiscoveryNodes(
nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(),
masterNodeId, localNodeId, minNonClientNodeVersion, maxNodeVersion, minNodeVersion
masterNodeId, localNodeId, minNonClientNodeVersion == null ? Version.CURRENT : minNonClientNodeVersion,
maxNonClientNodeVersion == null ? Version.CURRENT : maxNonClientNodeVersion, maxNodeVersion, minNodeVersion
);
}
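The builder change above initializes the non-client min/max versions to null instead of Version.CURRENT, because the local node may be a pure client and the current version would then never appear among master and data nodes. The tracking pattern in miniature, with an int fallback in place of Version.CURRENT:

final class MinMaxWithFallbackSketch {
    static int[] minMax(int[] versions, int fallback) {
        Integer min = null;
        Integer max = null;
        for (int v : versions) {
            if (min == null) {
                min = v;            // first qualifying node seeds both bounds
                max = v;
            } else {
                min = Math.min(min, v);
                max = Math.max(max, v);
            }
        }
        // No qualifying nodes at all: fall back, as the builder falls back to Version.CURRENT.
        return new int[] {min == null ? fallback : min, max == null ? fallback : max};
    }

    public static void main(String[] args) {
        int[] bounds = minMax(new int[] {5030099, 6000001}, 6000001);
        System.out.println(bounds[0] + " .. " + bounds[1]);
    }
}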

View File

@ -91,7 +91,7 @@ public class CacheBuilder<K, V> {
}
public Cache<K, V> build() {
Cache<K, V> cache = new Cache();
Cache<K, V> cache = new Cache<>();
if (maximumWeight != -1) {
cache.setMaximumWeight(maximumWeight);
}

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.index.get;
package org.elasticsearch.common.document;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -25,7 +25,9 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import java.util.ArrayList;
@ -36,34 +38,52 @@ import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.parseStoredFieldsValue;
public class GetField implements Streamable, ToXContent, Iterable<Object> {
/**
* A single field name and values part of {@link SearchHit} and {@link GetResult}.
*
* @see SearchHit
* @see GetResult
*/
public class DocumentField implements Streamable, ToXContent, Iterable<Object> {
private String name;
private List<Object> values;
private GetField() {
private DocumentField() {
}
public GetField(String name, List<Object> values) {
public DocumentField(String name, List<Object> values) {
this.name = Objects.requireNonNull(name, "name must not be null");
this.values = Objects.requireNonNull(values, "values must not be null");
}
/**
* The name of the field.
*/
public String getName() {
return name;
}
public Object getValue() {
if (values != null && !values.isEmpty()) {
return values.get(0);
/**
* The first value of the field.
*/
public <V> V getValue() {
if (values == null || values.isEmpty()) {
return null;
}
return null;
return (V)values.get(0);
}
/**
* The field values.
*/
public List<Object> getValues() {
return values;
}
/**
* @return whether the field is a metadata field
*/
public boolean isMetadataField() {
return MapperService.isMetadataField(name);
}
@ -73,8 +93,8 @@ public class GetField implements Streamable, ToXContent, Iterable<Object> {
return values.iterator();
}
public static GetField readGetField(StreamInput in) throws IOException {
GetField result = new GetField();
public static DocumentField readDocumentField(StreamInput in) throws IOException {
DocumentField result = new DocumentField();
result.readFrom(in);
return result;
}
@ -102,25 +122,26 @@ public class GetField implements Streamable, ToXContent, Iterable<Object> {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(name);
for (Object value : values) {
//this call doesn't really need to support writing any kind of object.
//Stored fields values are converted using MappedFieldType#valueForDisplay.
//As a result they can either be Strings, Numbers, Booleans, or BytesReference, that's all.
// this call doesn't really need to support writing any kind of object.
// Stored fields values are converted using MappedFieldType#valueForDisplay.
// As a result they can either be Strings, Numbers, Booleans, or BytesReference, that's
// all.
builder.value(value);
}
builder.endArray();
return builder;
}
public static GetField fromXContent(XContentParser parser) throws IOException {
public static DocumentField fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
String fieldName = parser.currentName();
XContentParser.Token token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.START_ARRAY, token, parser::getTokenLocation);
List<Object> values = new ArrayList<>();
while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
values.add(parseStoredFieldsValue(parser));
}
return new GetField(fieldName, values);
return new DocumentField(fieldName, values);
}
@Override
@ -131,9 +152,8 @@ public class GetField implements Streamable, ToXContent, Iterable<Object> {
if (o == null || getClass() != o.getClass()) {
return false;
}
GetField objects = (GetField) o;
return Objects.equals(name, objects.name) &&
Objects.equals(values, objects.values);
DocumentField objects = (DocumentField) o;
return Objects.equals(name, objects.name) && Objects.equals(values, objects.values);
}
@Override
@ -143,9 +163,9 @@ public class GetField implements Streamable, ToXContent, Iterable<Object> {
@Override
public String toString() {
return "GetField{" +
return "DocumentField{" +
"name='" + name + '\'' +
", values=" + values +
'}';
}
}
}
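The renamed DocumentField keeps the GetField accessors but makes getValue() generic, so callers pick the value type at the call site. A stand-in with the same getValue()/getValues() contract to show that inference; Field here is a simplified illustration, not the real class.

import java.util.Arrays;
import java.util.List;

final class DocumentFieldSketch {
    static final class Field {
        final String name;
        final List<Object> values;
        Field(String name, List<Object> values) { this.name = name; this.values = values; }

        @SuppressWarnings("unchecked")
        <V> V getValue() {
            return values.isEmpty() ? null : (V) values.get(0); // first value, caller-chosen type
        }

        List<Object> getValues() { return values; }
    }

    public static void main(String[] args) {
        Field tags = new Field("tags", Arrays.asList("search", "lucene"));
        String first = tags.getValue(); // the generic return infers String here
        System.out.println(first + " of " + tags.getValues());
    }
}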

View File

@ -272,7 +272,7 @@ public class Lucene {
public static TopDocs readTopDocs(StreamInput in) throws IOException {
byte type = in.readByte();
if (type == 0) {
int totalHits = in.readVInt();
long totalHits = in.readVLong();
float maxScore = in.readFloat();
ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()];
@ -281,7 +281,7 @@ public class Lucene {
}
return new TopDocs(totalHits, scoreDocs, maxScore);
} else if (type == 1) {
int totalHits = in.readVInt();
long totalHits = in.readVLong();
float maxScore = in.readFloat();
SortField[] fields = new SortField[in.readVInt()];
@ -385,7 +385,7 @@ public class Lucene {
out.writeByte((byte) 2);
CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs;
out.writeVInt(topDocs.totalHits);
out.writeVLong(topDocs.totalHits);
out.writeFloat(topDocs.getMaxScore());
out.writeString(collapseDocs.field);
@ -405,7 +405,7 @@ public class Lucene {
out.writeByte((byte) 1);
TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs;
out.writeVInt(topDocs.totalHits);
out.writeVLong(topDocs.totalHits);
out.writeFloat(topDocs.getMaxScore());
out.writeVInt(topFieldDocs.fields.length);
@ -419,7 +419,7 @@ public class Lucene {
}
} else {
out.writeByte((byte) 0);
out.writeVInt(topDocs.totalHits);
out.writeVLong(topDocs.totalHits);
out.writeFloat(topDocs.getMaxScore());
out.writeVInt(topDocs.scoreDocs.length);
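Widening totalHits from writeVInt to writeVLong costs nothing for small values: variable-length encoding spends one byte per 7 payload bits either way, while hit counts above Integer.MAX_VALUE become representable. A sketch of the standard 7-bit encoding these calls rely on:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

final class VLongSketch {
    static void writeVLong(DataOutput out, long v) throws IOException {
        while ((v & ~0x7FL) != 0) {
            out.writeByte((byte) ((v & 0x7F) | 0x80)); // low 7 bits, continuation flag set
            v >>>= 7;
        }
        out.writeByte((byte) v);                        // final byte, flag clear
    }

    static long readVLong(DataInput in) throws IOException {
        long value = 0;
        int shift = 0;
        byte b;
        do {
            b = in.readByte();
            value |= (long) (b & 0x7F) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return value;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeVLong(new DataOutputStream(bytes), 5_000_000_000L); // would not fit a VInt
        long back = readVLong(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(back);
    }
}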

View File

@ -119,7 +119,7 @@ public class ScriptScoreFunction extends ScoreFunction {
@Override
public boolean needsScores() {
return script.needsScores();
return script.needs_score();
}
@Override

View File

@ -19,7 +19,6 @@ package org.elasticsearch.common.lucene.uid;
* under the License.
*/
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
@ -43,8 +42,11 @@ import java.io.IOException;
* not thread safe, so it is the caller's job to create and use one
* instance of this per thread. Do not use this if a term may appear
* in more than one document! It will only return the first one it
* finds. */
* finds.
* This class uses live docs, so it should be cached based on the
* {@link org.apache.lucene.index.IndexReader#getReaderCacheHelper() reader cache helper}
* rather than the {@link LeafReader#getCoreCacheHelper() core cache helper}.
*/
final class PerThreadIDVersionAndSeqNoLookup {
// TODO: do we really need to store all this stuff? some if it might not speed up anything.
// we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff
@ -64,8 +66,7 @@ final class PerThreadIDVersionAndSeqNoLookup {
*/
PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException {
this.uidField = uidField;
Fields fields = reader.fields();
Terms terms = fields.terms(uidField);
Terms terms = reader.terms(uidField);
if (terms == null) {
throw new IllegalArgumentException("reader misses the [" + uidField + "] field");
}
@ -79,12 +80,17 @@ final class PerThreadIDVersionAndSeqNoLookup {
this.readerKey = readerKey;
}
/** Return null if id is not found. */
public DocIdAndVersion lookupVersion(BytesRef id, Bits liveDocs, LeafReaderContext context)
/** Return null if id is not found.
* We pass the {@link LeafReaderContext} as an argument so that things
* still work with reader wrappers that hide some documents while still
* using the same cache key. Otherwise we'd have to disable caching
* entirely for these readers.
*/
public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context)
throws IOException {
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
int docID = getDocID(id, liveDocs);
int docID = getDocID(id, context.reader().getLiveDocs());
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
final NumericDocValues versions = context.reader().getNumericDocValues(VersionFieldMapper.NAME);
@ -122,10 +128,10 @@ final class PerThreadIDVersionAndSeqNoLookup {
}
/** Return null if id is not found. */
DocIdAndSeqNo lookupSeqNo(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException {
DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException {
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
int docID = getDocID(id, liveDocs);
int docID = getDocID(id, context.reader().getLiveDocs());
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
NumericDocValues seqNos = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
long seqNo;
@ -139,18 +145,4 @@ final class PerThreadIDVersionAndSeqNoLookup {
return null;
}
}
/**
* returns 0 if the primary term is not found.
*
* Note that 0 is an illegal primary term. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
**/
long lookUpPrimaryTerm(int docID, LeafReader reader) throws IOException {
NumericDocValues primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
if (primaryTerms != null && primaryTerms.advanceExact(docID)) {
return primaryTerms.longValue();
} else {
return 0;
}
}
}
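
A small sketch, assuming Lucene's LeafReader API, of the live-docs handling introduced above: the deleted-docs bitset is now fetched from the context's reader at lookup time rather than passed in by the caller, so reader wrappers that hide documents behind the same cache key are honoured automatically. The helper class and method names are hypothetical.

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;

final class LiveDocsCheck {
    // getLiveDocs() returns null when the segment has no deletions at all.
    static boolean isLive(LeafReaderContext context, int docID) {
        Bits liveDocs = context.reader().getLiveDocs();
        return liveDocs == null || liveDocs.get(docID);
    }
}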

View File

@ -20,11 +20,12 @@
package org.elasticsearch.common.lucene.uid;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import java.io.IOException;
import java.util.List;
@ -36,26 +37,31 @@ import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND;
/** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */
public final class VersionsAndSeqNoResolver {
static final ConcurrentMap<Object, CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup>> lookupStates =
static final ConcurrentMap<IndexReader.CacheKey, CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]>> lookupStates =
ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
// Evict this reader from lookupStates once it's closed:
private static final IndexReader.ClosedListener removeLookupState = key -> {
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.remove(key);
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> ctl = lookupStates.remove(key);
if (ctl != null) {
ctl.close();
}
};
private static PerThreadIDVersionAndSeqNoLookup getLookupState(LeafReader reader, String uidField) throws IOException {
IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.get(cacheHelper.getKey());
private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField) throws IOException {
// We cache on the top-level reader.
// This means cache entries have a shorter lifetime, maybe as low as 1s with the
// default refresh interval and a steady indexing rate, but on the other hand it
// proved to be cheaper than having to perform a ConcurrentHashMap and a ThreadLocal get for every segment.
// See https://github.com/elastic/elasticsearch/pull/19856.
IndexReader.CacheHelper cacheHelper = reader.getReaderCacheHelper();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> ctl = lookupStates.get(cacheHelper.getKey());
if (ctl == null) {
// First time we are seeing this reader's core; make a new CTL:
ctl = new CloseableThreadLocal<>();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> other = lookupStates.putIfAbsent(cacheHelper.getKey(), ctl);
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> other = lookupStates.putIfAbsent(cacheHelper.getKey(), ctl);
if (other == null) {
// Our CTL won, we must remove it when the core is closed:
// Our CTL won, we must remove it when the reader is closed:
cacheHelper.addClosedListener(removeLookupState);
} else {
// Another thread beat us to it: just use their CTL:
@ -63,13 +69,22 @@ public final class VersionsAndSeqNoResolver {
}
}
PerThreadIDVersionAndSeqNoLookup lookupState = ctl.get();
PerThreadIDVersionAndSeqNoLookup[] lookupState = ctl.get();
if (lookupState == null) {
lookupState = new PerThreadIDVersionAndSeqNoLookup(reader, uidField);
lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()];
for (LeafReaderContext leaf : reader.leaves()) {
lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField);
}
ctl.set(lookupState);
} else if (Objects.equals(lookupState.uidField, uidField) == false) {
}
if (lookupState.length != reader.leaves().size()) {
throw new AssertionError("Mismatched numbers of leaves: " + lookupState.length + " != " + reader.leaves().size());
}
if (lookupState.length > 0 && Objects.equals(lookupState[0].uidField, uidField) == false) {
throw new AssertionError("Index does not consistently use the same uid field: ["
+ uidField + "] != [" + lookupState.uidField + "]");
+ uidField + "] != [" + lookupState[0].uidField + "]");
}
return lookupState;
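
The method above caches one lookup array per top-level reader and relies on the reader's CacheHelper to evict the entry on close. A condensed sketch of that evict-on-close pattern, assuming Lucene's CacheHelper API; the ReaderScopedCache name is hypothetical.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

import org.apache.lucene.index.IndexReader;

final class ReaderScopedCache<V> {
    private final ConcurrentMap<IndexReader.CacheKey, V> cache = new ConcurrentHashMap<>();

    V get(IndexReader reader, Function<IndexReader, V> loader) {
        IndexReader.CacheHelper helper = reader.getReaderCacheHelper();
        return cache.computeIfAbsent(helper.getKey(), key -> {
            // Drop the entry when the whole reader, not just one segment, is closed.
            helper.addClosedListener(cache::remove);
            return loader.apply(reader);
        });
    }
}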
@ -112,17 +127,14 @@ public final class VersionsAndSeqNoResolver {
* </ul>
*/
public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException {
PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field());
List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return null;
}
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
LeafReaderContext context = leaves.get(i);
LeafReader leaf = context.reader();
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, term.field());
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context);
final LeafReaderContext leaf = leaves.get(i);
PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf);
if (result != null) {
return result;
}
@ -137,17 +149,14 @@ public final class VersionsAndSeqNoResolver {
* </ul>
*/
public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException {
PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field());
List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return null;
}
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
LeafReaderContext context = leaves.get(i);
LeafReader leaf = context.reader();
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, term.field());
DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf.getLiveDocs(), context);
final LeafReaderContext leaf = leaves.get(i);
PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf);
if (result != null) {
return result;
}
@ -159,9 +168,13 @@ public final class VersionsAndSeqNoResolver {
* Load the primaryTerm associated with the given {@link DocIdAndSeqNo}
*/
public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo, String uidField) throws IOException {
LeafReader leaf = docIdAndSeqNo.context.reader();
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, uidField);
long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId, leaf);
NumericDocValues primaryTerms = docIdAndSeqNo.context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
long result;
if (primaryTerms != null && primaryTerms.advanceExact(docIdAndSeqNo.docId)) {
result = primaryTerms.longValue();
} else {
result = 0;
}
assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]"
+ " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]";
return result;
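
loadPrimaryTerm now performs the doc-values read inline instead of delegating to the removed lookUpPrimaryTerm helper. A minimal sketch of this iterator-style read, assuming Lucene's NumericDocValues API; the helper name readOrZero is hypothetical.

import java.io.IOException;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

final class DocValuesRead {
    // Returns 0 when the field is absent or carries no value for this document.
    static long readOrZero(LeafReader reader, String field, int docId) throws IOException {
        NumericDocValues dv = reader.getNumericDocValues(field); // null if the field is absent
        return (dv != null && dv.advanceExact(docId)) ? dv.longValue() : 0;
    }
}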

View File

@ -314,12 +314,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.DEFAULT_PATH_CONF_SETTING,
Environment.PATH_CONF_SETTING,
Environment.DEFAULT_PATH_DATA_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.DEFAULT_PATH_LOGS_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SHARED_DATA_SETTING,

View File

@ -77,6 +77,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING,
IndexMetaData.INDEX_PRIORITY_SETTING,
IndexMetaData.INDEX_DATA_PATH_SETTING,
IndexMetaData.INDEX_FORMAT_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
@ -127,6 +128,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING,
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING,
IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING,
IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING,
IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING,
IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
FieldMapper.IGNORE_MALFORMED_SETTING,
FieldMapper.COERCE_SETTING,

View File

@ -32,7 +32,7 @@ public class KeyStoreCli extends MultiCommand {
subcommands.put("create", new CreateKeyStoreCommand());
subcommands.put("list", new ListKeyStoreCommand());
subcommands.put("add", new AddStringKeyStoreCommand());
subcommands.put("add-file", new AddStringKeyStoreCommand());
subcommands.put("add-file", new AddFileKeyStoreCommand());
subcommands.put("remove", new RemoveSettingKeyStoreCommand());
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.settings;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.util.Set;
@ -40,4 +41,7 @@ public interface SecureSettings extends Closeable {
/** Return a file setting. The {@link InputStream} should be closed once it is used. */
InputStream getFile(String setting) throws GeneralSecurityException;
@Override
void close() throws IOException;
}

View File

@ -345,7 +345,7 @@ public class Setting<T> extends ToXContentToBytes {
/** Logs a deprecation warning if the setting is deprecated and used. */
protected void checkDeprecation(Settings settings) {
// They're using the setting, so we need to tell them to stop
if (this.isDeprecated() && this.exists(settings)) {
if (this.isDeprecated() && this.exists(settings) && settings.addDeprecatedSetting(this)) {
// It would be convenient to show its replacement key, but replacement is often not so simple
final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass()));
deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and will be removed in a future release! " +

View File

@ -55,7 +55,6 @@ import java.util.Dictionary;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -63,6 +62,7 @@ import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
@ -93,6 +93,22 @@ public final class Settings implements ToXContent {
/** The first level of setting names. This is constructed lazily in {@link #names()}. */
private final SetOnce<Set<String>> firstLevelNames = new SetOnce<>();
/**
* The set of deprecated settings tracked by this settings object.
*/
private final Set<String> deprecatedSettings = Collections.newSetFromMap(new ConcurrentHashMap<>());
/**
* Add the setting as a tracked deprecated setting.
*
* @param setting the deprecated setting to track
* @return true if the setting was not already tracked as a deprecated setting, otherwise false
*/
boolean addDeprecatedSetting(final Setting setting) {
assert setting.isDeprecated() && setting.exists(this) : setting.getKey();
return deprecatedSettings.add(setting.getKey());
}
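
The deprecatedSettings set is what lets checkDeprecation in Setting log each deprecated key only once per Settings instance. The same warn-once pattern in isolation (class and method names hypothetical): Set#add returns true only for the first caller, so only one thread ends up logging.

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class WarnOnce {
    private final Set<String> seen = Collections.newSetFromMap(new ConcurrentHashMap<>());

    // True the first time a key is reported, false on every later call.
    boolean shouldWarn(String key) {
        return seen.add(key);
    }
}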
/**
* Setting names found in this Settings for both string and secure settings.
* This is constructed lazily in {@link #keySet()}.
@ -610,8 +626,10 @@ public final class Settings implements ToXContent {
}
public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException {
out.writeVInt(settings.size());
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
// compute the count from getAsMap() so that secure settings are excluded from both the size and the entries
Set<Map.Entry<String, String>> entries = settings.getAsMap().entrySet();
out.writeVInt(entries.size());
for (Map.Entry<String, String> entry : entries) {
out.writeString(entry.getKey());
out.writeOptionalString(entry.getValue());
}
@ -716,6 +734,10 @@ public final class Settings implements ToXContent {
if (secureSettings.isLoaded() == false) {
throw new IllegalStateException("Secure settings must already be loaded");
}
if (this.secureSettings.get() != null) {
throw new IllegalArgumentException("Secure settings already set. Existing settings: " +
this.secureSettings.get().getSettingNames() + ", new settings: " + secureSettings.getSettingNames());
}
this.secureSettings.set(secureSettings);
return this;
}

View File

@ -71,12 +71,31 @@ public final class Sets {
return !left.stream().anyMatch(k -> right.contains(k));
}
/**
* The relative complement, or difference, of the specified left and right set. Namely, the resulting set contains all the elements that
* are in the left set but not in the right set. Neither input is mutated by this operation; an entirely new set is returned.
*
* @param left the left set
* @param right the right set
* @param <T> the type of the elements of the sets
* @return the relative complement of the left set with respect to the right set
*/
public static <T> Set<T> difference(Set<T> left, Set<T> right) {
Objects.requireNonNull(left);
Objects.requireNonNull(right);
return left.stream().filter(k -> !right.contains(k)).collect(Collectors.toSet());
}
/**
* The relative complement, or difference, of the specified left and right set, returned as a sorted set. Namely, the resulting set
* contains all the elements that are in the left set but not in the right set, and the set is sorted using the natural ordering of
* the element type. Neither input is mutated by this operation; an entirely new set is returned.
*
* @param left the left set
* @param right the right set
* @param <T> the type of the elements of the sets
* @return the sorted relative complement of the left set with respect to the right set
*/
public static <T> SortedSet<T> sortedDifference(Set<T> left, Set<T> right) {
Objects.requireNonNull(left);
Objects.requireNonNull(right);
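
A quick usage sketch for the difference method documented above, assuming the Sets class is on the classpath at org.elasticsearch.common.util.set.Sets; the values are illustrative.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.elasticsearch.common.util.set.Sets;

public class SetsDifferenceExample {
    public static void main(String[] args) {
        Set<String> left = new HashSet<>(Arrays.asList("a", "b", "c"));
        Set<String> right = new HashSet<>(Arrays.asList("b", "d"));
        Set<String> diff = Sets.difference(left, right);
        System.out.println(diff); // exactly "a" and "c", in some iteration order
    }
}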

View File

@ -23,10 +23,10 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.rest.action.search.RestSearchAction;
import java.io.IOException;
import java.util.Locale;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
@ -115,29 +115,39 @@ public final class XContentParserUtils {
* (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry},
* "#" is the delimiter and "foo" the name of the object to parse).
*
* It is also expected that what follows the field name is either an object or an array xContent structure,
* and that the cursor points to the start token of this structure.
*
* The method splits the field's name to extract the type and name and then parses the object
* using the {@link XContentParser#namedObject(Class, String, Object)} method.
*
* @param parser the current {@link XContentParser}
* @param delimiter the delimiter to use to split the field's name
* @param objectClass the object class of the object to parse
* @param consumer something to consume the parsed object
* @param <T> the type of the object to parse
* @return the parsed object
* @throws IOException if anything went wrong during parsing or if the type or name cannot be derived
* from the field's name
* @throws ParsingException if the parser isn't positioned on either START_OBJECT or START_ARRAY at the beginning
*/
public static <T> T parseTypedKeysObject(XContentParser parser, String delimiter, Class<T> objectClass) throws IOException {
public static <T> void parseTypedKeysObject(XContentParser parser, String delimiter, Class<T> objectClass, Consumer<T> consumer)
throws IOException {
if (parser.currentToken() != XContentParser.Token.START_OBJECT && parser.currentToken() != XContentParser.Token.START_ARRAY) {
throwUnknownToken(parser.currentToken(), parser.getTokenLocation());
}
String currentFieldName = parser.currentName();
if (Strings.hasLength(currentFieldName)) {
int position = currentFieldName.indexOf(delimiter);
if (position > 0) {
String type = currentFieldName.substring(0, position);
String name = currentFieldName.substring(position + 1);
return parser.namedObject(objectClass, type, name);
consumer.accept(parser.namedObject(objectClass, type, name));
return;
}
// if we didn't find a delimiter we ignore the object or array for forward compatibility instead of throwing an error
parser.skipChildren();
} else {
throw new ParsingException(parser.getTokenLocation(), "Failed to parse object: empty key");
}
throw new ParsingException(parser.getTokenLocation(), "Cannot parse object of class [" + objectClass.getSimpleName()
+ "] without type information. Set [" + RestSearchAction.TYPED_KEYS_PARAM + "] parameter on the request to ensure the"
+ " type information is added to the response output");
}
}
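
The type/name split described above, isolated from the XContent machinery as a worked example; the field name and delimiter values mirror the javadoc's terms#foo case.

public class TypedKeySplit {
    public static void main(String[] args) {
        String currentFieldName = "terms#foo"; // "<type>#<name>", as emitted with typed keys
        String delimiter = "#";
        int position = currentFieldName.indexOf(delimiter);
        String type = currentFieldName.substring(0, position);  // "terms"
        String name = currentFieldName.substring(position + 1); // "foo"
        System.out.println(type + " / " + name);                // terms / foo
    }
}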

View File

@ -97,7 +97,7 @@ public class XContentMapValues {
}
}
public static Object extractValue(String path, Map<String, Object> map) {
public static Object extractValue(String path, Map<?, ?> map) {
String[] pathElements = path.split("\\.");
if (pathElements.length == 0) {
return null;
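
A small usage sketch for extractValue with the widened Map<?, ?> signature: each dot-separated path element descends one map level. It assumes XContentMapValues lives at org.elasticsearch.common.xcontent.support; the data is illustrative.

import java.util.Collections;
import java.util.Map;

import org.elasticsearch.common.xcontent.support.XContentMapValues;

public class ExtractValueExample {
    public static void main(String[] args) {
        Map<String, Object> inner = Collections.singletonMap("bar", (Object) 42);
        Map<String, Object> map = Collections.singletonMap("foo", (Object) inner);
        Object value = XContentMapValues.extractValue("foo.bar", map);
        System.out.println(value); // 42
    }
}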

View File

@ -46,16 +46,10 @@ import java.util.function.Function;
// public+forbidden api!
public class Environment {
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
public static final Setting<String> DEFAULT_PATH_CONF_SETTING = Setting.simpleString("default.path.conf", Property.NodeScope);
public static final Setting<String> PATH_CONF_SETTING =
new Setting<>("path.conf", DEFAULT_PATH_CONF_SETTING, Function.identity(), Property.NodeScope);
public static final Setting<List<String>> DEFAULT_PATH_DATA_SETTING =
Setting.listSetting("default.path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<List<String>> PATH_DATA_SETTING =
Setting.listSetting("path.data", DEFAULT_PATH_DATA_SETTING, Function.identity(), Property.NodeScope);
public static final Setting<String> DEFAULT_PATH_LOGS_SETTING = Setting.simpleString("default.path.logs", Property.NodeScope);
Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<String> PATH_LOGS_SETTING =
new Setting<>("path.logs", DEFAULT_PATH_LOGS_SETTING, Function.identity(), Property.NodeScope);
new Setting<>("path.logs", "", Function.identity(), Property.NodeScope);
public static final Setting<List<String>> PATH_REPO_SETTING =
Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
@ -92,6 +86,10 @@ public class Environment {
private final Path tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir"));
public Environment(Settings settings) {
this(settings, null);
}
public Environment(final Settings settings, final Path configPath) {
final Path homeFile;
if (PATH_HOME_SETTING.exists(settings)) {
homeFile = PathUtils.get(PATH_HOME_SETTING.get(settings)).normalize();
@ -99,9 +97,8 @@ public class Environment {
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
}
// this is trappy: Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings)
if (PATH_CONF_SETTING.exists(settings) || DEFAULT_PATH_CONF_SETTING.exists(settings)) {
configFile = PathUtils.get(PATH_CONF_SETTING.get(settings)).normalize();
if (configPath != null) {
configFile = configPath.normalize();
} else {
configFile = homeFile.resolve("config");
}
@ -137,7 +134,7 @@ public class Environment {
}
// this is trappy: Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings)
if (PATH_LOGS_SETTING.exists(settings) || DEFAULT_PATH_LOGS_SETTING.exists(settings)) {
if (PATH_LOGS_SETTING.exists(settings)) {
logsFile = PathUtils.get(PATH_LOGS_SETTING.get(settings)).normalize();
} else {
logsFile = homeFile.resolve("logs");
@ -160,7 +157,6 @@ public class Environment {
}
finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile);
this.settings = finalSettings.build();
}
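
With the path.conf setting gone, the configuration directory now arrives through the new two-argument constructor. A hypothetical usage sketch; the paths are illustrative and PathUtils is assumed to be org.elasticsearch.common.io.PathUtils.

import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

public class EnvironmentExample {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), "/usr/share/elasticsearch")
                .build();
        // A null configPath would fall back to ${path.home}/config, per the constructor above.
        Environment env = new Environment(settings, PathUtils.get("/etc/elasticsearch"));
        System.out.println(env.configFile());
    }
}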
/**

Some files were not shown because too many files have changed in this diff.