Merge branch 'master' into fix/19772-toString

# Conflicts:
#	core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java

commit 4d272cc9b2
@@ -17,6 +17,8 @@ request block and provide responses for all of the below items.

 **Elasticsearch version**:

 **Plugins installed**: []

 **JVM version**:

 **OS version**:
@@ -145,7 +145,7 @@ public class AllocationBenchmark {
         RoutingTable routingTable = rb.build();
         DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
         for (int i = 1; i <= numNodes; i++) {
-            nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags))));
+            nb.add(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags))));
         }
         initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
                 .metaData(metaData).routingTable(routingTable).nodes
@@ -119,6 +119,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
                current.println("      reason: $test.skipTest")
            }
            if (test.setup != null) {
                // Insert a setup defined outside of the docs
                String setup = setups[test.setup]
                if (setup == null) {
                    throw new InvalidUserDataException("Couldn't find setup "
@@ -136,13 +137,23 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
            response.contents.eachLine { current.println("    $it") }
        }

-       void emitDo(String method, String pathAndQuery,
-               String body, String catchPart, boolean inSetup) {
+       void emitDo(String method, String pathAndQuery, String body,
+               String catchPart, List warnings, boolean inSetup) {
            def (String path, String query) = pathAndQuery.tokenize('?')
            current.println("  - do:")
            if (catchPart != null) {
                current.println("      catch: $catchPart")
            }
+           if (false == warnings.isEmpty()) {
+               current.println("      warnings:")
+               for (String warning in warnings) {
+                   // Escape " because we're going to quote the warning
+                   String escaped = warning.replaceAll('"', '\\\\"')
+                   /* Quote the warning in case it starts with [ which makes
+                    * it look too much like an array. */
+                   current.println("         - \"$escaped\"")
+               }
+           }
            current.println("      raw:")
            current.println("        method: $method")
            current.println("        path: \"$path\"")
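
The escaping and quoting logic added above can be exercised on its own; a minimal Groovy sketch with a made-up warnings list (the input value is hypothetical, not part of this commit):

    // Escape " because each warning is printed inside double quotes,
    // then quote the whole warning so YAML does not read a leading
    // [ as the start of an array.
    List<String> warnings = ['[deprecation] field "foo" is deprecated']
    warnings.each { warning ->
        String escaped = warning.replaceAll('"', '\\\\"')
        println("         - \"$escaped\"")
    }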
@@ -200,7 +211,8 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
                // Leading '/'s break the generated paths
                pathAndQuery = pathAndQuery.substring(1)
            }
-           emitDo(method, pathAndQuery, body, catchPart, inSetup)
+           emitDo(method, pathAndQuery, body, catchPart, snippet.warnings,
+                   inSetup)
        }
    }
@@ -37,8 +37,9 @@ public class SnippetsTask extends DefaultTask {
    private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/
    private static final String SKIP = /skip:([^\]]+)/
    private static final String SETUP = /setup:([^ \]]+)/
+   private static final String WARNING = /warning:(.+)/
    private static final String TEST_SYNTAX =
-       /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/
+       /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/

    /**
     * Action to take on each snippet. Called with a single parameter, an
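
For reference, the new warning: alternative captures everything after the colon as one group; a standalone Groovy sketch of the match (illustrative input only; inside the combined TEST_SYNTAX regex the same text lands in group 7, as the parsing hunk below shows):

    // The marker regex captures the warning text after "warning:".
    String WARNING = /warning:(.+)/
    def matcher = ('warning:this request is deprecated' =~ WARNING)
    assert matcher.find()
    assert matcher.group(1) == 'this request is deprecated'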
@@ -158,6 +159,10 @@ public class SnippetsTask extends DefaultTask {
                    snippet.setup = it.group(6)
                    return
                }
+               if (it.group(7) != null) {
+                   snippet.warnings.add(it.group(7))
+                   return
+               }
                throw new InvalidUserDataException(
                        "Invalid test marker: $line")
            }
@@ -230,6 +235,7 @@ public class SnippetsTask extends DefaultTask {
        String language = null
        String catchPart = null
        String setup = null
+       List warnings = new ArrayList()

        @Override
        public String toString() {
@@ -254,6 +260,9 @@ public class SnippetsTask extends DefaultTask {
            if (setup) {
                result += "[setup:$setup]"
            }
+           for (String warning in warnings) {
+               result += "[warning:$warning]"
+           }
        }
        if (testResponse) {
            result += '// TESTRESPONSE'
@@ -61,6 +61,9 @@ public class ForbiddenPatternsTask extends DefaultTask {
        // add mandatory rules
        patterns.put('nocommit', /nocommit/)
        patterns.put('tab', /\t/)
+       if (System.getProperty('build.snapshot', 'true').equals('false')) {
+           patterns.put('norelease', /norelease/)
+       }

        inputs.property("excludes", filesFilter.excludes)
        inputs.property("rules", patterns)
@@ -38,7 +38,7 @@ public class LoggerUsageTask extends LoggedExec {

    private FileCollection classpath;

-   private List<File> classDirectories;
+   private FileCollection classDirectories;

    public LoggerUsageTask() {
        project.afterEvaluate {
@@ -46,15 +46,25 @@ public class LoggerUsageTask extends LoggedExec {
            description = "Runs LoggerUsageCheck on ${classDirectories}"
            executable = new File(project.javaHome, 'bin/java')
            if (classDirectories == null) {
-               classDirectories = []
-               if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) {
-                   classDirectories += [project.sourceSets.main.output.classesDir]
+               // Default to main and test class files
+               List files = []
+               // But only if the source sets that will make them exist
+               if (project.sourceSets.findByName("main")) {
+                   files.add(project.sourceSets.main.output.classesDir)
                    dependsOn project.tasks.classes
                }
-               if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) {
-                   classDirectories += [project.sourceSets.test.output.classesDir]
+               if (project.sourceSets.findByName("test")) {
+                   files.add(project.sourceSets.test.output.classesDir)
                    dependsOn project.tasks.testClasses
                }
+               /* In an extra twist, it isn't good enough that the source set
+                * exists. Empty source sets won't make a classes directory
+                * which will cause the check to fail. We have to filter the
+                * empty directories out manually. This filter is done right
+                * before the actual logger usage check giving the rest of the
+                * build the opportunity to actually build the directory.
+                */
+               classDirectories = project.files(files).filter { it.exists() }
            }
            doFirst({
                args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker')
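
The move from List<File> to FileCollection is what lets the existence filter above run late: a Gradle FileCollection returned by filter is a live view, re-evaluated when the collection is resolved at execution time. A sketch of the pattern (assumed Gradle API inside a build script; candidateDirs is a hypothetical placeholder):

    // filter {} is lazy: exists() runs when the collection is iterated,
    // so a classes directory created later in the build is still seen,
    // and one never created by an empty source set is skipped.
    FileCollection dirs = project.files(candidateDirs).filter { it.exists() }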
@@ -79,11 +89,11 @@ public class LoggerUsageTask extends LoggedExec {
    }

    @InputFiles
-   List<File> getClassDirectories() {
+   FileCollection getClassDirectories() {
        return classDirectories
    }

-   void setClassDirectories(List<File> classDirectories) {
+   void setClassDirectories(FileCollection classDirectories) {
        this.classDirectories = classDirectories
    }

@@ -17,16 +17,12 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]postingshighlight[/\\]CustomPostingsHighlighter.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]search[/\\]vectorhighlight[/\\]CustomFieldQuery.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]Action.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionModule.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ActionRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]TransportNodesHotThreadsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]NodeInfo.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]TransportNodesInfoAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]NodesStatsRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]TransportNodesStatsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]DeleteRepositoryRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]TransportDeleteRepositoryAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]get[/\\]GetRepositoriesRequestBuilder.java" checks="LineLength" />

@@ -49,11 +45,9 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]delete[/\\]DeleteSnapshotRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]delete[/\\]TransportDeleteSnapshotAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]get[/\\]GetSnapshotsRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]get[/\\]TransportGetSnapshotsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]restore[/\\]RestoreSnapshotRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]restore[/\\]TransportRestoreSnapshotAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]status[/\\]SnapshotsStatusRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]status[/\\]TransportNodesSnapshotsStatus.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]status[/\\]TransportSnapshotsStatusAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]state[/\\]ClusterStateRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]state[/\\]TransportClusterStateAction.java" checks="LineLength" />

@@ -185,7 +179,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndex.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]DelegatingActionListener.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]HandledTransportAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptions.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ToXContentToBytes.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]BroadcastOperationRequestBuilder.java" checks="LineLength" />

@@ -202,7 +195,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]master[/\\]info[/\\]ClusterInfoRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]master[/\\]info[/\\]TransportClusterInfoAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]nodes[/\\]NodesOperationRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]nodes[/\\]TransportNodesAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]replication[/\\]ReplicationRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]replication[/\\]TransportBroadcastReplicationAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]single[/\\]instance[/\\]InstanceShardOperationRequestBuilder.java" checks="LineLength" />

@@ -244,7 +236,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]IncompatibleClusterStateVersionException.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]InternalClusterInfoService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]LocalNodeMasterListener.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]SnapshotsInProgress.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeIndexDeletedAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeMappingRefreshAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]shard[/\\]ShardStateAction.java" checks="LineLength" />

@@ -273,18 +264,12 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]RoutingService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]RoutingTable.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]ShardRouting.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]UnassignedInfo.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AllocationService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]FailedRerouteAllocation.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]RoutingAllocation.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]StartedRerouteAllocation.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]allocator[/\\]BalancedShardsAllocator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]AbstractAllocateAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]AllocateEmptyPrimaryAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]AllocateReplicaAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]AllocateStalePrimaryAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]AllocationCommands.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]CancelAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]MoveAllocationCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AllocationDeciders.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]service[/\\]InternalClusterService.java" checks="LineLength" />

@@ -300,7 +285,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]collect[/\\]ImmutableOpenIntMap.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]GeoDistance.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]DefaultConstructionProxyFactory.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]InjectorImpl.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]internal[/\\]ConstructionContext.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]multibindings[/\\]MapBinder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]spi[/\\]InjectionPoint.java" checks="LineLength" />

@@ -319,7 +303,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkModule.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]rounding[/\\]Rounding.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />

@@ -354,7 +337,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]LocalAllocateDangledIndices.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]PrimaryShardAllocator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]TransportNodesListGatewayMetaState.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]AlreadyExpiredException.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]CompositeIndexEventListener.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexSettings.java" checks="LineLength" />

@@ -376,7 +358,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ShadowEngine.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]VersionValue.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]IndexFieldDataCache.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]IndexFieldDataService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]fieldcomparator[/\\]DoubleValuesComparatorSource.java" checks="LineLength" />

@@ -407,7 +388,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParseContext.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]core[/\\]CompletionFieldMapper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]core[/\\]LegacyDateFieldMapper.java" checks="LineLength" />

@@ -494,7 +474,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesModule.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoryModule.java" checks="LineLength" />

@@ -520,13 +499,10 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]search[/\\]RestClearScrollAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]search[/\\]RestSearchScrollAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]suggest[/\\]RestSuggestAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]support[/\\]RestActions.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]termvectors[/\\]RestTermVectorsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]AbstractScriptParser.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistry.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptEngineRegistry.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModes.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModule.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptParameterParser.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettings.java" checks="LineLength" />

@@ -551,7 +527,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]InternalMissing.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]missing[/\\]MissingAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedBytesHashSamplerAggregator.java" checks="LineLength" />

@@ -569,7 +544,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]ScriptHeuristic.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]SignificanceHeuristic.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]AbstractTermsParametersParser.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTermsAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]GlobalOrdinalsStringTermsAggregator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalOrder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTermsAggregator.java" checks="LineLength" />

@@ -600,7 +574,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchPhase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSearchResult.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhaseContext.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhaseParseElement.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]explain[/\\]ExplainFetchSubPhase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]fielddata[/\\]FieldDataFieldsParseElement.java" checks="LineLength" />

@@ -614,7 +587,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]PostingsHighlighter.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]vectorhighlight[/\\]SimpleFragmentsBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]vectorhighlight[/\\]SourceScoreOrderFragmentsBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]highlight[/\\]vectorhighlight[/\\]SourceSimpleFragmentsBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]DefaultSearchContext.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]FilteredSearchContext.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]InternalSearchHit.java" checks="LineLength" />

@@ -676,7 +648,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestParsingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]MultiSearchRequestTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />

@@ -696,8 +667,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]BasicAnalysisBackwardCompatibilityIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]BasicBackwardsCompatibilityIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]GetIndexBackwardsCompatibilityIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]OldIndexBackwardsCompatibilityIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]RecoveryWithUnsupportedIndicesIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]bwcompat[/\\]RestoreBackwardsCompatIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]AbstractClientHeadersTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterHealthIT.java" checks="LineLength" />

@@ -731,9 +700,7 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]AllocationIdTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]DelayedAllocationIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]PrimaryAllocationIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]RoutingServiceTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]RoutingTableTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]ShardRoutingHelper.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]ShardRoutingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]UnassignedInfoTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AddIncrementallyTests.java" checks="LineLength" />

@@ -785,9 +752,7 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]hash[/\\]MessageDigestsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]ModuleTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]FreqTermsEnumTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]uid[/\\]VersionsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]CidrsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]rounding[/\\]TimeZoneRoundingTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]DistanceUnitTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]FuzzinessTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArraysTests.java" checks="LineLength" />

@@ -795,10 +760,7 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]LongObjectHashMapTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutorsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedExecutorsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]XContentFactoryTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]builder[/\\]XContentBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]cbor[/\\]JsonVsCborTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]smile[/\\]JsonVsSmileTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]xcontent[/\\]support[/\\]filtering[/\\]FilterPathGeneratorFilteringTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]deps[/\\]joda[/\\]SimpleJodaTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]BlockingClusterStatePublishResponseHandlerTests.java" checks="LineLength" />

@@ -912,7 +874,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]RangeQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanMultiTermQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanNotQueryBuilderTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]plugin[/\\]CustomQueryParserIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]QueryInnerHitsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MultiMatchQueryTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]geo[/\\]GeoUtilsTests.java" checks="LineLength" />

@@ -951,7 +912,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]mapping[/\\]SimpleGetFieldMappingsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]mapping[/\\]SimpleGetMappingsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]mapping[/\\]UpdateMappingIntegrationIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]memory[/\\]breaker[/\\]CircuitBreakerServiceIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]memory[/\\]breaker[/\\]CircuitBreakerUnitTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]memory[/\\]breaker[/\\]RandomExceptionCircuitBreakerIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]IndexPrimaryRelocationIT.java" checks="LineLength" />

@@ -982,18 +942,15 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]BytesRestResponseTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]CorsRegexDefaultIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]CorsRegexIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]RestControllerTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasResolveRoutingIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasRoutingIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]FileScriptTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]NativeScriptTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistryTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModesTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptParameterParserTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptServiceTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettingsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueModeTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchWithRejectionsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]MissingValueIT.java" checks="LineLength" />

@@ -1009,13 +966,10 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ShardReduceIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]ShardSizeTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]SignificantTermsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]SignificantTermsSignificanceScoreIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AvgIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]SumIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]TopHitsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]ValueCountIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]ExtendedStatsBucketIT.java" checks="LineLength" />

@@ -1062,26 +1016,21 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SharedClusterSnapshotRestoreIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotBackwardsCompatibilityIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotUtilsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]mockstore[/\\]MockRepository.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBlobStoreRepositoryIntegTestCase.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]geo[/\\]RandomShapeGenerator.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchGeoAssertions.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]SimpleThreadPoolIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPoolSerializationTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]UpdateThreadPoolSettingsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]timestamp[/\\]SimpleTimestampIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]update[/\\]UpdateIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]versioning[/\\]SimpleVersioningIT.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionPlugin.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionScriptEngineService.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionSearchScript.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]ExpressionTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]IndexedExpressionTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyPlugin.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />

@@ -1109,13 +1058,11 @@
 <suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]AbstractAwsTestCase.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ec2[/\\]AmazonEC2Mock.java" checks="LineLength" />
 <suppress files="plugins[/\\]discovery-gce[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]gce[/\\]GceNetworkTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]javascript[/\\]JavaScriptPlugin.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptEngineService.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptEngineTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptScriptMultiThreadedTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]JavaScriptSecurityTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-javascript[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]javascript[/\\]SimpleBench.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-python[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]python[/\\]PythonPlugin.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonSecurityTests.java" checks="LineLength" />

@@ -1138,7 +1085,6 @@
 <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureWithThirdPartyTestCase.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]storage[/\\]AzureStorageServiceMock.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]azure[/\\]AzureSnapshotRestoreServiceTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]azure[/\\]AzureSnapshotRestoreTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-hdfs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]hdfs[/\\]HdfsRepository.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-hdfs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]hdfs[/\\]HdfsTests.java" checks="LineLength" />
 <suppress files="plugins[/\\]repository-s3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]blobstore[/\\]DefaultS3OutputStream.java" checks="LineLength" />

@@ -1157,7 +1103,6 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliToolTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]MockBigArrays.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]script[/\\]NativeSignificanceScoreScriptWithParams.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]AbstractQueryTestCase.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CompositeTestCluster.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />

@@ -1169,7 +1114,6 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ExternalNode.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ExternalTestCluster.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalSettingsPlugin.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]TestSearchContext.java" checks="LineLength" />

@@ -1184,7 +1128,6 @@
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
 <suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]test[/\\]InternalTestClusterTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliTool.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]RestGetSettingsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeService.java" checks="LineLength" />
@@ -4,7 +4,8 @@ lucene = 6.1.0
 # optional dependencies
 spatial4j = 0.6
 jts = 1.13
-jackson = 2.7.1
+jackson = 2.8.1
 snakeyaml = 1.15
 log4j = 1.2.17
 slf4j = 1.6.2
+jna = 4.2.2
@@ -1 +0,0 @@
-4127b62db028f981e81caa248953c0899d720f98
@@ -0,0 +1 @@
+fd13b1c033741d48291315c6370f7d475a42dccf
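Note: the two single-line hunks above swap the committed checksum for jackson-core in step with the 2.7.1 -> 2.8.1 upgrade; the build verifies each bundled jar against its .jar.sha1 file. A minimal sketch of such a verification, assuming a hypothetical Sha1Check helper (not part of this commit):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    // Hypothetical helper: compute the hex SHA-1 of a dependency jar so it can
    // be compared with the contents of the committed .jar.sha1 file.
    final class Sha1Check {
        static String sha1Hex(Path jar) throws Exception {
            MessageDigest digest = MessageDigest.getInstance("SHA-1");
            try (InputStream in = Files.newInputStream(jar)) {
                byte[] buffer = new byte[8192];
                int read;
                while ((read = in.read(buffer)) != -1) {
                    digest.update(buffer, 0, read);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : digest.digest()) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        }
    }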
@@ -34,6 +34,7 @@ dependencies {
     compile "org.elasticsearch.plugin:percolator-client:${version}"
     testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
     testCompile "junit:junit:${versions.junit}"
+    testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
 }

 dependencyLicenses {
@@ -19,6 +19,8 @@

 package org.elasticsearch.transport.client;

+import io.netty.util.ThreadDeathWatcher;
+import io.netty.util.concurrent.GlobalEventExecutor;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Setting;
@@ -34,6 +36,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;

 /**
  * A builder to create an instance of {@link TransportClient}
@@ -53,7 +56,6 @@ public class PreBuiltTransportClient extends TransportClient {
         Arrays.asList(
             Netty3Plugin.class,
             Netty4Plugin.class,
-            TransportPlugin.class,
             ReindexPlugin.class,
             PercolatorPlugin.class,
             MustachePlugin.class));
@@ -67,25 +69,22 @@ public class PreBuiltTransportClient extends TransportClient {
         super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
     }

-    public static final class TransportPlugin extends Plugin {
-
-        private static final Setting<Boolean> ASSERT_NETTY_BUGLEVEL =
-            Setting.boolSetting("netty.assert.buglevel", true, Setting.Property.NodeScope);
-
-        @Override
-        public List<Setting<?>> getSettings() {
-            return Collections.singletonList(ASSERT_NETTY_BUGLEVEL);
-        }
+    @Override
+    public void close() {
+        super.close();
+        if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false
+                || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) {
+            try {
+                GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+            try {
+                ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }

-        @Override
-        public Settings additionalSettings() {
-            return Settings.builder()
-                .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME)
-                .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
-                .put("netty.assert.buglevel", true)
-                .build();
-        }
-
-    }
-
 }
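Note: close() now drains Netty's GlobalEventExecutor and ThreadDeathWatcher (up to five seconds each) when the netty4 transport is in use, so short-lived clients should be closed deterministically. A minimal usage sketch, assuming default settings:

    // The client is AutoCloseable; close() also waits for Netty's global
    // executor and thread-death watcher to go quiet before returning.
    try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
        // use the client; transport threads are drained on close
    }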
@@ -27,12 +27,13 @@ import org.elasticsearch.index.reindex.ReindexPlugin;
 import org.elasticsearch.percolator.PercolatorPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.mustache.MustachePlugin;
 import org.elasticsearch.transport.Netty3Plugin;
 import org.elasticsearch.transport.Netty4Plugin;
 import org.junit.Test;

 import java.util.Arrays;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 public class PreBuiltTransportClientTests extends RandomizedTest {
@@ -41,7 +42,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
     public void testPluginInstalled() {
         try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
             Settings settings = client.settings();
-            assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
+            assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
+            assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
         }
     }
@@ -54,7 +56,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
             new PreBuiltTransportClient(Settings.EMPTY, plugin);
             fail("exception expected");
         } catch (IllegalArgumentException ex) {
-            assertEquals("plugin is already installed", ex.getMessage());
+            assertTrue("Expected message to start with [plugin already exists: ] but was instead [" + ex.getMessage() + "]",
+                ex.getMessage().startsWith("plugin already exists: "));
         }
     }
 }
@@ -69,6 +69,7 @@ dependencies {
     compile 'org.joda:joda-convert:1.2'

     // json and yaml
+    compile "org.yaml:snakeyaml:${versions.snakeyaml}"
     compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
     compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
     compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
@@ -179,19 +179,6 @@ public final class ExceptionsHelper {
         return null;
     }

-    /**
-     * Returns <code>true</code> iff the given throwable is and OutOfMemoryException, otherwise <code>false</code>
-     */
-    public static boolean isOOM(Throwable t) {
-        return t != null
-            && (t instanceof OutOfMemoryError
-                || (t instanceof IllegalStateException
-                    && t.getMessage() != null
-                    && t.getMessage().contains("OutOfMemoryError")
-                )
-            );
-    }
-
     /**
      * Throws the specified exception. If null if specified then <code>true</code> is returned.
      */
@@ -71,6 +71,8 @@ public class Version {
     public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_4_ID = 2030499;
     public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_3_5_ID = 2030599;
+    public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -104,6 +106,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_3_5_ID:
+                return V_2_3_5;
             case V_2_3_4_ID:
                 return V_2_3_4;
             case V_2_3_3_ID:
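Note: the numeric IDs above encode major.minor.revision plus a two-digit build, where 99 marks a GA release: 2030599 is 2.3.5 and 5000001 is 5.0.0-alpha1 (build 01). A sketch of the arithmetic implied by the constants (the helper name is illustrative, not from this commit):

    // id = major * 1_000_000 + minor * 10_000 + revision * 100 + build
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
    // versionId(2, 3, 5, 99) == 2030599, matching V_2_3_5_ID above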
@@ -177,7 +177,17 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         return source(buildFromSimplifiedDef(type, source));
     }

+    /**
+     * @param type   the mapping type
+     * @param source consisting of field/properties pairs (e.g. "field1",
+     *               "type=string,store=true"). If the number of arguments is not
+     *               divisible by two an {@link IllegalArgumentException} is thrown
+     * @return the mappings definition
+     */
     public static XContentBuilder buildFromSimplifiedDef(String type, Object... source) {
+        if (source.length % 2 != 0) {
+            throw new IllegalArgumentException("mapping source must be pairs of fieldnames and properties definition.");
+        }
         try {
             XContentBuilder builder = XContentFactory.jsonBuilder();
             builder.startObject();
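Note: buildFromSimplifiedDef consumes its varargs as (field name, properties) pairs, and the new guard rejects an odd argument count up front instead of failing later while building the mapping. A usage sketch (field names are illustrative):

    // Each pair is a field name followed by comma-separated property definitions;
    // an odd number of arguments now throws IllegalArgumentException immediately.
    XContentBuilder mapping = PutMappingRequest.buildFromSimplifiedDef(
        "my_type",
        "field1", "type=string,store=true",
        "field2", "type=long");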
@@ -30,11 +30,11 @@ import java.io.IOException;
  */
 public class PutMappingResponse extends AcknowledgedResponse {

-    PutMappingResponse() {
+    protected PutMappingResponse() {

     }

-    PutMappingResponse(boolean acknowledged) {
+    protected PutMappingResponse(boolean acknowledged) {
         super(acknowledged);
     }

@@ -29,10 +29,10 @@ import java.io.IOException;
  */
 public class PutIndexTemplateResponse extends AcknowledgedResponse {

-    PutIndexTemplateResponse() {
+    protected PutIndexTemplateResponse() {
     }

-    PutIndexTemplateResponse(boolean acknowledged) {
+    protected PutIndexTemplateResponse(boolean acknowledged) {
         super(acknowledged);
     }

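Note: widening the package-private constructors to protected lets code outside the package subclass these acknowledged responses. A hypothetical subclass enabled by the change (not part of this commit):

    // Only possible once the superclass constructors are protected.
    public class CustomPutMappingResponse extends PutMappingResponse {
        protected CustomPutMappingResponse(boolean acknowledged) {
            super(acknowledged);
        }
    }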
@@ -46,6 +46,7 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.query.QuerySearchResultProvider;
+import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.util.List;
@@ -74,7 +75,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
     protected final AtomicArray<FirstResult> firstResults;
     private volatile AtomicArray<ShardSearchFailure> shardFailures;
     private final Object shardFailuresMutex = new Object();
-    protected volatile ScoreDoc[] sortedShardList;
+    protected volatile ScoreDoc[] sortedShardDocs;

     protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService searchTransportService, ClusterService clusterService,
                                         IndexNameExpressionResolver indexNameExpressionResolver,
@@ -321,8 +322,11 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
         // we only release search context that we did not fetch from if we are not scrolling
         if (request.scroll() == null) {
             for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
-                final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
-                if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
+                QuerySearchResult queryResult = entry.value.queryResult().queryResult();
+                final TopDocs topDocs = queryResult.topDocs();
+                final Suggest suggest = queryResult.suggest();
+                if (((topDocs != null && topDocs.scoreDocs.length > 0) // the shard had matches
+                        || suggest != null && suggest.hasScoreDocs()) // or had suggest docs
                     && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
                     try {
                         DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
@@ -343,12 +347,8 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>

     protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry,
                                                          ScoreDoc[] lastEmittedDocPerShard) {
-        if (lastEmittedDocPerShard != null) {
-            ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
-            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
-        } else {
-            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
-        }
+        final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[entry.index] : null;
+        return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
     }

     protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
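Note: the release logic above now also frees search contexts on shards that produced only suggest hits, and createFetchRequest collapses its two branches because a null lastEmittedDoc is an accepted value. The new release predicate, restated as a sketch:

    // A shard's context is released when it produced top docs or suggest score
    // docs but none of them made it into the global top docs.
    boolean hadDocs = (topDocs != null && topDocs.scoreDocs.length > 0)
            || (suggest != null && suggest.hasScoreDocs());
    boolean release = hadDocs && docIdsToLoad.get(entry.index) == null;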
@@ -118,8 +118,8 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
         threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
             @Override
             public void doRun() throws IOException {
-                sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
-                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
+                sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults);
+                final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults,
                     queryFetchResults);
                 String scrollId = null;
                 if (request.scroll() != null) {
@@ -135,18 +135,17 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
     }

     void innerExecuteFetchPhase() throws Exception {
-        boolean useScroll = request.scroll() != null;
-        sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
-        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+        final boolean isScrollRequest = request.scroll() != null;
+        sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, queryResults);
+        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs);

         if (docIdsToLoad.asList().isEmpty()) {
             finishHim();
             return;
         }

-        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
-            request, sortedShardList, firstResults.length()
-        );
+        final ScoreDoc[] lastEmittedDocPerShard = (request.scroll() != null) ?
+            searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), sortedShardDocs, firstResults.length()) : null;
         final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
         for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
             QuerySearchResult queryResult = queryResults.get(entry.index);
@@ -196,12 +195,10 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
         threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
             @Override
             public void doRun() throws IOException {
-                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
+                final boolean isScrollRequest = request.scroll() != null;
+                final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, queryResults,
                     fetchResults);
-                String scrollId = null;
-                if (request.scroll() != null) {
-                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
-                }
+                String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null;
                 listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                     buildTookInMillis(), buildShardFailures()));
                 releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
@@ -60,14 +60,11 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
         threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
             @Override
             public void doRun() throws IOException {
-                boolean useScroll = request.scroll() != null;
-                sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
-                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
+                final boolean isScrollRequest = request.scroll() != null;
+                sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, firstResults);
+                final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults,
                     firstResults);
-                String scrollId = null;
-                if (request.scroll() != null) {
-                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
-                }
+                String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null;
                 listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                     buildTookInMillis(), buildShardFailures()));
             }
@@ -68,18 +68,17 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea

     @Override
     protected void moveToSecondPhase() throws Exception {
-        boolean useScroll = request.scroll() != null;
-        sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
-        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+        final boolean isScrollRequest = request.scroll() != null;
+        sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, firstResults);
+        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs);

         if (docIdsToLoad.asList().isEmpty()) {
             finishHim();
             return;
         }

-        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
-            request, sortedShardList, firstResults.length()
-        );
+        final ScoreDoc[] lastEmittedDocPerShard = isScrollRequest ?
+            searchPhaseController.getLastEmittedDocPerShard(firstResults.asList(), sortedShardDocs, firstResults.length()) : null;
         final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
         for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
             QuerySearchResultProvider queryResult = firstResults.get(entry.index);
@@ -129,12 +128,10 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
         threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
             @Override
             public void doRun() throws IOException {
-                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
+                final boolean isScrollRequest = request.scroll() != null;
+                final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, sortedShardDocs, firstResults,
                     fetchResults);
-                String scrollId = null;
-                if (request.scroll() != null) {
-                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults);
-                }
+                String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults) : null;
                 listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                     successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                 releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
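Note: the same refactor is applied across all four non-scroll async search actions above: compute isScrollRequest once, pass it through to merge(), and derive the scroll id with a conditional expression instead of a mutable local. The shared shape, as a sketch:

    final boolean isScrollRequest = request.scroll() != null;
    // the scroll id is only built for scroll requests; otherwise it stays null
    String scrollId = isScrollRequest
            ? TransportSearchHelper.buildScrollId(request.searchType(), firstResults)
            : null;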
@@ -168,8 +168,8 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
     }

     private void innerFinishHim() throws Exception {
-        ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
-        final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
+        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults);
+        final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults,
             queryFetchResults);
         String scrollId = null;
         if (request.scroll() != null) {
@@ -53,7 +53,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
     private volatile AtomicArray<ShardSearchFailure> shardFailures;
     final AtomicArray<QuerySearchResult> queryResults;
     final AtomicArray<FetchSearchResult> fetchResults;
-    private volatile ScoreDoc[] sortedShardList;
+    private volatile ScoreDoc[] sortedShardDocs;
     private final AtomicInteger successfulOps;

     SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
@@ -165,9 +165,9 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
     }

     private void executeFetchPhase() throws Exception {
-        sortedShardList = searchPhaseController.sortDocs(true, queryResults);
+        sortedShardDocs = searchPhaseController.sortDocs(true, queryResults);
         AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
-        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
+        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardDocs);

         if (docIdsToLoad.asList().isEmpty()) {
             finishHim();
@@ -175,7 +175,8 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
         }


-        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
+        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(),
+            sortedShardDocs, queryResults.length());
         final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
         for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
             IntArrayList docIds = entry.value;
@@ -216,7 +217,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
     }

     private void innerFinishHim() {
-        InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
+        InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryResults, fetchResults);
         String scrollId = null;
         if (request.scroll() != null) {
             scrollId = request.scrollId();
@@ -136,7 +136,7 @@ public class ReplicationOperation<
         }

         if (shard.relocating() && shard.relocatingNodeId().equals(localNodeId) == false) {
-            performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest);
+            performOnReplica(shard.getTargetRelocatingShard(), replicaRequest);
         }
     }
 }
@@ -167,7 +167,7 @@ public class ReplicationOperation<
                 shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
             String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
             logger.warn("[{}] {}", replicaException, shard.shardId(), message);
-            replicasProxy.failShard(shard, primary.routingEntry(), message, replicaException,
+            replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
                 ReplicationOperation.this::decPendingAndFinishIfNeeded,
                 ReplicationOperation.this::onPrimaryDemoted,
                 throwable -> decPendingAndFinishIfNeeded()
@@ -327,7 +327,7 @@ public class ReplicationOperation<
         /**
          * Fail the specified shard, removing it from the current set of active shards
          * @param replica          shard to fail
-         * @param primary          the primary shard that requested the failure
+         * @param primaryTerm      the primary term of the primary shard when requesting the failure
          * @param message          a (short) description of the reason
          * @param exception        the original exception which caused the ReplicationOperation to request the shard to be failed
          * @param onSuccess        a callback to call when the shard has been successfully removed from the active set.
@@ -335,7 +335,7 @@ public class ReplicationOperation<
          *                         by the master.
          * @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the
          */
-        void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, Runnable onSuccess,
+        void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
                        Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
     }

@@ -866,10 +866,10 @@ public abstract class TransportReplicationAction<
         }

         @Override
-        public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception,
+        public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception,
                               Runnable onSuccess, Consumer<Exception> onFailure, Consumer<Exception> onIgnoredFailure) {
-            shardStateAction.shardFailed(
-                replica, primary, message, exception,
+            shardStateAction.remoteShardFailed(
+                replica, primaryTerm, message, exception,
                 new ShardStateAction.Listener() {
                     @Override
                     public void onSuccess() {
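Note: the replicas proxy now identifies the failing authority by the primary term carried on the replica request rather than by the primary's ShardRouting, which is what lets the master reject requests from a demoted primary. A sketch of the new call shape (the three trailing callback names are placeholders):

    // The primary term, not the primary's routing entry, is what the master
    // validates the failure request against.
    replicasProxy.failShard(shard, replicaRequest.primaryTerm(), message, replicaException,
            onSuccess, onPrimaryDemoted, onIgnoredFailure);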
@@ -124,6 +124,9 @@ public abstract class TransportClient extends AbstractClient {
             List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
             entries.addAll(networkModule.getNamedWriteables());
             entries.addAll(searchModule.getNamedWriteables());
+            entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
+                .flatMap(p -> p.getNamedWriteables().stream())
+                .collect(Collectors.toList()));
             NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);

             ModulesBuilder modules = new ModulesBuilder();
@@ -167,7 +170,7 @@ public abstract class TransportClient extends AbstractClient {
             transportService.start();
             transportService.acceptIncomingRequests();

-            ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy);
+            ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy, namedWriteableRegistry);
             resourcesToClose.clear();
             return transportClient;
         } finally {
@@ -180,12 +183,15 @@ public abstract class TransportClient extends AbstractClient {
         private final List<LifecycleComponent> pluginLifecycleComponents;
         private final TransportClientNodesService nodesService;
         private final TransportProxyClient proxy;
+        private final NamedWriteableRegistry namedWriteableRegistry;

-        private ClientTemplate(Injector injector, List<LifecycleComponent> pluginLifecycleComponents, TransportClientNodesService nodesService, TransportProxyClient proxy) {
+        private ClientTemplate(Injector injector, List<LifecycleComponent> pluginLifecycleComponents,
+                TransportClientNodesService nodesService, TransportProxyClient proxy, NamedWriteableRegistry namedWriteableRegistry) {
             this.injector = injector;
             this.pluginLifecycleComponents = pluginLifecycleComponents;
             this.nodesService = nodesService;
             this.proxy = proxy;
+            this.namedWriteableRegistry = namedWriteableRegistry;
         }

         Settings getSettings() {
@@ -200,6 +206,7 @@ public abstract class TransportClient extends AbstractClient {
     public static final String CLIENT_TYPE = "transport";

     final Injector injector;
+    final NamedWriteableRegistry namedWriteableRegistry;

     private final List<LifecycleComponent> pluginLifecycleComponents;
     private final TransportClientNodesService nodesService;
@@ -228,6 +235,7 @@ public abstract class TransportClient extends AbstractClient {
         this.pluginLifecycleComponents = Collections.unmodifiableList(template.pluginLifecycleComponents);
         this.nodesService = template.nodesService;
         this.proxy = template.proxy;
+        this.namedWriteableRegistry = template.namedWriteableRegistry;
     }

     /**
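Note: the client now folds module- and plugin-provided named writeables into a single registry and threads it through ClientTemplate, so plugin-defined readers are available when deserializing responses. A sketch of the aggregation, written with a plain loop instead of the stream in the hunk:

    List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
    entries.addAll(networkModule.getNamedWriteables());
    entries.addAll(searchModule.getNamedWriteables());
    for (Plugin plugin : pluginsService.filterPlugins(Plugin.class)) {
        // each plugin may contribute its own named writeables
        entries.addAll(plugin.getNamedWriteables());
    }
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);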
@@ -199,10 +199,14 @@ public class ClusterChangedEvent {
         return nodesRemoved() || nodesAdded();
     }

-    // Determines whether or not the current cluster state represents an entirely
-    // different cluster from the previous cluster state, which will happen when a
-    // master node is elected that has never been part of the cluster before.
-    private boolean isNewCluster() {
+    /**
+     * Determines whether or not the current cluster state represents an entirely
+     * new cluster, either when a node joins a cluster for the first time or when
+     * the node receives a cluster state update from a brand new cluster (different
+     * UUID from the previous cluster), which will happen when a master node is
+     * elected that has never been part of the cluster before.
+     */
+    public boolean isNewCluster() {
         final String prevClusterUUID = previousState.metaData().clusterUUID();
         final String currClusterUUID = state.metaData().clusterUUID();
         return prevClusterUUID.equals(currClusterUUID) == false;
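Note: with isNewCluster now public and documented, cluster-state listeners can detect that the node has joined an entirely different cluster (a changed cluster UUID). A hedged usage sketch:

    // Inside a ClusterStateListener: reset any state tied to the old cluster
    // when the cluster UUID changes.
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.isNewCluster()) {
            // drop caches, re-register handlers, etc.
        }
    }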
@@ -280,6 +280,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

     public String prettyPrint() {
         StringBuilder sb = new StringBuilder();
+        sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n");
         sb.append("version: ").append(version).append("\n");
         sb.append("state uuid: ").append(stateUUID).append("\n");
         sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
@@ -624,6 +625,10 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
             return this;
         }

+        public DiscoveryNodes nodes() {
+            return nodes;
+        }
+
         public Builder routingResult(RoutingAllocation.Result routingResult) {
             this.routingTable = routingResult.routingTable();
             this.metaData = routingResult.metaData();
@@ -722,7 +727,6 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
             return PROTO.readFrom(in, localNode);
         }
-
     }

     @Override
@@ -29,9 +29,8 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.MasterNodeChangePredicate;
 import org.elasticsearch.cluster.NotMasterException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
@@ -64,11 +63,10 @@ import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
-import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.Set;

 public class ShardStateAction extends AbstractComponent {

@@ -87,19 +85,19 @@ public class ShardStateAction extends AbstractComponent {
         this.clusterService = clusterService;
         this.threadPool = threadPool;

-        transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger));
-        transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
+        transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardEntry::new, ThreadPool.Names.SAME, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger));
+        transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardEntry::new, ThreadPool.Names.SAME, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger));
     }

-    private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardRoutingEntry shardRoutingEntry, final Listener listener) {
+    private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) {
         DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode();
         if (masterNode == null) {
-            logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
-            waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
+            logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry);
+            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
         } else {
-            logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode.getId(), shardRoutingEntry);
+            logger.debug("{} sending [{}] to [{}] for shard entry [{}]", shardEntry.shardId, actionName, masterNode.getId(), shardEntry);
             transportService.sendRequest(masterNode,
-                actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+                actionName, shardEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                     @Override
                     public void handleResponse(TransportResponse.Empty response) {
                         listener.onSuccess();
@@ -108,9 +106,9 @@ public class ShardStateAction extends AbstractComponent {
                     @Override
                     public void handleException(TransportException exp) {
                         if (isMasterChannelException(exp)) {
-                            waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
+                            waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
                         } else {
-                            logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard [{}]", exp, shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry);
+                            logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard entry [{}]", exp, shardEntry.shardId, actionName, masterNode, shardEntry);
                             listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp);
                         }
                     }
@@ -129,34 +127,46 @@ public class ShardStateAction extends AbstractComponent {
     }

     /**
-     * Send a shard failed request to the master node to update the
-     * cluster state.
-     * @param shardRouting the shard to fail
-     * @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard)
+     * Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node.
+     *
+     * @param shardRouting the shard to fail
+     * @param primaryTerm  the primary term associated with the primary shard that is failing the shard.
      * @param message      the reason for the failure
      * @param failure      the underlying cause of the failure
      * @param listener     callback upon completion of the request
      */
-    public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Exception failure, Listener listener) {
+    public void remoteShardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
+        assert primaryTerm > 0L : "primary term should be strictly positive";
+        shardFailed(shardRouting, primaryTerm, message, failure, listener);
+    }
+
+    /**
+     * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
+     */
+    public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) {
+        shardFailed(shardRouting, 0L, message, failure, listener);
+    }
+
+    private void shardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure);
-        sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener);
+        ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message, failure);
+        sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
     }

     // visible for testing
-    protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardRoutingEntry shardRoutingEntry, Listener listener) {
+    protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, ShardEntry shardEntry, Listener listener) {
         observer.waitForNextChange(new ClusterStateObserver.Listener() {
             @Override
             public void onNewClusterState(ClusterState state) {
                 if (logger.isTraceEnabled()) {
-                    logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry);
+                    logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state.prettyPrint(), shardEntry);
                 }
-                sendShardAction(actionName, observer, shardRoutingEntry, listener);
+                sendShardAction(actionName, observer, shardEntry, listener);
             }

             @Override
             public void onClusterServiceClose() {
-                logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
+                logger.warn("{} node closed while execution action [{}] for shard entry [{}]", shardEntry.failure, shardEntry.shardId, actionName, shardEntry);
                 listener.onFailure(new NodeClosedException(clusterService.localNode()));
             }

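Note: the single shardFailed entry point is split in two: a shard failing itself uses localShardFailed (primary term 0 acts as the "local" sentinel), while a primary failing one of its replicas must pass its own strictly positive primary term through remoteShardFailed. A sketch of the two call sites (variable names are placeholders):

    // local failure: the shard reports its own demise, no term needed
    shardStateAction.localShardFailed(failedShard, "engine failure", exception, listener);

    // remote failure: the primary supplies its term so stale primaries are rejected
    shardStateAction.remoteShardFailed(replicaShard, primaryTerm, "failed to replicate", exception, listener);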
@@ -168,7 +178,7 @@ public class ShardStateAction extends AbstractComponent {
         }, MasterNodeChangePredicate.INSTANCE);
     }

-    private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+    private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
         private final ClusterService clusterService;
         private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor;
         private final ESLogger logger;
@@ -180,8 +190,8 @@ public class ShardStateAction extends AbstractComponent {
         }

         @Override
-        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-            logger.warn("{} received shard failed for {}", request.failure, request.shardRouting.shardId(), request);
+        public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
+            logger.warn("{} received shard failed for {}", request.failure, request.shardId, request);
             clusterService.submitStateUpdateTask(
                 "shard-failed",
                 request,
@@ -190,22 +200,22 @@ public class ShardStateAction extends AbstractComponent {
                 new ClusterStateTaskListener() {
                     @Override
                     public void onFailure(String source, Exception e) {
-                        logger.error("{} unexpected failure while failing shard [{}]", e, request.shardRouting.shardId(), request.shardRouting);
+                        logger.error("{} unexpected failure while failing shard [{}]", e, request.shardId, request);
                         try {
                             channel.sendResponse(e);
                         } catch (Exception channelException) {
                             channelException.addSuppressed(e);
-                            logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardRouting.shardId(), e, request.shardRouting);
+                            logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardId, e, request);
                         }
                     }

                     @Override
                     public void onNoLongerMaster(String source) {
-                        logger.error("{} no longer master while failing shard [{}]", request.shardRouting.shardId(), request.shardRouting);
+                        logger.error("{} no longer master while failing shard [{}]", request.shardId, request);
                         try {
                             channel.sendResponse(new NotMasterException(source));
                         } catch (Exception channelException) {
-                            logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardRouting.shardId(), request.shardRouting);
+                            logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, request.shardId, request);
                         }
                     }

@@ -214,7 +224,7 @@ public class ShardStateAction extends AbstractComponent {
                         try {
                             channel.sendResponse(TransportResponse.Empty.INSTANCE);
                         } catch (Exception channelException) {
-                            logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardRouting.shardId(), request.shardRouting);
+                            logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardId, request);
                         }
                     }
                 }
@@ -222,63 +232,81 @@ public class ShardStateAction extends AbstractComponent {
         }
     }

-    static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry> {
+    public static class ShardFailedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry> {
         private final AllocationService allocationService;
         private final RoutingService routingService;
         private final ESLogger logger;

-        ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
+        public ShardFailedClusterStateTaskExecutor(AllocationService allocationService, RoutingService routingService, ESLogger logger) {
             this.allocationService = allocationService;
             this.routingService = routingService;
             this.logger = logger;
         }

         @Override
-        public String describeTasks(List<ShardRoutingEntry> tasks) {
-            return tasks.stream().map(entry -> entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
-        }
+        public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
+            BatchResult.Builder<ShardEntry> batchResultBuilder = BatchResult.builder();
+            List<ShardEntry> tasksToBeApplied = new ArrayList<>();
+            List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>();
+            Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates

-        @Override
-        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
+            for (ShardEntry task : tasks) {
+                IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex());
+                if (indexMetaData == null) {
+                    // tasks that correspond to non-existent shards are marked as successful
+                    logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex());
+                    batchResultBuilder.success(task);
+                } else {
+                    // non-local requests
+                    if (task.primaryTerm > 0) {
+                        long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id());
+                        if (currentPrimaryTerm != task.primaryTerm) {
+                            assert currentPrimaryTerm > task.primaryTerm : "received a primary term with a higher term than in the " +
+                                "current cluster state (received [" + task.primaryTerm + "] but current is [" + currentPrimaryTerm + "])";
+                            logger.debug("{} failing shard failed task [{}] (primary term {} does not match current term {})", task.shardId,
+                                task, task.primaryTerm, indexMetaData.primaryTerm(task.shardId.id()));
+                            batchResultBuilder.failure(task, new NoLongerPrimaryShardException(
+                                task.shardId,
+                                "primary term [" + task.primaryTerm + "] did not match current primary term [" + currentPrimaryTerm + "]"));
+                            continue;
+                        }
+                    }

-            // partition tasks into those that correspond to shards
-            // that exist versus do not exist
-            Map<ValidationResult, List<ShardRoutingEntry>> partition =
-                tasks.stream().collect(Collectors.groupingBy(task -> validateTask(currentState, task)));
-
-            // tasks that correspond to non-existent shards are marked
-            // as successful
-            batchResultBuilder.successes(partition.getOrDefault(ValidationResult.SHARD_MISSING, Collections.emptyList()));
+                    ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId);
+                    if (matched == null) {
+                        // tasks that correspond to non-existent shards are marked as successful
+                        logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task);
+                        batchResultBuilder.success(task);
+                    } else {
+                        // remove duplicate actions as allocation service expects a clean list without duplicates
+                        if (seenShardRoutings.contains(matched)) {
+                            logger.trace("{} ignoring shard failed task [{}] (already scheduled to fail {})", task.shardId, task, matched);
+                            tasksToBeApplied.add(task);
+                        } else {
+                            logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task);
+                            tasksToBeApplied.add(task);
+                            shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(matched, task.message, task.failure));
+                            seenShardRoutings.add(matched);
+                        }
+                    }
+                }
+            }
+            assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size();

             ClusterState maybeUpdatedState = currentState;
-            List<ShardRoutingEntry> tasksToFail = partition.getOrDefault(ValidationResult.VALID, Collections.emptyList());
             try {
-                List<FailedRerouteAllocation.FailedShard> failedShards =
-                    tasksToFail
-                        .stream()
-                        .map(task -> new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure))
-                        .collect(Collectors.toList());
-                RoutingAllocation.Result result = applyFailedShards(currentState, failedShards);
+                RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied);
                 if (result.changed()) {
                     maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
                 }
-                batchResultBuilder.successes(tasksToFail);
+                batchResultBuilder.successes(tasksToBeApplied);
             } catch (Exception e) {
+                logger.warn("failed to apply failed shards {}", e, shardRoutingsToBeApplied);
                 // failures are communicated back to the requester
                 // cluster state will not be updated in this case
-                batchResultBuilder.failures(tasksToFail, e);
+                batchResultBuilder.failures(tasksToBeApplied, e);
             }

-            partition
-                .getOrDefault(ValidationResult.SOURCE_INVALID, Collections.emptyList())
-                .forEach(task -> batchResultBuilder.failure(
-                    task,
-                    new NoLongerPrimaryShardException(
-                        task.getShardRouting().shardId(),
-                        "source shard [" + task.sourceShardRouting + "] is neither the local allocation nor the primary allocation")
-                ));
-
             return batchResultBuilder.build(maybeUpdatedState);
         }

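Note: the executor above replaces routing-based validation with two cheap lookups: the primary term recorded in IndexMetaData and the allocation id in the routing table. Primary terms only ever increase, so any mismatch proves the sender was demoted in the meantime. The core of the staleness rule, restated as a sketch:

    long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id());
    if (task.primaryTerm > 0 && currentPrimaryTerm != task.primaryTerm) {
        // terms are monotonic, so a live primary can never present a newer term
        // than the cluster state; the request is rejected as stale
        assert currentPrimaryTerm > task.primaryTerm;
        // -> fail the task with NoLongerPrimaryShardException
    }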
@@ -287,36 +315,6 @@ public class ShardStateAction extends AbstractComponent {
             return allocationService.applyFailedShards(currentState, failedShards);
         }

-        private enum ValidationResult {
-            VALID,
-            SOURCE_INVALID,
-            SHARD_MISSING
-        }
-
-        private ValidationResult validateTask(ClusterState currentState, ShardRoutingEntry task) {
-
-            // non-local requests
-            if (!task.shardRouting.isSameAllocation(task.sourceShardRouting)) {
-                IndexShardRoutingTable indexShard = currentState.getRoutingTable().shardRoutingTableOrNull(task.shardRouting.shardId());
-                if (indexShard == null) {
-                    return ValidationResult.SOURCE_INVALID;
-                }
-                ShardRouting primaryShard = indexShard.primaryShard();
-                if (primaryShard == null || !primaryShard.isSameAllocation(task.sourceShardRouting)) {
-                    return ValidationResult.SOURCE_INVALID;
-                }
-            }
-
-            RoutingNode routingNode = currentState.getRoutingNodes().node(task.getShardRouting().currentNodeId());
-            if (routingNode != null) {
-                ShardRouting maybe = routingNode.getByShardId(task.getShardRouting().shardId());
-                if (maybe != null && maybe.isSameAllocation(task.getShardRouting())) {
-                    return ValidationResult.VALID;
-                }
-            }
-            return ValidationResult.SHARD_MISSING;
-        }
-
         @Override
         public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
             int numberOfUnassignedShards = clusterChangedEvent.state().getRoutingNodes().unassigned().size();
@@ -332,11 +330,11 @@ public class ShardStateAction extends AbstractComponent {

     public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
         ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
-        ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, shardRouting, message, null);
-        sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardRoutingEntry, listener);
+        ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null);
+        sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener);
     }

-    private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+    private static class ShardStartedTransportHandler implements TransportRequestHandler<ShardEntry> {
         private final ClusterService clusterService;
         private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor;
         private final ESLogger logger;
@@ -348,8 +346,8 @@ public class ShardStateAction extends AbstractComponent {
         }

         @Override
-        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-            logger.debug("{} received shard started for [{}]", request.shardRouting.shardId(), request);
+        public void messageReceived(ShardEntry request, TransportChannel channel) throws Exception {
+            logger.debug("{} received shard started for [{}]", request.shardId, request);
             clusterService.submitStateUpdateTask(
                 "shard-started",
                 request,
@@ -360,7 +358,7 @@ public class ShardStateAction extends AbstractComponent {
         }
     }

-    private static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
+    public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor<ShardEntry>, ClusterStateTaskListener {
         private final AllocationService allocationService;
         private final ESLogger logger;

@@ -370,17 +368,45 @@ public class ShardStateAction extends AbstractComponent {
         }

         @Override
-        public String describeTasks(List<ShardRoutingEntry> tasks) {
-            return tasks.stream().map(entry -> entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
-        }
-
-        @Override
-        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
+        public BatchResult<ShardEntry> execute(ClusterState currentState, List<ShardEntry> tasks) throws Exception {
+            BatchResult.Builder<ShardEntry> builder = BatchResult.builder();
+            List<ShardEntry> tasksToBeApplied = new ArrayList<>();
             List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
-            for (ShardRoutingEntry task : tasks) {
-                shardRoutingsToBeApplied.add(task.shardRouting);
+            Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates
+            for (ShardEntry task : tasks) {
+                assert task.primaryTerm == 0L : "shard is only started by itself: " + task;
+
+                ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId);
+                if (matched == null) {
+                    // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started
+                    // events on every cluster state publishing that does not contain the shard as started yet. This means that old stale
+                    // requests might still be in flight even after the shard has already been started or failed on the master. We just
+                    // ignore these requests for now.
+                    logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", task.shardId, task);
+                    builder.success(task);
+                } else {
+                    if (matched.initializing() == false) {
+                        assert matched.active() : "expected active shard routing for task " + task + " but found " + matched;
+                        // same as above, this might have been a stale in-flight request, so we just ignore.
+                        logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", task.shardId, task,
+                            matched);
+                        builder.success(task);
+                    } else {
+                        // remove duplicate actions as allocation service expects a clean list without duplicates
+                        if (seenShardRoutings.contains(matched)) {
+                            logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", task.shardId, task, matched);
+                            tasksToBeApplied.add(task);
+                        } else {
+                            logger.debug("{} starting shard {} (shard started task: [{}])", task.shardId, matched, task);
+                            tasksToBeApplied.add(task);
+                            shardRoutingsToBeApplied.add(matched);
+                            seenShardRoutings.add(matched);
+                        }
+                    }
+                }
             }
+            assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size();

             ClusterState maybeUpdatedState = currentState;
             try {
                 RoutingAllocation.Result result =
@ -388,9 +414,10 @@ public class ShardStateAction extends AbstractComponent {
|
|||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
builder.successes(tasks);
|
||||
builder.successes(tasksToBeApplied);
|
||||
} catch (Exception e) {
|
||||
builder.failures(tasks, e);
|
||||
logger.warn("failed to apply started shards {}", e, shardRoutingsToBeApplied);
|
||||
builder.failures(tasksToBeApplied, e);
|
||||
}
|
||||
|
||||
return builder.build(maybeUpdatedState);
|
||||
|
@ -402,31 +429,38 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
public static class ShardRoutingEntry extends TransportRequest {
|
||||
ShardRouting shardRouting;
|
||||
ShardRouting sourceShardRouting;
|
||||
public static class ShardEntry extends TransportRequest {
|
||||
ShardId shardId;
|
||||
String allocationId;
|
||||
long primaryTerm;
|
||||
String message;
|
||||
Exception failure;
|
||||
|
||||
public ShardRoutingEntry() {
|
||||
public ShardEntry() {
|
||||
}
|
||||
|
||||
ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Exception failure) {
|
||||
this.shardRouting = shardRouting;
|
||||
this.sourceShardRouting = sourceShardRouting;
|
||||
public ShardEntry(ShardId shardId, String allocationId, long primaryTerm, String message, @Nullable Exception failure) {
|
||||
this.shardId = shardId;
|
||||
this.allocationId = allocationId;
|
||||
this.primaryTerm = primaryTerm;
|
||||
this.message = message;
|
||||
this.failure = failure;
|
||||
}
|
||||
|
||||
public ShardRouting getShardRouting() {
|
||||
return shardRouting;
|
||||
public ShardId getShardId() {
|
||||
return shardId;
|
||||
}
|
||||
|
||||
public String getAllocationId() {
|
||||
return allocationId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
shardRouting = new ShardRouting(in);
|
||||
sourceShardRouting = new ShardRouting(in);
|
||||
shardId = ShardId.readShardId(in);
|
||||
allocationId = in.readString();
|
||||
primaryTerm = in.readVLong();
|
||||
message = in.readString();
|
||||
failure = in.readException();
|
||||
}
|
||||
|
@ -434,8 +468,9 @@ public class ShardStateAction extends AbstractComponent {
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
shardRouting.writeTo(out);
|
||||
sourceShardRouting.writeTo(out);
|
||||
shardId.writeTo(out);
|
||||
out.writeString(allocationId);
|
||||
out.writeVLong(primaryTerm);
|
||||
out.writeString(message);
|
||||
out.writeException(failure);
|
||||
}
|
||||
|
@ -443,8 +478,9 @@ public class ShardStateAction extends AbstractComponent {
|
|||
@Override
|
||||
public String toString() {
|
||||
List<String> components = new ArrayList<>(4);
|
||||
components.add("target shard [" + shardRouting + "]");
|
||||
components.add("source shard [" + sourceShardRouting + "]");
|
||||
components.add("shard id [" + shardId + "]");
|
||||
components.add("allocation id [" + allocationId + "]");
|
||||
components.add("primary term [" + primaryTerm + "]");
|
||||
components.add("message [" + message + "]");
|
||||
if (failure != null) {
|
||||
components.add("failure [" + ExceptionsHelper.detailedMessage(failure) + "]");
|
||||
|
|
|
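
The new started-shard executor above resolves each ShardEntry against the routing table by allocation id, treats stale entries as immediate successes, and deduplicates entries that resolve to the same routing before handing them to the allocation service. A minimal standalone sketch of that selection logic (Task and Routing are hypothetical stand-ins for ShardEntry and ShardRouting, written with records for brevity, not the Elasticsearch classes):

import java.util.*;
import java.util.function.Function;

// Hypothetical stand-ins for ShardEntry / ShardRouting, for illustration only.
record Task(String shardId, String allocationId) {}
record Routing(String shardId, String allocationId, boolean initializing) {}

public class StartedTaskBatcher {

    // Stale tasks (no routing, or routing no longer initializing) are treated as
    // successes with nothing to apply; the rest are deduplicated so the allocation
    // step sees each routing at most once.
    static List<Routing> select(List<Task> tasks, Function<Task, Routing> resolver) {
        Set<Routing> seen = new HashSet<>(); // to prevent duplicates
        List<Routing> toStart = new ArrayList<>();
        for (Task task : tasks) {
            Routing matched = resolver.apply(task);
            if (matched == null || !matched.initializing()) {
                continue; // stale in-flight request, nothing to start
            }
            if (seen.add(matched)) {
                toStart.add(matched);
            }
        }
        return toStart;
    }

    public static void main(String[] args) {
        Routing r = new Routing("[idx][0]", "a1", true);
        Map<String, Routing> byAllocationId = Map.of("a1", r);
        List<Task> tasks = List.of(new Task("[idx][0]", "a1"),
            new Task("[idx][0]", "a1"), new Task("[idx][0]", "gone"));
        // Prints the routing once: the duplicate and the stale task are filtered out.
        System.out.println(select(tasks, t -> byAllocationId.get(t.allocationId())));
    }
}
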
@@ -357,14 +357,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
for (DiscoveryNode node : this) {
if (newNodes.contains(node.getId())) {
builder.put(node);
builder.add(node);
}
}
return builder.build();
}

public DiscoveryNodes newNode(DiscoveryNode node) {
return new Builder(this).put(node).build();
return new Builder(this).add(node).build();
}

/**
@@ -554,8 +554,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
node = localNode;
}
// someone already built this and validated it's OK, skip the n2 scans
assert builder.validatePut(node) == null : "building disco nodes from network doesn't pass preflight: "
+ builder.validatePut(node);
assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: "
+ builder.validateAdd(node);
builder.putUnsafe(node);
}
return builder.build();
@@ -592,10 +592,10 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements

/**
* adds a disco node to the builder. Will throw an {@link IllegalArgumentException} if
* the supplied node doesn't pass the pre-flight checks performed by {@link #validatePut(DiscoveryNode)}
* the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(DiscoveryNode)}
*/
public Builder put(DiscoveryNode node) {
final String preflight = validatePut(node);
public Builder add(DiscoveryNode node) {
final String preflight = validateAdd(node);
if (preflight != null) {
throw new IllegalArgumentException(preflight);
}
@@ -603,6 +603,16 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return this;
}

/**
* Get a node by its id
*
* @param nodeId id of the wanted node
* @return wanted node if it exists. Otherwise <code>null</code>
*/
@Nullable public DiscoveryNode get(String nodeId) {
return nodes.get(nodeId);
}

private void putUnsafe(DiscoveryNode node) {
nodes.put(node.getId(), node);
}
@@ -635,10 +645,10 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
*
* @return null if all is OK or an error message explaining why a node can not be added.
*
* Note: if this method returns a non-null value, calling {@link #put(DiscoveryNode)} will fail with an
* Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an
* exception
*/
private String validatePut(DiscoveryNode node) {
private String validateAdd(DiscoveryNode node) {
for (ObjectCursor<DiscoveryNode> cursor : nodes.values()) {
final DiscoveryNode existingNode = cursor.value;
if (node.getAddress().equals(existingNode.getAddress()) &&
@@ -646,9 +656,9 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return "can't add node " + node + ", found existing node " + existingNode + " with same address";
}
if (node.getId().equals(existingNode.getId()) &&
node.getAddress().equals(existingNode.getAddress()) == false) {
node.equals(existingNode) == false) {
return "can't add node " + node + ", found existing node " + existingNode
+ " with the same id, but a different address";
+ " with the same id but is a different node instance";
}
}
return null;
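
The renamed DiscoveryNodes.Builder#add applies the validateAdd preflight shown above: a node is rejected when another node already publishes the same address, or when the same id resolves to a different node instance. A toy model of that check, where Node is an illustrative stand-in for DiscoveryNode rather than the real API:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for DiscoveryNode: an id plus a published address.
record Node(String id, String address) {}

public class NodesBuilderSketch {
    private final Map<String, Node> nodes = new HashMap<>();

    // Returns null if the node may be added, else an error message (as validateAdd does).
    String validateAdd(Node node) {
        for (Node existing : nodes.values()) {
            if (node.address().equals(existing.address()) && !node.id().equals(existing.id())) {
                return "can't add node " + node + ", found existing node " + existing + " with same address";
            }
            if (node.id().equals(existing.id()) && !node.equals(existing)) {
                return "can't add node " + node + ", found existing node " + existing
                    + " with the same id but is a different node instance";
            }
        }
        return null;
    }

    NodesBuilderSketch add(Node node) {
        String preflight = validateAdd(node);
        if (preflight != null) {
            throw new IllegalArgumentException(preflight);
        }
        nodes.put(node.id(), node);
        return this;
    }

    public static void main(String[] args) {
        NodesBuilderSketch b = new NodesBuilderSketch().add(new Node("n1", "127.0.0.1:9300"));
        b.add(new Node("n1", "127.0.0.1:9300")); // same node again: accepted
        try {
            b.add(new Node("n2", "127.0.0.1:9300")); // address already taken by n1
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
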
@@ -98,7 +98,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}
if (shard.relocating()) {
// create the target initializing shard routing on the node the shard is relocating to
allInitializingShards.add(shard.buildTargetRelocatingShard());
allInitializingShards.add(shard.getTargetRelocatingShard());
}
if (shard.assignedToNode()) {
assignedShards.add(shard);
@@ -108,7 +108,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order
// add the counterpart shard with relocatingNodeId reflecting the source from which
// it's relocating.
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
ShardRouting targetShardRouting = shard.getTargetRelocatingShard();
addInitialRecovery(targetShardRouting, indexShard.primary);
previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting);
if (previousValue != null) {
@@ -276,6 +276,20 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet);
}

@Nullable
public ShardRouting getByAllocationId(ShardId shardId, String allocationId) {
final List<ShardRouting> replicaSet = assignedShards.get(shardId);
if (replicaSet == null) {
return null;
}
for (ShardRouting shardRouting : replicaSet) {
if (shardRouting.allocationId().getId().equals(allocationId)) {
return shardRouting;
}
}
return null;
}

/**
* Returns the active primary shard for the given shard id or <code>null</code> if
* no primary is found or the primary is not active.
@@ -406,7 +420,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
ensureMutable();
relocatingShards++;
ShardRouting source = shard.relocate(nodeId, expectedShardSize);
ShardRouting target = source.buildTargetRelocatingShard();
ShardRouting target = source.getTargetRelocatingShard();
updateAssigned(shard, source);
node(target.currentNodeId()).add(target);
assignedShardsAdd(target);
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -145,6 +146,26 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
.orElse(null);
}

@Nullable
public ShardRouting getByAllocationId(ShardId shardId, String allocationId) {
IndexShardRoutingTable shardRoutingTable = shardRoutingTableOrNull(shardId);
if (shardRoutingTable == null) {
return null;
}
for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
if (shardRouting.allocationId().getId().equals(allocationId)) {
return shardRouting;
}
if (shardRouting.relocating()) {
if (shardRouting.getTargetRelocatingShard().allocationId().getId().equals(allocationId)) {
return shardRouting.getTargetRelocatingShard();
}
}
}
return null;
}

public boolean validate(MetaData metaData) {
for (IndexRoutingTable indexRoutingTable : this) {
if (indexRoutingTable.validate(metaData) == false) {
@@ -245,7 +266,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
if (predicate.test(shardRouting)) {
set.add(shardRouting.shardsIt());
if (includeRelocationTargets && shardRouting.relocating()) {
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard())));
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.getTargetRelocatingShard())));
}
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.<ShardRouting>emptyList()));
@@ -278,7 +299,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
if (predicate.test(shardRouting)) {
shards.add(shardRouting);
if (includeRelocationTargets && shardRouting.relocating()) {
shards.add(shardRouting.buildTargetRelocatingShard());
shards.add(shardRouting.getTargetRelocatingShard());
}
}
}
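
The new getByAllocationId has to consider that a relocating shard carries a second allocation id: the relocation target created with AllocationId.newTargetRelocation. A self-contained sketch of that lookup, with Shard as an illustrative stand-in for ShardRouting:

import java.util.List;

// Hypothetical stand-in for ShardRouting: a relocating source carries a target
// routing with its own allocation id (cf. getTargetRelocatingShard() above).
record Shard(String allocationId, Shard relocationTarget) {
    boolean relocating() { return relocationTarget != null; }
}

public class ByAllocationIdSketch {

    // Mirrors the idea in RoutingTable#getByAllocationId: match assigned shards
    // first, then fall back to the relocation target of a relocating shard.
    static Shard getByAllocationId(List<Shard> assigned, String allocationId) {
        for (Shard shard : assigned) {
            if (shard.allocationId().equals(allocationId)) {
                return shard;
            }
            if (shard.relocating() && shard.relocationTarget().allocationId().equals(allocationId)) {
                return shard.relocationTarget();
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Shard target = new Shard("t1", null);
        Shard source = new Shard("s1", target);
        System.out.println(getByAllocationId(List.of(source), "t1")); // resolves the target
    }
}
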
@@ -56,6 +56,8 @@ public final class ShardRouting implements Writeable, ToXContent {
private final AllocationId allocationId;
private final transient List<ShardRouting> asList;
private final long expectedShardSize;
@Nullable
private final ShardRouting targetRelocatingShard;

/**
* A constructor to internally create shard routing instances, note, the internal flag should only be set to true
@@ -74,11 +76,22 @@ public final class ShardRouting implements Writeable, ToXContent {
this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId;
this.expectedShardSize = expectedShardSize;
this.targetRelocatingShard = initializeTargetRelocatingShard();
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
}

@Nullable
private ShardRouting initializeTargetRelocatingShard() {
if (state == ShardRoutingState.RELOCATING) {
return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary,
ShardRoutingState.INITIALIZING, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize);
} else {
return null;
}
}

/**
* Creates a new unassigned shard.
*/
@@ -177,14 +190,13 @@ public final class ShardRouting implements Writeable, ToXContent {
}

/**
* Creates a shard routing representing the target shard.
* Returns a shard routing representing the target shard.
* The target shard routing will be the INITIALIZING state and have relocatingNodeId set to the
* source node.
*/
public ShardRouting buildTargetRelocatingShard() {
public ShardRouting getTargetRelocatingShard() {
assert relocating();
return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), expectedShardSize);
return targetRelocatingShard;
}

/**
@@ -282,6 +294,7 @@ public final class ShardRouting implements Writeable, ToXContent {
}
expectedShardSize = shardSize;
asList = Collections.singletonList(this);
targetRelocatingShard = initializeTargetRelocatingShard();
}

public ShardRouting(StreamInput in) throws IOException {
@@ -453,7 +466,7 @@ public final class ShardRouting implements Writeable, ToXContent {
}

/**
* Returns <code>true</code> if this shard is a relocation target for another shard (i.e., was created with {@link #buildTargetRelocatingShard()}
* Returns <code>true</code> if this shard is a relocation target for another shard (i.e., was created with {@link #initializeTargetRelocatingShard()}
*/
public boolean isRelocationTarget() {
return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null;
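
Caching targetRelocatingShard in the constructor means getTargetRelocatingShard() now returns the same instance on every call, which is what allows the identity asserts added elsewhere in this commit (for example relocationSourceShard.getTargetRelocatingShard() == startedShard). A small sketch of the pattern, under the assumption that the derived object is immutable:

// Sketch of the caching pattern adopted above: derive the dependent immutable
// object once in the constructor so repeated getters return the same instance
// (which is what makes identity checks like a.getTarget() == b meaningful).
public final class CachedDerived {
    private final boolean relocating;
    private final CachedDerived target; // null unless relocating

    public CachedDerived(boolean relocating) {
        this.relocating = relocating;
        this.target = relocating ? new CachedDerived(false) : null;
    }

    public CachedDerived getTarget() {
        assert relocating;
        return target; // same instance on every call, unlike building a fresh copy
    }

    public static void main(String[] args) {
        CachedDerived source = new CachedDerived(true);
        System.out.println(source.getTarget() == source.getTarget()); // true
    }
}
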
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.Result;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -84,23 +85,25 @@ public class AllocationService extends AbstractComponent {
}

/**
* Applies the started shards. Note, shards can be called several times within this method.
* Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
* provided as parameter and no duplicates should be contained.
* <p>
* If the same instance of the routing table is returned, then no change has been made.</p>
*/
public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards) {
public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
return applyStartedShards(clusterState, startedShards, true);
}

public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards, boolean withReroute) {
public Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards, boolean withReroute) {
if (startedShards.isEmpty()) {
return new Result(false, clusterState.routingTable(), clusterState.metaData());
}
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo(), currentNanoTime());
boolean changed = applyStartedShards(allocation, startedShards);
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards,
clusterInfoService.getClusterInfo(), currentNanoTime());
applyStartedShards(allocation, startedShards);
gatewayAllocator.applyStartedShards(allocation);
if (withReroute) {
reroute(allocation);
@@ -109,12 +112,12 @@ public class AllocationService extends AbstractComponent {
return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ...");
}

protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) {
return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations());

}

protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
protected Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) {
MetaData oldMetaData = allocation.metaData();
RoutingTable oldRoutingTable = allocation.routingTable();
RoutingNodes newRoutingNodes = allocation.routingNodes();
@@ -128,7 +131,7 @@ public class AllocationService extends AbstractComponent {
metaData(newMetaData).routingTable(newRoutingTable).build()),
reason
);
return new RoutingAllocation.Result(true, newRoutingTable, newMetaData, explanations);
return new Result(true, newRoutingTable, newMetaData, explanations);
}

/**
@@ -186,7 +189,7 @@ public class AllocationService extends AbstractComponent {
// we do not use newPrimary.isTargetRelocationOf(oldPrimary) because that one enforces newPrimary to
// be initializing. However, when the target shard is activated, we still want the primary term to stay
// the same
(oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()))) {
(oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.getTargetRelocatingShard()))) {
// do nothing
} else {
// incrementing the primary term
@@ -210,38 +213,76 @@ public class AllocationService extends AbstractComponent {
}
}

public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
public Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
}

/**
* Applies the failed shards. Note, shards can be called several times within this method.
* Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be
* provided as parameter and no duplicates should be contained.
*
* <p>
* If the same instance of the routing table is returned, then no change has been made.</p>
*/
public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
if (failedShards.isEmpty()) {
return new Result(false, clusterState.routingTable(), clusterState.metaData());
}
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
long currentNanoTime = currentNanoTime();
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo(), currentNanoTime);
boolean changed = false;
// as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT));
FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards,
clusterInfoService.getClusterInfo(), currentNanoTime);

for (FailedRerouteAllocation.FailedShard failedShardEntry : failedShards) {
ShardRouting shardToFail = failedShardEntry.routingEntry;
allocation.addIgnoreShardForNode(shardToFail.shardId(), shardToFail.currentNodeId());
// failing a primary also fails initializing replica shards, re-resolve ShardRouting
ShardRouting failedShard = routingNodes.getByAllocationId(shardToFail.shardId(), shardToFail.allocationId().getId());
if (failedShard != null) {
if (failedShard != shardToFail) {
logger.trace("{} shard routing modified in an earlier iteration (previous: {}, current: {})",
shardToFail.shardId(), shardToFail, failedShard);
}
int failedAllocations = failedShard.unassignedInfo() != null ? failedShard.unassignedInfo().getNumFailedAllocations() : 0;
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShardEntry.message,
failedShardEntry.failure, failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT);
applyFailedShard(allocation, failedShard, unassignedInfo);
} else {
logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail);
}
}
gatewayAllocator.applyFailedShards(allocation);

reroute(allocation);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString());
return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
}

/**
* unassigns any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
* if needed.
*/
public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
clusterInfoService.getClusterInfo(), currentNanoTime(), false);

// first, clear from the shards any node id they used to belong to that is now dead
boolean changed = deassociateDeadNodes(allocation);

if (reroute) {
changed |= reroute(allocation);
}

if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
return buildResultAndLogHealthChange(allocation, reason);
}

/**
@@ -259,9 +300,9 @@ public class AllocationService extends AbstractComponent {
metaData.getIndexSafe(shardRouting.index()).getSettings());
if (newComputedLeftDelayNanos == 0) {
changed = true;
unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false,
unassignedInfo.getLastAllocationStatus()));
unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(),
unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()));
}
}
}
@@ -285,7 +326,7 @@ public class AllocationService extends AbstractComponent {
.collect(Collectors.joining(", "));
}

public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
public Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// we don't shuffle the unassigned shards here, to try and get as close as possible to
// a consistent result of the effect the commands have on the routing
@@ -311,7 +352,7 @@ public class AllocationService extends AbstractComponent {
* <p>
* If the same instance of the routing table is returned, then no change has been made.
*/
public RoutingAllocation.Result reroute(ClusterState clusterState, String reason) {
public Result reroute(ClusterState clusterState, String reason) {
return reroute(clusterState, reason, false);
}

@@ -320,7 +361,7 @@ public class AllocationService extends AbstractComponent {
* <p>
* If the same instance of the routing table is returned, then no change has been made.
*/
protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
protected Result reroute(ClusterState clusterState, String reason, boolean debug) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
@@ -328,7 +369,7 @@ public class AllocationService extends AbstractComponent {
clusterInfoService.getClusterInfo(), currentNanoTime(), false);
allocation.debugDecision(debug);
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
return new Result(false, clusterState.routingTable(), clusterState.metaData());
}
return buildResultAndLogHealthChange(allocation, reason);
}
@@ -342,13 +383,9 @@ public class AllocationService extends AbstractComponent {
}

private boolean reroute(RoutingAllocation allocation) {
boolean changed = false;
// first, clear from the shards any node id they used to belong to that is now dead
changed |= deassociateDeadNodes(allocation);
assert deassociateDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes";

// elect primaries *before* allocating unassigned, so backups of primaries that failed
// will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
changed |= electPrimariesAndUnassignedDanglingReplicas(allocation);
boolean changed = electPrimariesAndUnassignedDanglingReplicas(allocation);

// now allocate all the unassigned to available nodes
if (allocation.routingNodes().unassigned().size() > 0) {
@@ -380,8 +417,8 @@ public class AllocationService extends AbstractComponent {
if (candidate != null) {
shardEntry = unassignedIterator.demotePrimaryToReplicaShard();
ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate);
changed = true;
if (primarySwappedCandidate.relocatingNodeId() != null) {
changed = true;
// it's also relocating, make sure to move the other routing to primary
RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId());
if (node != null) {
@@ -396,7 +433,6 @@ public class AllocationService extends AbstractComponent {
IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index());
if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
routingNodes.reinitShadowPrimary(primarySwappedCandidate);
changed = true;
}
}
}
@@ -420,7 +456,7 @@ public class AllocationService extends AbstractComponent {
boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT);
applyFailedShard(allocation, shardRouting, false, unassignedInfo);
applyFailedShard(allocation, shardRouting, unassignedInfo);
}
// it's a dead node, remove it, note, it's important to remove it *after* we apply failed shard
// since it relies on the fact that the RoutingNode exists in the list of nodes
@@ -429,111 +465,70 @@ public class AllocationService extends AbstractComponent {
return changed;
}

private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting primary) {
private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting failedPrimary) {
assert failedPrimary.primary() : "can only fail replicas for primary shard: " + failedPrimary;
List<ShardRouting> replicas = new ArrayList<>();
for (ShardRouting routing : allocation.routingNodes().assignedShards(primary.shardId())) {
for (ShardRouting routing : allocation.routingNodes().assignedShards(failedPrimary.shardId())) {
if (!routing.primary() && routing.initializing()) {
replicas.add(routing);
}
}
boolean changed = false;
for (ShardRouting routing : replicas) {
changed |= applyFailedShard(allocation, routing, false,
new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT));
for (ShardRouting failedReplica : replicas) {
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED,
"primary failed while replica initializing", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT);
applyFailedShard(allocation, failedReplica, unassignedInfo);
}
return changed;
return replicas.isEmpty() == false;
}

private boolean applyStartedShards(RoutingAllocation routingAllocation, Iterable<? extends ShardRouting> startedShardEntries) {
boolean dirty = false;
// apply shards might be called several times with the same shard, ignore it
private void applyStartedShards(RoutingAllocation routingAllocation, List<ShardRouting> startedShardEntries) {
assert startedShardEntries.isEmpty() == false : "non-empty list of started shard entries expected";
RoutingNodes routingNodes = routingAllocation.routingNodes();
for (ShardRouting startedShard : startedShardEntries) {
assert startedShard.initializing();
assert startedShard.initializing() : "only initializing shards can be started";
assert routingAllocation.metaData().index(startedShard.shardId().getIndex()) != null :
"shard started for unknown index (shard entry: " + startedShard + ")";
assert startedShard == routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId()) :
"shard routing to start does not exist in routing table, expected: " + startedShard + " but was: " +
routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId());

// validate index still exists. strictly speaking this is not needed but it gives clearer logs
if (routingAllocation.metaData().index(startedShard.index()) == null) {
logger.debug("{} ignoring shard started, unknown index (routing: {})", startedShard.shardId(), startedShard);
continue;
}
routingNodes.started(startedShard);
logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);

RoutingNode currentRoutingNode = routingNodes.node(startedShard.currentNodeId());
if (currentRoutingNode == null) {
logger.debug("{} failed to find shard in order to start it [failed to find node], ignoring (routing: {})", startedShard.shardId(), startedShard);
continue;
}

ShardRouting matchingShard = currentRoutingNode.getByShardId(startedShard.shardId());
if (matchingShard == null) {
logger.debug("{} failed to find shard in order to start it [failed to find shard], ignoring (routing: {})", startedShard.shardId(), startedShard);
} else if (matchingShard.isSameAllocation(startedShard) == false) {
logger.debug("{} failed to find shard with matching allocation id in order to start it [failed to find matching shard], ignoring (routing: {}, matched shard routing: {})", startedShard.shardId(), startedShard, matchingShard);
} else {
startedShard = matchingShard;
if (startedShard.active()) {
logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard);
} else {
assert startedShard.initializing();
dirty = true;
routingNodes.started(startedShard);
logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);

if (startedShard.relocatingNodeId() != null) {
// relocation target has been started, remove relocation source
RoutingNode relocationSourceNode = routingNodes.node(startedShard.relocatingNodeId());
ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(startedShard.shardId());
assert relocationSourceShard.isRelocationSourceOf(startedShard);
routingNodes.remove(relocationSourceShard);
}
}
if (startedShard.relocatingNodeId() != null) {
// relocation target has been started, remove relocation source
RoutingNode relocationSourceNode = routingNodes.node(startedShard.relocatingNodeId());
ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(startedShard.shardId());
assert relocationSourceShard.isRelocationSourceOf(startedShard);
assert relocationSourceShard.getTargetRelocatingShard() == startedShard : "relocation target mismatch, expected: "
+ startedShard + " but was: " + relocationSourceShard.getTargetRelocatingShard();
routingNodes.remove(relocationSourceShard);
}
}
return dirty;
}

/**
* Applies the relevant logic to handle a failed shard. Returns <tt>true</tt> if changes happened that
* require relocation.
* Applies the relevant logic to handle a failed shard.
*/
private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList, UnassignedInfo unassignedInfo) {
IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
if (indexRoutingTable == null) {
logger.debug("{} ignoring shard failure, unknown index in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
return false;
}
private void applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, UnassignedInfo unassignedInfo) {
RoutingNodes routingNodes = allocation.routingNodes();
assert failedShard.assignedToNode() : "only assigned shards can be failed";
assert allocation.metaData().index(failedShard.shardId().getIndex()) != null :
"shard failed for unknown index (shard entry: " + failedShard + ")";
assert routingNodes.getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()) == failedShard :
"shard routing to fail does not exist in routing table, expected: " + failedShard + " but was: " +
routingNodes.getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId());

RoutingNode matchedNode = routingNodes.node(failedShard.currentNodeId());
if (matchedNode == null) {
logger.debug("{} ignoring shard failure, unknown node in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
return false;
}

ShardRouting matchedShard = matchedNode.getByShardId(failedShard.shardId());
if (matchedShard != null && matchedShard.isSameAllocation(failedShard)) {
logger.debug("{} failed shard {} found in routingNodes, failing it ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
// replace incoming instance to make sure we work on the latest one
failedShard = matchedShard;
} else {
logger.debug("{} ignoring shard failure, unknown allocation id in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
return false;
}

logger.debug("{} failing shard {} with unassigned info ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
if (failedShard.primary()) {
// fail replicas first otherwise we move RoutingNodes into an inconsistent state
failReplicasForUnassignedPrimary(allocation, failedShard);
}

if (addToIgnoreList) {
// make sure we ignore this shard on the relevant node
allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
}

cancelShard(logger, failedShard, unassignedInfo, routingNodes);
assert matchedNode.getByShardId(failedShard.shardId()) == null : "failedShard " + failedShard + " was matched but wasn't removed";
return true;
assert routingNodes.node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == null : "failedShard " + failedShard +
" was matched but wasn't removed";
}

public static void cancelShard(ESLogger logger, ShardRouting cancelledShard, UnassignedInfo unassignedInfo, RoutingNodes routingNodes) {
@@ -544,11 +539,13 @@ public class AllocationService extends AbstractComponent {
// The shard is a target of a relocating shard. In that case we only
// need to remove the target shard and cancel the source relocation.
// No shard is left unassigned
logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", cancelledShard, unassignedInfo.shortSummary());
logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", cancelledShard,
unassignedInfo.shortSummary());
RoutingNode sourceNode = routingNodes.node(cancelledShard.relocatingNodeId());
ShardRouting sourceShard = sourceNode.getByShardId(cancelledShard.shardId());
assert sourceShard.isRelocationSourceOf(cancelledShard);
logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", cancelledShard.shardId(), sourceShard, unassignedInfo.shortSummary());
logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", cancelledShard.shardId(), sourceShard,
unassignedInfo.shortSummary());
routingNodes.cancelRelocation(sourceShard);
routingNodes.remove(cancelledShard);
} else {
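
The rewritten applyFailedShards re-resolves every entry by allocation id before failing it, because failing a primary also fails its initializing replicas, so a later entry in the same batch may already be gone. A toy simulation of why that lookup matters (the types are illustrative stand-ins, not the Elasticsearch classes):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class FailLoopSketch {
    // Hypothetical stand-in for ShardRouting, keyed by allocation id.
    record Shard(String allocationId, boolean primary) {}

    public static void main(String[] args) {
        Map<String, Shard> routingNodes = new HashMap<>();
        Shard primary = new Shard("p1", true);
        Shard replica = new Shard("r1", false);
        routingNodes.put(primary.allocationId(), primary);
        routingNodes.put(replica.allocationId(), replica);

        for (Shard toFail : List.of(primary, replica)) {
            // re-resolve: the routing may have been removed in an earlier iteration
            Shard current = routingNodes.get(toFail.allocationId());
            if (current == null) {
                System.out.println(toFail + " already failed in an earlier iteration");
                continue;
            }
            routingNodes.remove(current.allocationId());
            if (current.primary()) {
                // cascading failure: initializing replicas go down with the primary
                routingNodes.remove(replica.allocationId());
            }
            System.out.println("failed " + current);
        }
    }
}
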
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.index.shard.ShardId;

import java.util.List;

@@ -39,25 +40,28 @@ public class FailedRerouteAllocation extends RoutingAllocation {
* details on why it failed.
*/
public static class FailedShard {
public final ShardRouting shard;
public final ShardRouting routingEntry;
public final String message;
public final Exception failure;

public FailedShard(ShardRouting shard, String message, Exception failure) {
this.shard = shard;
public FailedShard(ShardRouting routingEntry, String message, Exception failure) {
assert routingEntry.assignedToNode() : "only assigned shards can be failed " + routingEntry;
this.routingEntry = routingEntry;
this.message = message;
this.failure = failure;
}

@Override
public String toString() {
return "failed shard, shard " + shard + ", message [" + message + "], failure [" + ExceptionsHelper.detailedMessage(failure) + "]";
return "failed shard, shard " + routingEntry + ", message [" + message + "], failure [" +
ExceptionsHelper.detailedMessage(failure) + "]";
}
}

private final List<FailedShard> failedShards;

public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState,
List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
this.failedShards = failedShards;
}
@@ -150,7 +150,8 @@ public class RoutingAllocation {
* @param clusterState cluster state before rerouting
* @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
*/
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo,
long currentNanoTime, boolean retryFailed) {
this.deciders = deciders;
this.routingNodes = routingNodes;
this.metaData = clusterState.metaData();
@@ -33,9 +33,10 @@ import java.util.List;
*/
public class StartedRerouteAllocation extends RoutingAllocation {

private final List<? extends ShardRouting> startedShards;
private final List<ShardRouting> startedShards;

public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState,
List<ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
this.startedShards = startedShards;
}
@@ -44,7 +45,7 @@ public class StartedRerouteAllocation extends RoutingAllocation {
* Get started shards
* @return list of started shards
*/
public List<? extends ShardRouting> startedShards() {
public List<ShardRouting> startedShards() {
return startedShards;
}
}
@@ -188,11 +188,11 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
} else if (shardRouting.relocating()) {
initializingShard = shardRouting.cancelRelocation()
.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)
.buildTargetRelocatingShard();
.getTargetRelocatingShard();
} else {
assert shardRouting.started();
initializingShard = shardRouting.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)
.buildTargetRelocatingShard();
.getTargetRelocatingShard();
}
assert initializingShard.initializing();
return initializingShard;
@@ -156,7 +156,7 @@ public class ClusterService extends AbstractLifecycleComponent {

public synchronized void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().getLocalNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId());
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}

@@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.Loggers;

import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

/**
@@ -108,6 +109,7 @@ public class ParseField {
* names for this {@link ParseField}.
*/
boolean match(String fieldName, boolean strict) {
Objects.requireNonNull(fieldName, "fieldName cannot be null");
// if this parse field has not been completely deprecated then try to
// match the preferred name
if (allReplacedWith == null && fieldName.equals(name)) {
|
|||
import java.io.OutputStream;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.FileAlreadyExistsException;
|
||||
import java.nio.file.FileVisitResult;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.SimpleFileVisitor;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.nio.file.StandardOpenOption;
|
||||
import java.nio.file.attribute.BasicFileAttributes;
|
||||
|
@ -89,7 +91,19 @@ public class FsBlobContainer extends AbstractBlobContainer {
|
|||
@Override
|
||||
public void deleteBlob(String blobName) throws IOException {
|
||||
Path blobPath = path.resolve(blobName);
|
||||
Files.delete(blobPath);
|
||||
if (Files.isDirectory(blobPath)) {
|
||||
// delete directory recursively as long as it is empty (only contains empty directories),
|
||||
// which is the reason we aren't deleting any files, only the directories on the post-visit
|
||||
Files.walkFileTree(blobPath, new SimpleFileVisitor<Path>() {
|
||||
@Override
|
||||
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
|
||||
Files.delete(dir);
|
||||
return FileVisitResult.CONTINUE;
|
||||
}
|
||||
});
|
||||
} else {
|
||||
Files.delete(blobPath);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
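
The deleteBlob change relies on Files.walkFileTree removing directories bottom-up in postVisitDirectory, so only a tree of empty directories can be deleted. A runnable, JDK-only demonstration of that pattern:

import java.io.IOException;
import java.nio.file.*;

// Standalone demo of the walkFileTree pattern used in deleteBlob above: the
// post-visit hook deletes directories bottom-up, so a tree of *empty*
// directories is removed, while any remaining file makes the walk fail fast.
public class DeleteEmptyTree {
    public static void main(String[] args) throws IOException {
        Path root = Files.createTempDirectory("blob");
        Files.createDirectories(root.resolve("a/b/c"));

        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Files.delete(dir); // throws DirectoryNotEmptyException if a file is left behind
                return FileVisitResult.CONTINUE;
            }
        });
        System.out.println(Files.exists(root)); // false
    }
}
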
@@ -41,7 +41,9 @@ import java.util.Locale;
public enum GeoDistance implements Writeable {
/**
* Calculates distance as points on a plane. Faster, but less accurate than {@link #ARC}.
* @deprecated use {@link GeoUtils#planeDistance}
*/
@Deprecated
PLANE {
@Override
public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
@@ -63,7 +65,11 @@ public enum GeoDistance implements Writeable {

/**
* Calculates distance factor.
* Note: {@code calculate} is simply returning the RHS of the spherical law of cosines from 2 lat,lon points.
* {@code normalize} also returns the RHS of the spherical law of cosines for a given distance
* @deprecated use {@link SloppyMath#haversinMeters} to get distance in meters, law of cosines is being removed
*/
@Deprecated
FACTOR {
@Override
public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
@@ -85,7 +91,9 @@ public enum GeoDistance implements Writeable {
},
/**
* Calculates distance as points on a globe.
* @deprecated use {@link GeoUtils#arcDistance}
*/
@Deprecated
ARC {
@Override
public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
@@ -143,6 +151,7 @@ public enum GeoDistance implements Writeable {
* Default {@link GeoDistance} function. This method should be used if no specific function has been selected.
* This is an alias for <code>SLOPPY_ARC</code>
*/
@Deprecated
public static final GeoDistance DEFAULT = SLOPPY_ARC;

public abstract double normalize(double distance, DistanceUnit unit);
@@ -1,18 +1,20 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo;

@@ -300,4 +302,14 @@ public class GeoHashUtils {

return neighbors;
}

/** returns the latitude value from the string based geohash */
public static final double decodeLatitude(final String geohash) {
return GeoPointField.decodeLatitude(mortonEncode(geohash));
}

/** returns the longitude value from the string based geohash */
public static final double decodeLongitude(final String geohash) {
return GeoPointField.decodeLongitude(mortonEncode(geohash));
}
}
@@ -478,6 +478,21 @@ public class GeoUtils {
return SloppyMath.haversinMeters(centerLat, centerLon, centerLat, (MAX_LON + centerLon) % 360);
}

/** Return the distance (in meters) between 2 lat,lon geo points using the haversine method implemented by lucene */
public static double arcDistance(double lat1, double lon1, double lat2, double lon2) {
return SloppyMath.haversinMeters(lat1, lon1, lat2, lon2);
}

/**
* Return the distance (in meters) between 2 lat,lon geo points using a simple tangential plane
* this provides a faster alternative to {@link GeoUtils#arcDistance} when points are within 5 km
*/
public static double planeDistance(double lat1, double lon1, double lat2, double lon2) {
double x = (lon2 - lon1) * SloppyMath.TO_RADIANS * Math.cos((lat2 + lat1) / 2.0 * SloppyMath.TO_RADIANS);
double y = (lat2 - lat1) * SloppyMath.TO_RADIANS;
return Math.sqrt(x * x + y * y) * EARTH_MEAN_RADIUS;
}

private GeoUtils() {
}
}
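
planeDistance is the equirectangular approximation: project the longitude delta onto the plane at the mean latitude, take the Euclidean norm, and scale by the earth's mean radius. A JDK-only check of how close it stays to haversine over roughly 1 km (the radius and TO_RADIANS constants here are local stand-ins for GeoUtils.EARTH_MEAN_RADIUS and SloppyMath.TO_RADIANS):

public class DistanceSketch {
    static final double EARTH_MEAN_RADIUS = 6_371_008.7714; // meters, illustrative value
    static final double TO_RADIANS = Math.PI / 180.0;

    // Tangential-plane shortcut, mirroring the formula in planeDistance above.
    static double planeDistance(double lat1, double lon1, double lat2, double lon2) {
        double x = (lon2 - lon1) * TO_RADIANS * Math.cos((lat2 + lat1) / 2.0 * TO_RADIANS);
        double y = (lat2 - lat1) * TO_RADIANS;
        return Math.sqrt(x * x + y * y) * EARTH_MEAN_RADIUS;
    }

    // Plain haversine, standing in for SloppyMath.haversinMeters.
    static double haversinMeters(double lat1, double lon1, double lat2, double lon2) {
        double dLat = (lat2 - lat1) * TO_RADIANS, dLon = (lon2 - lon1) * TO_RADIANS;
        double h = Math.pow(Math.sin(dLat / 2), 2)
            + Math.cos(lat1 * TO_RADIANS) * Math.cos(lat2 * TO_RADIANS) * Math.pow(Math.sin(dLon / 2), 2);
        return 2 * EARTH_MEAN_RADIUS * Math.asin(Math.sqrt(h));
    }

    public static void main(String[] args) {
        // ~1 km apart: the plane approximation stays within centimeters of haversine
        System.out.printf("plane=%.2f m, arc=%.2f m%n",
            planeDistance(52.5200, 13.4050, 52.5290, 13.4050),
            haversinMeters(52.5200, 13.4050, 52.5290, 13.4050));
    }
}
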
@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman
|
|||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.inject.util.Providers;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
|
@ -57,9 +56,12 @@ public class NetworkModule extends AbstractModule {
|
|||
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
|
||||
public static final String HTTP_TYPE_KEY = "http.type";
|
||||
public static final String LOCAL_TRANSPORT = "local";
|
||||
public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
|
||||
public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default";
|
||||
|
||||
public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString("transport.type.default", Property.NodeScope);
|
||||
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString("http.type.default", Property.NodeScope);
|
||||
public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY,
|
||||
Property.NodeScope);
|
||||
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
|
||||
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
|
||||
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
|
||||
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
|
||||
|
|
|
@ -19,10 +19,13 @@
package org.elasticsearch.common.rounding;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;
import org.joda.time.IllegalInstantException;

import java.io.IOException;
import java.util.Objects;

@ -54,103 +57,67 @@ public abstract class Rounding implements Streamable {
@Override
public abstract int hashCode();

/**
* Rounding strategy which is based on an interval
*
* {@code rounded = value - (value % interval) }
*/
public static class Interval extends Rounding {
public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
}

static final byte ID = 0;
public static Builder builder(TimeValue interval) {
return new Builder(interval);
}

public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static class Builder {

private long interval;
private final DateTimeUnit unit;
private final long interval;

public Interval() { // for serialization
private DateTimeZone timeZone = DateTimeZone.UTC;

public Builder(DateTimeUnit unit) {
this.unit = unit;
this.interval = -1;
}

/**
* Creates a new interval rounding.
*
* @param interval The interval
*/
public Interval(long interval) {
this.interval = interval;
public Builder(TimeValue interval) {
this.unit = null;
if (interval.millis() < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval.millis();
}

@Override
public byte id() {
return ID;
public Builder timeZone(DateTimeZone timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("Setting null as timezone is not supported");
}
this.timeZone = timeZone;
return this;
}

public static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
public Rounding build() {
Rounding timeZoneRounding;
if (unit != null) {
timeZoneRounding = new TimeUnitRounding(unit, timeZone);
} else {
return value / interval;
timeZoneRounding = new TimeIntervalRounding(interval, timeZone);
}
}

public static long roundValue(long key, long interval) {
return key * interval;
}

@Override
public long round(long value) {
return roundKey(value, interval) * interval;
}

@Override
public long nextRoundingValue(long value) {
assert value == round(value);
return value + interval;
}

@Override
public void readFrom(StreamInput in) throws IOException {
interval = in.readVLong();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
}

@Override
public int hashCode() {
return Objects.hash(interval);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Interval other = (Interval) obj;
return Objects.equals(interval, other.interval);
return timeZoneRounding;
}
}
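For reference, the new builder collapses the old Interval/TimeZoneRounding entry points into a single API; a usage sketch (interval and zone chosen arbitrarily):

    // Sketch: building an hourly, CET-aware rounding with the Builder above.
    Rounding rounding = Rounding.builder(TimeValue.timeValueHours(1))
            .timeZone(DateTimeZone.forID("CET"))
            .build();
    long bucketStart = rounding.round(System.currentTimeMillis());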
public static class FactorRounding extends Rounding {
static class TimeUnitRounding extends Rounding {

static final byte ID = 7;
static final byte ID = 1;

public static final ParseField FACTOR_FIELD = new ParseField("factor");
private DateTimeUnit unit;
private DateTimeField field;
private DateTimeZone timeZone;

private Rounding rounding;

private float factor;

FactorRounding() { // for serialization
TimeUnitRounding() { // for serialization
}

FactorRounding(Rounding rounding, float factor) {
this.rounding = rounding;
this.factor = factor;
TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) {
this.unit = unit;
this.field = unit.field(timeZone);
this.timeZone = timeZone;
}

@Override

@ -160,31 +127,51 @@ public abstract class Rounding implements Streamable {

@Override
public long round(long utcMillis) {
return rounding.round((long) (factor * utcMillis));
long rounded = field.roundFloor(utcMillis);
if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
// in this case, we crossed a time zone transition. In some edge
// cases this will
// result in a value that is not a rounded value itself. We need
// to round again
// to make sure. This will have no effect in cases where
// 'rounded' was already a proper
// rounded value
rounded = field.roundFloor(rounded);
}
assert rounded == field.roundFloor(rounded);
return rounded;
}

@Override
public long nextRoundingValue(long value) {
return rounding.nextRoundingValue(value);
public long nextRoundingValue(long utcMillis) {
long floor = round(utcMillis);
// add one unit and round to get to next rounded value
long next = round(field.add(floor, 1));
if (next == floor) {
// in rare case we need to add more than one unit
next = round(field.add(floor, 2));
}
return next;
}

@Override
public void readFrom(StreamInput in) throws IOException {
rounding = Rounding.Streams.read(in);
factor = in.readFloat();
unit = DateTimeUnit.resolve(in.readByte());
timeZone = DateTimeZone.forID(in.readString());
field = unit.field(timeZone);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
Rounding.Streams.write(rounding, out);
out.writeFloat(factor);
out.writeByte(unit.id());
out.writeString(timeZone.getID());
}

@Override
public int hashCode() {
return Objects.hash(rounding, factor);
return Objects.hash(unit, timeZone);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {

@ -193,28 +180,31 @@ public abstract class Rounding implements Streamable {
if (getClass() != obj.getClass()) {
return false;
}
FactorRounding other = (FactorRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(factor, other.factor);
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone);
}

@Override
public String toString() {
return "[" + timeZone + "][" + unit + "]";
}
}

public static class OffsetRounding extends Rounding {
static class TimeIntervalRounding extends Rounding {

static final byte ID = 8;
static final byte ID = 2;

public static final ParseField OFFSET_FIELD = new ParseField("offset");
private long interval;
private DateTimeZone timeZone;

private Rounding rounding;

private long offset;

OffsetRounding() { // for serialization
TimeIntervalRounding() { // for serialization
}

public OffsetRounding(Rounding intervalRounding, long offset) {
this.rounding = intervalRounding;
this.offset = offset;
TimeIntervalRounding(long interval, DateTimeZone timeZone) {
if (interval < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}

@Override

@ -223,32 +213,100 @@ public abstract class Rounding implements Streamable {
}

@Override
public long round(long value) {
return rounding.round(value - offset) + offset;
public long round(long utcMillis) {
long timeLocal = timeZone.convertUTCToLocal(utcMillis);
long rounded = roundKey(timeLocal, interval) * interval;
long roundedUTC;
if (isInDSTGap(rounded) == false) {
roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis);
// check if we crossed DST transition, in this case we want the
// last rounded value before the transition
long transition = timeZone.previousTransition(utcMillis);
if (transition != utcMillis && transition > roundedUTC) {
roundedUTC = round(transition - 1);
}
} else {
/*
* Edge case where the rounded local time is illegal and landed
* in a DST gap. In this case, we choose 1ms tick after the
* transition date. We don't want the transition date itself
* because those dates, when rounded themselves, fall into the
* previous interval. This would violate the invariant that the
* rounding operation should be idempotent.
*/
roundedUTC = timeZone.previousTransition(utcMillis) + 1;
}
return roundedUTC;
}

private static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
} else {
return value / interval;
}
}
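// roundKey performs floor division: for negative epoch millis it rounds
// toward negative infinity rather than toward zero (on Java 8+ this is
// equivalent to Math.floorDiv(value, interval)).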
/**
* Determine whether the local instant is a valid instant in the given
* time zone. The logic for this is taken from
* {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the
* `strict` mode case, but instead of throwing an
* {@link IllegalInstantException}, which is costly, we want to return a
* flag indicating that the value is illegal in that time zone.
*/
private boolean isInDSTGap(long instantLocal) {
if (timeZone.isFixed()) {
return false;
}
// get the offset at instantLocal (first estimate)
int offsetLocal = timeZone.getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
int offset = timeZone.getOffset(instantLocal - offsetLocal);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offset) {
// determine if we are in the DST gap
long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal);
if (nextLocal == (instantLocal - offsetLocal)) {
nextLocal = Long.MAX_VALUE;
}
long nextAdjusted = timeZone.nextTransition(instantLocal - offset);
if (nextAdjusted == (instantLocal - offset)) {
nextAdjusted = Long.MAX_VALUE;
}
if (nextLocal != nextAdjusted) {
// we are in the DST gap
return true;
}
}
return false;
}

@Override
public long nextRoundingValue(long value) {
return rounding.nextRoundingValue(value - offset) + offset;
public long nextRoundingValue(long time) {
long timeLocal = time;
timeLocal = timeZone.convertUTCToLocal(time);
long next = timeLocal + interval;
return timeZone.convertLocalToUTC(next, false);
}

@Override
public void readFrom(StreamInput in) throws IOException {
rounding = Rounding.Streams.read(in);
offset = in.readLong();
interval = in.readVLong();
timeZone = DateTimeZone.forID(in.readString());
}

@Override
public void writeTo(StreamOutput out) throws IOException {
Rounding.Streams.write(rounding, out);
out.writeLong(offset);
out.writeVLong(interval);
out.writeString(timeZone.getID());
}

@Override
public int hashCode() {
return Objects.hash(rounding, offset);
return Objects.hash(interval, timeZone);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {

@ -257,9 +315,8 @@ public abstract class Rounding implements Streamable {
if (getClass() != obj.getClass()) {
return false;
}
OffsetRounding other = (OffsetRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(offset, other.offset);
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone);
}
}

@ -274,11 +331,8 @@ public abstract class Rounding implements Streamable {
Rounding rounding = null;
byte id = in.readByte();
switch (id) {
case Interval.ID: rounding = new Interval(); break;
case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break;
case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break;
case TimeZoneRounding.FactorRounding.ID: rounding = new FactorRounding(); break;
case OffsetRounding.ID: rounding = new OffsetRounding(); break;
case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break;
case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break;
default: throw new ElasticsearchException("unknown rounding id [" + id + "]");
}
rounding.readFrom(in);
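The idempotency invariant spelled out in the DST-gap comment above can be spot-checked against any zone with daylight saving; a sketch, where roundUtc is a hypothetical stand-in for the TimeIntervalRounding round() logic (zone and interval arbitrary):

    // Sketch: rounding must be a fixed point of itself, even at DST transitions.
    DateTimeZone tz = DateTimeZone.forID("Europe/Amsterdam"); // arbitrary DST zone
    long interval = 60 * 60 * 1000L;                          // one hour
    long transition = tz.nextTransition(0L);                  // first transition after epoch
    for (long utc : new long[] { transition - 1, transition, transition + 1 }) {
        long once = roundUtc(utc, interval, tz);  // hypothetical helper: round() above
        long twice = roundUtc(once, interval, tz);
        assert once == twice : "round() must be idempotent";
    }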
@ -1,314 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.rounding;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;
import org.joda.time.IllegalInstantException;

import java.io.IOException;
import java.util.Objects;

/**
* A rounding strategy for dates. It is typically used to group together dates
* that are part of the same hour/day/month, taking into account time zones and
* daylight saving times.
*/
public abstract class TimeZoneRounding extends Rounding {
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone");

public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
}

public static Builder builder(TimeValue interval) {
return new Builder(interval);
}

public static class Builder {

private final DateTimeUnit unit;
private final long interval;

private DateTimeZone timeZone = DateTimeZone.UTC;

private float factor = 1.0f;

private long offset;

public Builder(DateTimeUnit unit) {
this.unit = unit;
this.interval = -1;
}

public Builder(TimeValue interval) {
this.unit = null;
if (interval.millis() < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval.millis();
}

public Builder timeZone(DateTimeZone timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("Setting null as timezone is not supported");
}
this.timeZone = timeZone;
return this;
}

public Builder offset(long offset) {
this.offset = offset;
return this;
}

public Builder factor(float factor) {
this.factor = factor;
return this;
}

public Rounding build() {
Rounding timeZoneRounding;
if (unit != null) {
timeZoneRounding = new TimeUnitRounding(unit, timeZone);
} else {
timeZoneRounding = new TimeIntervalRounding(interval, timeZone);
}
if (offset != 0) {
timeZoneRounding = new OffsetRounding(timeZoneRounding, offset);
}
if (factor != 1.0f) {
timeZoneRounding = new FactorRounding(timeZoneRounding, factor);
}
return timeZoneRounding;
}
}

static class TimeUnitRounding extends TimeZoneRounding {

static final byte ID = 1;

private DateTimeUnit unit;
private DateTimeField field;
private DateTimeZone timeZone;

TimeUnitRounding() { // for serialization
}

TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) {
this.unit = unit;
this.field = unit.field(timeZone);
this.timeZone = timeZone;
}

@Override
public byte id() {
return ID;
}

@Override
public long round(long utcMillis) {
long rounded = field.roundFloor(utcMillis);
if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
// in this case, we crossed a time zone transition. In some edge cases this will
// result in a value that is not a rounded value itself. We need to round again
// to make sure. This will have no effect in cases where 'rounded' was already a proper
// rounded value
rounded = field.roundFloor(rounded);
}
assert rounded == field.roundFloor(rounded);
return rounded;
}

@Override
public long nextRoundingValue(long utcMillis) {
long floor = round(utcMillis);
// add one unit and round to get to next rounded value
long next = round(field.add(floor, 1));
if (next == floor) {
// in rare case we need to add more than one unit
next = round(field.add(floor, 2));
}
return next;
}

@Override
public void readFrom(StreamInput in) throws IOException {
unit = DateTimeUnit.resolve(in.readByte());
timeZone = DateTimeZone.forID(in.readString());
field = unit.field(timeZone);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(unit.id());
out.writeString(timeZone.getID());
}

@Override
public int hashCode() {
return Objects.hash(unit, timeZone);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit)
&& Objects.equals(timeZone, other.timeZone);
}

@Override
public String toString() {
return "[" + timeZone + "][" + unit + "]";
}
}

static class TimeIntervalRounding extends TimeZoneRounding {

static final byte ID = 2;

private long interval;
private DateTimeZone timeZone;

TimeIntervalRounding() { // for serialization
}

TimeIntervalRounding(long interval, DateTimeZone timeZone) {
if (interval < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}

@Override
public byte id() {
return ID;
}

@Override
public long round(long utcMillis) {
long timeLocal = timeZone.convertUTCToLocal(utcMillis);
long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval);
long roundedUTC;
if (isInDSTGap(rounded) == false) {
roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis);
// check if we crossed DST transition, in this case we want the last rounded value before the transition
long transition = timeZone.previousTransition(utcMillis);
if (transition != utcMillis && transition > roundedUTC) {
roundedUTC = round(transition - 1);
}
} else {
/*
* Edge case where the rounded local time is illegal and landed
* in a DST gap. In this case, we choose 1ms tick after the
* transition date. We don't want the transition date itself
* because those dates, when rounded themselves, fall into the
* previous interval. This would violate the invariant that the
* rounding operation should be idempotent.
*/
roundedUTC = timeZone.previousTransition(utcMillis) + 1;
}
return roundedUTC;
}

/**
* Determine whether the local instant is a valid instant in the given
* time zone. The logic for this is taken from
* {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the
* `strict` mode case, but instead of throwing an
* {@link IllegalInstantException}, which is costly, we want to return a
* flag indicating that the value is illegal in that time zone.
*/
private boolean isInDSTGap(long instantLocal) {
if (timeZone.isFixed()) {
return false;
}
// get the offset at instantLocal (first estimate)
int offsetLocal = timeZone.getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
int offset = timeZone.getOffset(instantLocal - offsetLocal);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offset) {
// determine if we are in the DST gap
long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal);
if (nextLocal == (instantLocal - offsetLocal)) {
nextLocal = Long.MAX_VALUE;
}
long nextAdjusted = timeZone.nextTransition(instantLocal - offset);
if (nextAdjusted == (instantLocal - offset)) {
nextAdjusted = Long.MAX_VALUE;
}
if (nextLocal != nextAdjusted) {
// we are in the DST gap
return true;
}
}
return false;
}

@Override
public long nextRoundingValue(long time) {
long timeLocal = time;
timeLocal = timeZone.convertUTCToLocal(time);
long next = timeLocal + interval;
return timeZone.convertLocalToUTC(next, false);
}

@Override
public void readFrom(StreamInput in) throws IOException {
interval = in.readVLong();
timeZone = DateTimeZone.forID(in.readString());
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
out.writeString(timeZone.getID());
}

@Override
public int hashCode() {
return Objects.hash(interval, timeZone);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval)
&& Objects.equals(timeZone, other.timeZone);
}
}
}
@ -24,6 +24,7 @@ import com.fasterxml.jackson.core.JsonStreamContext;
import com.fasterxml.jackson.core.base.GeneratorBase;
import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import com.fasterxml.jackson.core.io.SerializedString;
import com.fasterxml.jackson.core.json.JsonWriteContext;
import com.fasterxml.jackson.core.util.DefaultIndenter;
import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
import org.elasticsearch.common.bytes.BytesReference;

@ -271,7 +272,9 @@ public class JsonXContentGenerator implements XContentGenerator {
public void writeEndRaw() {
assert base != null : "JsonGenerator should be of instance GeneratorBase but was: " + generator.getClass();
if (base != null) {
base.getOutputContext().writeValue();
JsonStreamContext context = base.getOutputContext();
assert (context instanceof JsonWriteContext) : "Expected an instance of JsonWriteContext but was: " + context.getClass();
((JsonWriteContext) context).writeValue();
}
}
@ -87,11 +87,6 @@ public class JsonXContentParser extends AbstractXContentParser {

@Override
public BytesRef utf8Bytes() throws IOException {
// Tentative workaround for https://github.com/elastic/elasticsearch/issues/8629
// TODO: Remove this when we upgrade jackson to 2.6.x.
if (parser.getTextLength() == 0) {
return new BytesRef();
}
return new BytesRef(CharBuffer.wrap(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength()));
}

@ -134,7 +134,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
// remove the NO_MASTER block in this case

@ -160,7 +160,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build();

@ -231,8 +231,8 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
RoutingAllocation.Result routingResult = master.allocationService.reroute(
ClusterState.builder(updatedState).build(), "elected as master");
RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes(
ClusterState.builder(updatedState).build(), true, "node stopped");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

@ -413,8 +413,7 @@ public class NodeJoinController extends AbstractComponent {

final DiscoveryNodes currentNodes = currentState.nodes();
boolean nodesChanged = false;
ClusterState.Builder newState = ClusterState.builder(currentState);
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes);
ClusterState.Builder newState;

if (joiningNodes.size() == 1 && joiningNodes.get(0).equals(FINISH_ELECTION_TASK)) {
return results.successes(joiningNodes).build(currentState);

@ -423,16 +422,17 @@ public class NodeJoinController extends AbstractComponent {
// use these joins to try and become the master.
// Note that we don't have to do any validation of the amount of joining nodes - the commit
// during the cluster state publishing guarantees that we have enough
nodesBuilder.masterNodeId(currentNodes.getLocalNodeId());
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks())
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
newState.blocks(clusterBlocks);
newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes);
nodesChanged = true;
} else if (nodesBuilder.isLocalNodeElectedMaster() == false) {
} else if (currentNodes.isLocalNodeElectedMaster() == false) {
logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode());
throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request");
} else {
newState = ClusterState.builder(currentState);
}

DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes());

assert nodesBuilder.isLocalNodeElectedMaster();

// processing any joins

@ -443,7 +443,7 @@ public class NodeJoinController extends AbstractComponent {
logger.debug("received a join request for an existing node [{}]", node);
} else {
try {
nodesBuilder.put(node);
nodesBuilder.add(node);
nodesChanged = true;
} catch (IllegalArgumentException e) {
results.failure(node, e);

@ -468,6 +468,28 @@ public class NodeJoinController extends AbstractComponent {
return results.build(newState.build());
}

private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List<DiscoveryNode> joiningNodes) {
assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint();
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes());
nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId());
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks())
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
for (final DiscoveryNode joiningNode : joiningNodes) {
final DiscoveryNode existingNode = nodesBuilder.get(joiningNode.getId());
if (existingNode != null && existingNode.equals(joiningNode) == false) {
logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", existingNode, joiningNode);
nodesBuilder.remove(existingNode.getId());
}
}

// now trim any left over dead nodes - either left there when the previous master stepped down
// or removed by us above
ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build();
RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false,
"removed dead nodes on election");
return ClusterState.builder(tmpState).routingResult(result);
}

@Override
public boolean runOnlyOnMaster() {
// we validate that we are allowed to change the cluster state during cluster state processing
@ -570,7 +570,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
} else {
final RoutingAllocation.Result routingResult = allocationService.reroute(remainingNodesClusterState, describeTasks(tasks));
final RoutingAllocation.Result routingResult =
allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks));
return resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build());
}
}

@ -124,8 +124,8 @@ public class GatewayAllocator extends AbstractComponent {

public void applyFailedShards(FailedRerouteAllocation allocation) {
for (FailedRerouteAllocation.FailedShard shard : allocation.failedShards()) {
Releasables.close(asyncFetchStarted.remove(shard.shard.shardId()));
Releasables.close(asyncFetchStore.remove(shard.shard.shardId()));
Releasables.close(asyncFetchStarted.remove(shard.routingEntry.shardId()));
Releasables.close(asyncFetchStore.remove(shard.routingEntry.shardId()));
}
}
@ -705,9 +705,6 @@ public abstract class Engine implements Closeable {
if (Lucene.isCorruptionException(e)) {
failEngine("corrupt file (source: [" + source + "])", e);
return true;
} else if (ExceptionsHelper.isOOM(e)) {
failEngine("out of memory (source: [" + source + "])", e);
return true;
}
return false;
}

@ -22,8 +22,9 @@ package org.elasticsearch.index.fielddata;

import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.unit.DistanceUnit;
import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;

@ -190,7 +191,7 @@ public interface ScriptDocValues<T> extends List<T> {
}
}

public static class GeoPoints extends AbstractList<GeoPoint> implements ScriptDocValues<GeoPoint> {
class GeoPoints extends AbstractList<GeoPoint> implements ScriptDocValues<GeoPoint> {

private final MultiGeoPointValues values;

@ -253,124 +254,41 @@ public interface ScriptDocValues<T> extends List<T> {
return values.count();
}

public double factorDistance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}

public double factorDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}

public double factorDistance02(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 1;
}

public double factorDistance13(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.FACTOR.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT) + 2;
}

public double arcDistance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
return GeoUtils.arcDistance(point.lat(), point.lon(), lat, lon);
}

public double arcDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
return arcDistance(lat, lon);
}

public double arcDistanceInKm(double lat, double lon) {
public double planeDistance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
return GeoUtils.planeDistance(point.lat(), point.lon(), lat, lon);
}

public double arcDistanceInKmWithDefault(double lat, double lon, double defaultValue) {
public double planeDistanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}

public double arcDistanceInMiles(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}

public double arcDistanceInMilesWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.ARC.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}

public double distance(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}

public double distanceWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.DEFAULT);
}

public double distanceInKm(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}

public double distanceInKmWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.KILOMETERS);
}

public double distanceInMiles(double lat, double lon) {
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
}

public double distanceInMilesWithDefault(double lat, double lon, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
GeoPoint point = getValue();
return GeoDistance.PLANE.calculate(point.lat(), point.lon(), lat, lon, DistanceUnit.MILES);
return planeDistance(lat, lon);
}

public double geohashDistance(String geohash) {
GeoPoint point = getValue();
GeoPoint p = new GeoPoint().resetFromGeoHash(geohash);
return GeoDistance.ARC.calculate(point.lat(), point.lon(), p.lat(), p.lon(), DistanceUnit.DEFAULT);
return GeoUtils.arcDistance(point.lat(), point.lon(), GeoHashUtils.decodeLatitude(geohash),
GeoHashUtils.decodeLongitude(geohash));
}

public double geohashDistanceInKm(String geohash) {
GeoPoint point = getValue();
GeoPoint p = new GeoPoint().resetFromGeoHash(geohash);
return GeoDistance.ARC.calculate(point.lat(), point.lon(), p.lat(), p.lon(), DistanceUnit.KILOMETERS);
public double geohashDistanceWithDefault(String geohash, double defaultValue) {
if (isEmpty()) {
return defaultValue;
}
return geohashDistance(geohash);
}

public double geohashDistanceInMiles(String geohash) {
GeoPoint point = getValue();
GeoPoint p = new GeoPoint().resetFromGeoHash(geohash);
return GeoDistance.ARC.calculate(point.lat(), point.lon(), p.lat(), p.lon(), DistanceUnit.MILES);
}

}
}
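After this trim the surviving distance surface is arcDistance/planeDistance/geohashDistance plus their *WithDefault variants; a sketch of Java-side use, where the GeoPoints argument is assumed to be wired to a document's geo_point field:

    // Sketch: the trimmed-down distance API on a doc's geo_point values.
    static double distanceOrDefault(ScriptDocValues.GeoPoints points) {
        double meters = points.arcDistance(52.37, 4.89);         // haversine
        double fast = points.planeDistance(52.37, 4.89);         // tangential plane
        return points.arcDistanceWithDefault(52.37, 4.89, -1);   // -1 if the field is empty
    }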
@ -368,10 +368,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
}

List<Object> values = searchLookup.source().extractRawValues(field);
if (!values.isEmpty()) {
for (int i = 0; i < values.size(); i++) {
values.set(i, fieldMapper.fieldType().valueForSearch(values.get(i)));
}
if (values.isEmpty() == false) {
value = values;
}
}

@ -340,17 +340,13 @@ final class DocumentParser {
return;
}
XContentParser parser = context.parser();

String currentFieldName = parser.currentName();
if (atRoot && MapperService.isMetadataField(currentFieldName)) {
throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters.");
}
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_NULL) {
// the object is null ("obj1" : null), simply bail
return;
}

String currentFieldName = parser.currentName();
if (token.isValue()) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value");
}

@ -384,6 +380,9 @@ final class DocumentParser {
parseArray(context, mapper, currentFieldName);
} else if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (MapperService.isMetadataField(context.path().pathAsText(currentFieldName))) {
throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters.");
}
} else if (token == XContentParser.Token.VALUE_NULL) {
parseNullValue(context, mapper, currentFieldName);
} else if (token == null) {

@ -238,6 +238,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
}

private final Version indexCreatedVersion;
protected MappedFieldType fieldType;
protected final MappedFieldType defaultFieldType;
protected MultiFields multiFields;

@ -246,6 +247,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName);
assert indexSettings != null;
this.indexCreatedVersion = Version.indexCreated(indexSettings);
fieldType.freeze();
this.fieldType = fieldType;
defaultFieldType.freeze();

@ -283,7 +285,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
if (!customBoost()
// don't set boosts eg. on dv fields
&& field.fieldType().indexOptions() != IndexOptions.NONE
&& Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) {
&& indexCreatedVersion.before(Version.V_5_0_0_alpha1)) {
field.setBoost(fieldType().boost());
}
context.doc().add(field);

@ -168,6 +168,16 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
failIfNoDocValues();
return new DocValuesIndexFieldData.Builder();
}

@Override
public Object valueForSearch(Object value) {
if (value == null) {
return null;
}
// keywords are internally stored as utf8 bytes
BytesRef binaryValue = (BytesRef) value;
return binaryValue.utf8ToString();
}
}

private Boolean includeInAll;

@ -252,12 +262,14 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
context.allEntries().addText(fieldType().name(), value, fieldType().boost());
}

// convert to utf8 only once before feeding postings/dv/stored fields
final BytesRef binaryValue = new BytesRef(value);
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
Field field = new Field(fieldType().name(), value, fieldType());
Field field = new Field(fieldType().name(), binaryValue, fieldType());
fields.add(field);
}
if (fieldType().hasDocValues()) {
fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue));
}
}
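The point of the keyword change above is to do the UTF-8 conversion once and share the resulting BytesRef between postings, doc values and stored fields; the round-trip itself is a plain Lucene operation (standalone sketch):

    import org.apache.lucene.util.BytesRef;

    public class KeywordBytesSketch {
        public static void main(String[] args) {
            // A keyword value is UTF-8 encoded once into a BytesRef;
            // valueForSearch decodes it back to a String for responses.
            BytesRef binaryValue = new BytesRef("café");
            System.out.println(binaryValue.utf8ToString()); // -> café
        }
    }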
@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.apache.lucene.util.LegacyNumericUtils;

@ -47,6 +48,7 @@ import org.elasticsearch.index.mapper.core.LegacyDoubleFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

@ -149,7 +151,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);

public Y build(Mapper.BuilderContext context) {
GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;

@ -176,10 +178,17 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
}
KeywordFieldMapper geoHashMapper = null;
FieldMapper geoHashMapper = null;
if (enableGeoHash || enableGeoHashPrefix) {
// TODO: possible also implicitly enable geohash if geohash precision is set
geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH).index(true).includeInAll(false).store(fieldType.stored()).build(context);
if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH)
.index(true).includeInAll(false).store(fieldType.stored()).build(context);
} else {
geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH)
.tokenized(false).index(true).omitNorms(true).indexOptions(IndexOptions.DOCS)
.includeInAll(false).store(fieldType.stored()).build(context);
}
geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
}
context.path().remove();

@ -380,12 +389,12 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

protected FieldMapper lonMapper;

protected KeywordFieldMapper geoHashMapper;
protected FieldMapper geoHashMapper;

protected Explicit<Boolean> ignoreMalformed;

protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
FieldMapper latMapper, FieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.latMapper = latMapper;

@ -556,7 +565,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);
KeywordFieldMapper geoUpdated = geoHashMapper == null ? null : (KeywordFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
FieldMapper geoUpdated = geoHashMapper == null ? null : geoHashMapper.updateFieldType(fullNameToFieldType);
FieldMapper latUpdated = latMapper == null ? null : latMapper.updateFieldType(fullNameToFieldType);
FieldMapper lonUpdated = lonMapper == null ? null : lonMapper.updateFieldType(fullNameToFieldType);
if (updated == this

@ -79,7 +79,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
@Override
public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper,
FieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
FieldMapper lonMapper, FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
if (context.indexCreatedVersion().before(Version.V_2_3_0)) {

@ -110,7 +110,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {

public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
FieldMapper latMapper, FieldMapper lonMapper,
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
}

@ -25,7 +25,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

@ -40,7 +39,6 @@ import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.CustomDocValuesField;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;

@ -109,7 +107,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
@Override
public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper,
FieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
FieldMapper lonMapper, FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
setupFieldType(context);

@ -267,7 +265,7 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement

public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
FieldMapper latMapper, FieldMapper lonMapper,
KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
@ -26,10 +26,12 @@ import org.apache.lucene.search.spans.SpanQuery;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.action.support.ToXContentToBytes;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentLocation;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -290,4 +292,12 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>>
|
|||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
protected static void throwParsingExceptionOnMultipleFields(String queryName, XContentLocation contentLocation,
|
||||
String processedFieldName, String currentFieldName) {
|
||||
if (processedFieldName != null) {
|
||||
throw new ParsingException(contentLocation, "[" + queryName + "] query doesn't support multiple fields, found ["
|
||||
+ processedFieldName + "] and [" + currentFieldName + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -102,7 +102,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
throw new IllegalArgumentException("field name is null or empty");
}
if (text == null) {
throw new IllegalArgumentException("text cannot be null.");
throw new IllegalArgumentException("text cannot be null");
}
this.fieldName = fieldName;
this.text = text;

@ -265,11 +265,8 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue

public static Optional<CommonTermsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();

String fieldName = null;
Object text = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String analyzer = null;

@ -280,78 +277,77 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
Operator lowFreqOperator = CommonTermsQueryBuilder.DEFAULT_LOW_FREQ_OCCUR;
float cutoffFrequency = CommonTermsQueryBuilder.DEFAULT_CUTOFF_FREQ;
String queryName = null;
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
String innerFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
innerFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(innerFieldName, LOW_FREQ_FIELD)) {
lowFreqMinimumShouldMatch = parser.text();
} else if (parseContext.getParseFieldMatcher().match(innerFieldName, HIGH_FREQ_FIELD)) {
highFreqMinimumShouldMatch = parser.text();
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
String innerFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
innerFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(innerFieldName, LOW_FREQ_FIELD)) {
lowFreqMinimumShouldMatch = parser.text();
} else if (parseContext.getParseFieldMatcher().match(innerFieldName, HIGH_FREQ_FIELD)) {
highFreqMinimumShouldMatch = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + innerFieldName
+ "] for [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + innerFieldName
+ "] for [" + currentFieldName + "]");
"] unexpected token type [" + token
+ "] after [" + innerFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] unexpected token type [" + token
+ "] after [" + innerFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
text = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, DISABLE_COORD_FIELD)) {
disableCoord = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, HIGH_FREQ_OPERATOR_FIELD)) {
highFreqOperator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOW_FREQ_OPERATOR_FIELD)) {
lowFreqOperator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
lowFreqMinimumShouldMatch = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {
cutoffFrequency = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
text = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, DISABLE_COORD_FIELD)) {
disableCoord = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, HIGH_FREQ_OPERATOR_FIELD)) {
highFreqOperator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LOW_FREQ_OPERATOR_FIELD)) {
lowFreqOperator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
lowFreqMinimumShouldMatch = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {
cutoffFrequency = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME +
"] query does not support [" + currentFieldName + "]");
}
}
}
parser.nextToken();
} else {
text = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(),
"[common] query parsed in simplified form, with direct field name, but included more options than just " +
"the field name, possibly use its 'options' form, with 'query' element?");
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
text = parser.objectText();
}
}

if (text == null) {
throw new ParsingException(parser.getTokenLocation(), "No text specified for text query");
}
return Optional.of(new CommonTermsQueryBuilder(fieldName, text)
.lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch)
.highFreqMinimumShouldMatch(highFreqMinimumShouldMatch)
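The rewrite above is the template applied to every query parser in this commit: instead of demanding a FIELD_NAME token up front, fromXContent now walks the query object with fieldName starting at null, funnels both accepted syntaxes through one loop, and reports stray second fields via the shared helper. For orientation, the request shapes involved (illustrative JSON in comments, not taken from the diff):

// Expanded form: the field name owns an options object.
//   { "common": { "body": { "query": "nelly the elephant", "cutoff_frequency": 0.001 } } }
// Simplified form: the field name maps straight to the query text.
//   { "common": { "body": "nelly the elephant" } }
// A second field is now rejected with one consistent message:
//   { "common": { "body": "nelly", "title": "nelly" } }
//   -> "[common] query doesn't support multiple fields, found [body] and [title]"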
@ -152,7 +152,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
*/
public FuzzyQueryBuilder(String fieldName, Object value) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("field name cannot be null or empty.");
throw new IllegalArgumentException("field name cannot be null or empty");
}
if (value == null) {
throw new IllegalArgumentException("query value cannot be null");

@ -258,63 +258,58 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i

public static Optional<FuzzyQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query malformed, no field");
}

String fieldName = parser.currentName();
String fieldName = null;
Object value = null;

Fuzziness fuzziness = FuzzyQueryBuilder.DEFAULT_FUZZINESS;
int prefixLength = FuzzyQueryBuilder.DEFAULT_PREFIX_LENGTH;
int maxExpansions = FuzzyQueryBuilder.DEFAULT_MAX_EXPANSIONS;
boolean transpositions = FuzzyQueryBuilder.DEFAULT_TRANSPOSITIONS;
String rewrite = null;

String queryName = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;

token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {
fuzziness = Fuzziness.parse(parser);
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {
prefixLength = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansions = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) {
transpositions = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {
rewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query does not support [" + currentFieldName + "]");
if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {
fuzziness = Fuzziness.parse(parser);
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {
prefixLength = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansions = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, TRANSPOSITIONS_FIELD)) {
transpositions = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {
rewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[fuzzy] query does not support [" + currentFieldName + "]");
}
}
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.objectBytes();
}
parser.nextToken();
} else {
value = parser.objectBytes();
// move to the next token
parser.nextToken();
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "no value specified for fuzzy query");
}
return Optional.of(new FuzzyQueryBuilder(fieldName, value)
.fuzziness(fuzziness)
@ -359,9 +359,9 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
fieldName = currentFieldName;
GeoUtils.parseGeoPoint(parser, point);
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
// the json in the format of -> field : { lat : 30, lon : 12 }
String currentName = parser.currentName();
assert currentFieldName != null;
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@ -192,62 +192,53 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr

public static Optional<MatchPhrasePrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();

String fieldName = null;
Object value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String analyzer = null;
int slop = MatchQuery.DEFAULT_PHRASE_SLOP;
int maxExpansion = FuzzyQuery.defaultMaxExpansions;
String queryName = null;

token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansion = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchPhraseQueryBuilder.SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansion = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]");
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.objectText();
}
parser.nextToken();
} else {
value = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME
+ "] query parsed in simplified form, with direct field name, "
+ "but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
}
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No text specified for text query");
}

MatchPhrasePrefixQueryBuilder matchQuery = new MatchPhrasePrefixQueryBuilder(fieldName, value);
@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -49,7 +50,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue
private int slop = MatchQuery.DEFAULT_PHRASE_SLOP;

public MatchPhraseQueryBuilder(String fieldName, Object value) {
if (fieldName == null) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("[" + NAME + "] requires fieldName");
}
if (value == null) {

@ -163,59 +164,50 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue

public static Optional<MatchPhraseQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();

String fieldName = null;
Object value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String analyzer = null;
int slop = MatchQuery.DEFAULT_PHRASE_SLOP;
String queryName = null;

token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MatchQueryBuilder.ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]");
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.objectText();
}
parser.nextToken();
} else {
value = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME
+ "] query parsed in simplified form, with direct field name, "
+ "but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
}
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No text specified for text query");
}

MatchPhraseQueryBuilder matchQuery = new MatchPhraseQueryBuilder(fieldName, value);
@ -510,13 +510,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {

public static Optional<MatchQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[" + MatchQueryBuilder.NAME + "] query malformed, no field");
}
String fieldName = parser.currentName();

String fieldName = null;
MatchQuery.Type type = MatchQuery.Type.BOOLEAN;
Object value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;

@ -533,80 +527,82 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
Float cutOffFrequency = null;
ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY;
String queryName = null;

token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
String tStr = parser.text();
if ("boolean".equals(tStr)) {
type = MatchQuery.Type.BOOLEAN;
} else if ("phrase".equals(tStr)) {
type = MatchQuery.Type.PHRASE;
} else if ("phrase_prefix".equals(tStr) || ("phrasePrefix".equals(tStr))) {
type = MatchQuery.Type.PHRASE_PREFIX;
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support type " + tStr);
}
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {
fuzziness = Fuzziness.parse(parser);
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {
prefixLength = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansion = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) {
operator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
minimumShouldMatch = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) {
fuzzyRewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) {
fuzzyTranspositions = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {
lenient = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {
cutOffFrequency = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ZERO_TERMS_QUERY_FIELD)) {
String zeroTermsDocs = parser.text();
if ("none".equalsIgnoreCase(zeroTermsDocs)) {
zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE;
} else if ("all".equalsIgnoreCase(zeroTermsDocs)) {
zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
value = parser.objectText();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
String tStr = parser.text();
if ("boolean".equals(tStr)) {
type = MatchQuery.Type.BOOLEAN;
} else if ("phrase".equals(tStr)) {
type = MatchQuery.Type.PHRASE;
} else if ("phrase_prefix".equals(tStr) || ("phrasePrefix".equals(tStr))) {
type = MatchQuery.Type.PHRASE_PREFIX;
} else {
throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support type " + tStr);
}
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ANALYZER_FIELD)) {
analyzer = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, SLOP_FIELD)) {
slop = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, Fuzziness.FIELD)) {
fuzziness = Fuzziness.parse(parser);
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, PREFIX_LENGTH_FIELD)) {
prefixLength = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MAX_EXPANSIONS_FIELD)) {
maxExpansion = parser.intValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, OPERATOR_FIELD)) {
operator = Operator.fromString(parser.text());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, MINIMUM_SHOULD_MATCH_FIELD)) {
minimumShouldMatch = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_REWRITE_FIELD)) {
fuzzyRewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, FUZZY_TRANSPOSITIONS_FIELD)) {
fuzzyTranspositions = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LENIENT_FIELD)) {
lenient = parser.booleanValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, CUTOFF_FREQUENCY_FIELD)) {
cutOffFrequency = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ZERO_TERMS_QUERY_FIELD)) {
String zeroTermsDocs = parser.text();
if ("none".equalsIgnoreCase(zeroTermsDocs)) {
zeroTermsQuery = MatchQuery.ZeroTermsQuery.NONE;
} else if ("all".equalsIgnoreCase(zeroTermsDocs)) {
zeroTermsQuery = MatchQuery.ZeroTermsQuery.ALL;
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unsupported zero_terms_docs value [" + zeroTermsDocs + "]");
}
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unsupported zero_terms_docs value [" + zeroTermsDocs + "]");
"[" + NAME + "] query does not support [" + currentFieldName + "]");
}
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] query does not support [" + currentFieldName + "]");
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
}
}
parser.nextToken();
} else {
value = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[match] query parsed in simplified form, with direct field name, "
+ "but included more options than just the field name, possibly use its 'options' form, with 'query' element?");
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.objectText();
}
}
@ -64,7 +64,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
throw new IllegalArgumentException("field name is null or empty");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null.");
throw new IllegalArgumentException("value cannot be null");
}
this.fieldName = fieldName;
this.value = value;

@ -120,7 +120,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
public static Optional<PrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

String fieldName = parser.currentName();
String fieldName = null;
String value = null;
String rewrite = null;

@ -134,6 +134,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {

@ -149,19 +150,17 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
rewrite = parser.textOrNull();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[regexp] query does not support [" + currentFieldName + "]");
"[prefix] query does not support [" + currentFieldName + "]");
}
}
}
} else {
fieldName = currentFieldName;
value = parser.textOrNull();
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = currentFieldName;
value = parser.textOrNull();
}
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No value specified for prefix query");
}
return Optional.of(new PrefixQueryBuilder(fieldName, value)
.rewrite(rewrite)
.boost(boost)
@ -109,16 +109,17 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
String queryName = parser.currentName();
// move to the next START_OBJECT
token = parser.nextToken();
if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) {
throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no start_object after query name");
if (token != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name");
}
@SuppressWarnings("unchecked")
Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,
parser.getTokenLocation()).fromXContent(this);
if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {
// if we are at END_OBJECT, move to the next one...
parser.nextToken();
if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(),
"[" + queryName + "] malformed query, expected [END_OBJECT] but found [" + parser.currentToken() + "]");
}
parser.nextToken();
return result;
}
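The QueryParseContext change above tightens the post-parse contract: the old code tolerated START_ARRAY/END_ARRAY and silently skipped one closing token, while the new code insists that once a query's fromXContent returns, advancing the parser lands exactly on the END_OBJECT that closes the query clause. A sketch of the token positions involved (illustrative input, not from the diff):

// Input:  { "term" : { "user" : "kimchy" } }
//                                       ^   ^
//                    inner END_OBJECT --'   '-- outer END_OBJECT
// Rough contract: fromXContent consumes up to the inner END_OBJECT; the caller
// then calls parser.nextToken(), requires END_OBJECT, and otherwise throws the
// new "[<name>] malformed query, expected [END_OBJECT] but found [...]" error.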
@ -318,6 +318,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@ -77,7 +77,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
throw new IllegalArgumentException("field name is null or empty");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null.");
throw new IllegalArgumentException("value cannot be null");
}
this.fieldName = fieldName;
this.value = value;

@ -180,10 +180,8 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>

public static Optional<RegexpQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();

String fieldName = parser.currentName();
String fieldName = null;
String rewrite = null;

String value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
int flagsValue = RegexpQueryBuilder.DEFAULT_FLAGS_VALUE;

@ -197,6 +195,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {

@ -227,15 +226,13 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
if (parseContext.getParseFieldMatcher().match(currentFieldName, NAME_FIELD)) {
queryName = parser.text();
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = currentFieldName;
value = parser.textOrNull();
}
}
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No value specified for regexp query");
}
return Optional.of(new RegexpQueryBuilder(fieldName, value)
.flags(flagsValue)
.maxDeterminizedStates(maxDeterminizedStates)
@ -186,7 +186,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
/** Add a field to run the query against. */
public SimpleQueryStringBuilder field(String field) {
if (Strings.isEmpty(field)) {
throw new IllegalArgumentException("supplied field is null or empty.");
throw new IllegalArgumentException("supplied field is null or empty");
}
this.fieldsAndWeights.put(field, AbstractQueryBuilder.DEFAULT_BOOST);
return this;

@ -195,7 +195,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
/** Add a field to run the query against with a specific boost. */
public SimpleQueryStringBuilder field(String field, float boost) {
if (Strings.isEmpty(field)) {
throw new IllegalArgumentException("supplied field is null or empty.");
throw new IllegalArgumentException("supplied field is null or empty");
}
this.fieldsAndWeights.put(field, boost);
return this;
@ -94,49 +94,41 @@ public class SpanTermQueryBuilder extends BaseTermQueryBuilder<SpanTermQueryBuil

public static Optional<SpanTermQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
XContentParser parser = parseContext.parser();

XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.START_OBJECT) {
token = parser.nextToken();
}

assert token == XContentParser.Token.FIELD_NAME;
String fieldName = parser.currentName();

String fieldName = null;
Object value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[span_term] query does not support [" + currentFieldName + "]");
if (parseContext.getParseFieldMatcher().match(currentFieldName, TERM_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, BaseTermQueryBuilder.VALUE_FIELD)) {
value = parser.objectBytes();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[span_term] query does not support [" + currentFieldName + "]");
}
}
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.objectBytes();
}
parser.nextToken();
} else {
value = parser.objectBytes();
// move to the next token
parser.nextToken();
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No value specified for term query");
}

SpanTermQueryBuilder result = new SpanTermQueryBuilder(fieldName, value);
@ -98,11 +98,7 @@ public class TermQueryBuilder extends BaseTermQueryBuilder<TermQueryBuilder> {
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
// also support a format of "term" : {"field_name" : { ... }}
if (fieldName != null) {
throw new ParsingException(parser.getTokenLocation(),
"[term] query does not support different field names, use [bool] query instead");
}
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {

@ -123,10 +119,7 @@ public class TermQueryBuilder extends BaseTermQueryBuilder<TermQueryBuilder> {
}
}
} else if (token.isValue()) {
if (fieldName != null) {
throw new ParsingException(parser.getTokenLocation(),
"[term] query does not support different field names, use [bool] query instead");
}
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = currentFieldName;
value = parser.objectBytes();
} else if (token == XContentParser.Token.START_ARRAY) {
@ -75,7 +75,7 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder<WildcardQueryBuil
throw new IllegalArgumentException("field name is null or empty");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null.");
throw new IllegalArgumentException("value cannot be null");
}
this.fieldName = fieldName;
this.value = value;

@ -135,49 +135,48 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder<WildcardQueryBuil

public static Optional<WildcardQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[wildcard] query malformed, no field");
}
String fieldName = parser.currentName();
String fieldName = null;
String rewrite = null;

String value = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) {
value = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {
value = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {
rewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseContext.isDeprecatedSetting(currentFieldName)) {
// skip
} else if (token == XContentParser.Token.START_OBJECT) {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[wildcard] query does not support [" + currentFieldName + "]");
if (parseContext.getParseFieldMatcher().match(currentFieldName, WILDCARD_FIELD)) {
value = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, VALUE_FIELD)) {
value = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, REWRITE_FIELD)) {
rewrite = parser.textOrNull();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(),
"[wildcard] query does not support [" + currentFieldName + "]");
}
}
}
} else {
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
fieldName = parser.currentName();
value = parser.text();
}
parser.nextToken();
} else {
value = parser.text();
parser.nextToken();
}

if (value == null) {
throw new ParsingException(parser.getTokenLocation(), "No value specified for wildcard query");
}
return Optional.of(new WildcardQueryBuilder(fieldName, value)
.rewrite(rewrite)
.boost(boost)
@ -18,16 +18,19 @@
|
|||
*/
|
||||
package org.elasticsearch.index.translog;
|
||||
|
||||
import org.apache.lucene.store.ByteArrayDataOutput;
|
||||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.store.DataInput;
|
||||
import org.apache.lucene.store.DataOutput;
|
||||
import org.apache.lucene.store.InputStreamDataInput;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.OutputStreamIndexOutput;
|
||||
import org.apache.lucene.store.SimpleFSDirectory;
|
||||
import org.elasticsearch.common.io.Channels;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.channels.FileChannel;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.OpenOption;
|
||||
import java.nio.file.Path;
|
||||
|
||||
|
@ -35,69 +38,117 @@ import java.nio.file.Path;
 */
class Checkpoint {

    static final int BUFFER_SIZE = Integer.BYTES  // ops
        + Long.BYTES // offset
        + Long.BYTES;// generation
    final long offset;
    final int numOps;
    final long generation;

    private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before

    private static final String CHECKPOINT_CODEC = "ckp";

    static final int FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC)
        + Integer.BYTES  // ops
        + Long.BYTES // offset
        + Long.BYTES // generation
        + CodecUtil.footerLength();

    static final int LEGACY_NON_CHECKSUMMED_FILE_LENGTH = Integer.BYTES  // ops
        + Long.BYTES // offset
        + Long.BYTES; // generation

    Checkpoint(long offset, int numOps, long generation) {
        this.offset = offset;
        this.numOps = numOps;
        this.generation = generation;
    }

    Checkpoint(DataInput in) throws IOException {
        offset = in.readLong();
        numOps = in.readInt();
        generation = in.readLong();
    }

    private void write(FileChannel channel) throws IOException {
        byte[] buffer = new byte[BUFFER_SIZE];
        final ByteArrayDataOutput out = new ByteArrayDataOutput(buffer);
        write(out);
        Channels.writeToChannel(buffer, channel);
    }

    void write(DataOutput out) throws IOException {
    private void write(DataOutput out) throws IOException {
        out.writeLong(offset);
        out.writeInt(numOps);
        out.writeLong(generation);
    }

    // reads a checksummed checkpoint introduced in ES 5.0.0
    static Checkpoint readChecksummedV1(DataInput in) throws IOException {
        return new Checkpoint(in.readLong(), in.readInt(), in.readLong());
    }

    // reads a checkpoint from ES < 5.0.0
    static Checkpoint readNonChecksummed(DataInput in) throws IOException {
        return new Checkpoint(in.readLong(), in.readInt(), in.readLong());
    }

    @Override
    public String toString() {
        return "Checkpoint{" +
                "offset=" + offset +
                ", numOps=" + numOps +
                ", translogFileGeneration= " + generation +
                '}';
            "offset=" + offset +
            ", numOps=" + numOps +
            ", translogFileGeneration= " + generation +
            '}';
    }

    public static Checkpoint read(Path path) throws IOException {
        try (InputStream in = Files.newInputStream(path)) {
            return new Checkpoint(new InputStreamDataInput(in));
        try (Directory dir = new SimpleFSDirectory(path.getParent())) {
            try (final IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) {
                if (indexInput.length() == LEGACY_NON_CHECKSUMMED_FILE_LENGTH) {
                    // OLD unchecksummed file that was written < ES 5.0.0
                    return Checkpoint.readNonChecksummed(indexInput);
                }
                // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
                CodecUtil.checksumEntireFile(indexInput);
                final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, INITIAL_VERSION, INITIAL_VERSION);
                return Checkpoint.readChecksummedV1(indexInput);
            }
        }
    }

    public static void write(ChannelFactory factory, Path checkpointFile, Checkpoint checkpoint, OpenOption... options) throws IOException {
        final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(FILE_SIZE) {
            @Override
            public synchronized byte[] toByteArray() {
                // don't clone
                return buf;
            }
        };
        final String resourceDesc = "checkpoint(path=\"" + checkpointFile + "\", gen=" + checkpoint + ")";
        try (final OutputStreamIndexOutput indexOutput =
                 new OutputStreamIndexOutput(resourceDesc, checkpointFile.toString(), byteOutputStream, FILE_SIZE)) {
            CodecUtil.writeHeader(indexOutput, CHECKPOINT_CODEC, INITIAL_VERSION);
            checkpoint.write(indexOutput);
            CodecUtil.writeFooter(indexOutput);

            assert indexOutput.getFilePointer() == FILE_SIZE :
                "get your numbers straight. Bytes written: " + indexOutput.getFilePointer() + " buffer size: " + FILE_SIZE;
            assert indexOutput.getFilePointer() < 512 :
                "checkpoint files have to be smaller than 512 bytes for atomic writes. size: " + indexOutput.getFilePointer();

        }
        // now go and write to the channel, in one go.
        try (FileChannel channel = factory.open(checkpointFile, options)) {
            checkpoint.write(channel);
            Channels.writeToChannel(byteOutputStream.toByteArray(), channel);
            // no need to force metadata, file size stays the same and we did the full fsync
            // when we first created the file, so the directory entry doesn't change as well
            channel.force(false);
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        Checkpoint that = (Checkpoint) o;

        if (offset != that.offset) return false;
        if (numOps != that.numOps) return false;
        if (offset != that.offset) {
            return false;
        }
        if (numOps != that.numOps) {
            return false;
        }
        return generation == that.generation;

    }
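For context, the new checkpoint code relies on the fact that an in-place overwrite of a small, fixed-size file can be treated as atomic: the whole record is serialized into one buffer, written in a single call well under a 512-byte sector, and fsynced without metadata. A minimal stand-alone sketch of that pattern follows (an illustration under those assumptions, not the Elasticsearch implementation; all names here are hypothetical):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class AtomicCheckpointSketch {
    // Fixed record size: offset (long) + ops (int) + generation (long).
    private static final int FILE_SIZE = 2 * Long.BYTES + Integer.BYTES;

    static void writeCheckpoint(Path file, int numOps, long offset, long generation) throws IOException {
        // Serialize the whole record in memory first so the on-disk write is a single call.
        ByteBuffer buffer = ByteBuffer.allocate(FILE_SIZE);
        buffer.putLong(offset).putInt(numOps).putLong(generation).flip();
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            while (buffer.hasRemaining()) {
                channel.write(buffer); // one small write, well under a 512-byte sector
            }
            // fsync the data only; the file size never changes after creation,
            // so the directory entry does not need to be flushed again.
            channel.force(false);
        }
    }
}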
@ -200,7 +200,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
            Files.createDirectories(location);
            final long generation = 1;
            Checkpoint checkpoint = new Checkpoint(0, 0, generation);
            Checkpoint.write(getChannelFactory(), location.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
            final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
            Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
            IOUtils.fsync(checkpointFile, false);
            current = createWriter(generation);
            this.lastCommittedTranslogFileGeneration = NOT_SET_GENERATION;
@ -36,11 +36,9 @@ import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.translog.Checkpoint;

import java.io.IOException;
import java.nio.channels.Channels;
@ -168,12 +166,11 @@ public class TruncateTranslogCommand extends SettingCommand {

    /** Write a checkpoint file to the given location with the given generation */
    public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException {
        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
             OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) {
            Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration);
            emptyCheckpoint.write(out);
            fc.force(true);
        }
        Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration);
        Checkpoint.write(FileChannel::open, filename, emptyCheckpoint,
            StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
        // fsync with metadata here to make sure.
        IOUtils.fsync(filename, false);
    }

    /**
@ -177,7 +177,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple

        deleteIndices(event); // also deletes shards of deleted indices

        removeUnallocatedIndices(state); // also removes shards of removed indices
        removeUnallocatedIndices(event); // also removes shards of removed indices

        failMissingShards(state);

@ -216,7 +216,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
            if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction?
                String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure";
                logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message);
                shardStateAction.shardFailed(matchedRouting, matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER);
                shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER);
            }
        }
    }

@ -286,28 +286,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
                });
            }
        }

        // delete local indices that neither exist in the previous cluster state nor are part of the index tombstones
        for (AllocatedIndex<? extends Shard> indexService : indicesService) {
            Index index = indexService.index();
            IndexMetaData indexMetaData = event.state().metaData().index(index);
            if (indexMetaData == null) {
                assert false : "index " + index + " exists locally, doesn't have a metadata but is not part"
                    + " of the delete index list. \nprevious state: " + event.previousState().prettyPrint()
                    + "\n current state:\n" + event.state().prettyPrint();
                logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index);
                indicesService.deleteIndex(index, "isn't part of metadata (explicit check)");
            }
        }
    }

    /**
     * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough
     * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}).
     *
     * @param state new cluster state
     * @param event the cluster changed event
     */
    private void removeUnallocatedIndices(final ClusterState state) {
    private void removeUnallocatedIndices(final ClusterChangedEvent event) {
        final ClusterState state = event.state();
        final String localNodeId = state.nodes().getLocalNodeId();
        assert localNodeId != null;

@ -322,6 +310,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
        for (AllocatedIndex<? extends Shard> indexService : indicesService) {
            Index index = indexService.index();
            if (indicesWithShards.contains(index) == false) {
                // if the cluster change indicates a brand new cluster, we only want
                // to remove the in-memory structures for the index and not delete the
                // contents on disk because the index will later be re-imported as a
                // dangling index
                assert state.metaData().index(index) != null || event.isNewCluster() :
                    "index " + index + " does not exist in the cluster state, it should either " +
                    "have been deleted or the cluster must be new";
                logger.debug("{} removing index, no shards allocated", index);
                indicesService.removeIndex(index, "removing index (no shards allocated)");
            }

@ -686,7 +681,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
        try {
            logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
            failedShardsCache.put(shardRouting.shardId(), shardRouting);
            shardStateAction.shardFailed(shardRouting, shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
            shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER);
        } catch (Exception inner) {
            if (failure != null) inner.addSuppressed(failure);
            logger.warn(
@ -174,6 +174,9 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve
            } else {
                metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata();
            }
        } catch (org.apache.lucene.index.IndexNotFoundException e) {
            // happens on an empty folder. no need to log
            metadataSnapshot = Store.MetadataSnapshot.EMPTY;
        } catch (IOException e) {
            logger.warn("error while listing local files, recover as if there are none", e);
            metadataSnapshot = Store.MetadataSnapshot.EMPTY;
@ -345,7 +345,8 @@ public class Node implements Closeable {
        final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables);
        client = new NodeClient(settings, threadPool);
        Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class).stream()
            .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService).stream())
            .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService,
                                             scriptModule.getScriptService()).stream())
            .collect(Collectors.toList());
        modules.add(b -> {
            b.bind(PluginsService.class).toInstance(pluginsService);
@ -45,6 +45,10 @@ import static java.util.Collections.emptyMap;
 *     }
 *   }
 * }</pre>
 *
 * Elasticsearch doesn't have any automatic mechanism to share these components between indexes. If any component is heavy enough to warrant
 * such sharing then it is the Plugin's responsibility to do it in their {@link AnalysisProvider} implementation. We recommend against doing
 * this unless absolutely necessary because it can be difficult to get the caching right given things like behavior changes across versions.
 */
public interface AnalysisPlugin {
    /**
@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;

@ -82,9 +83,10 @@ public abstract class Plugin {
     * @param clusterService A service to allow watching and updating cluster state
     * @param threadPool A service to allow retrieving an executor to run an async action
     * @param resourceWatcherService A service to watch for changes to node local files
     * @param scriptService A service to allow running scripts on the local node
     */
    public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
                                               ResourceWatcherService resourceWatcherService) {
                                               ResourceWatcherService resourceWatcherService, ScriptService scriptService) {
        return Collections.emptyList();
    }

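For plugin authors, the signature change means every existing createComponents override must grow the new ScriptService parameter. A hedged sketch of a plugin adapting to the new signature (MyPlugin and its MyService component are illustrative stand-ins, not from the codebase):

import java.util.Collection;
import java.util.Collections;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;

public class MyPlugin extends Plugin {

    // Illustrative component; any object returned from createComponents is bound in the node's injector.
    static final class MyService {
        final Client client;
        final ScriptService scriptService;

        MyService(Client client, ScriptService scriptService) {
            this.client = client;
            this.scriptService = scriptService;
        }
    }

    @Override
    public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
                                               ResourceWatcherService resourceWatcherService, ScriptService scriptService) {
        // The plugin can now hand scripts-aware components to the node.
        return Collections.singletonList(new MyService(client, scriptService));
    }
}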
@ -38,15 +38,19 @@ public final class IndexId implements Writeable, ToXContent {

    private final String name;
    private final String id;
    private final int hashCode;

    public IndexId(final String name, final String id) {
        this.name = name;
        this.id = id;
        this.hashCode = computeHashCode();
    }

    public IndexId(final StreamInput in) throws IOException {
        this.name = in.readString();
        this.id = in.readString();
        this.hashCode = computeHashCode();
    }

    /**

@ -90,6 +94,10 @@ public final class IndexId implements Writeable, ToXContent {

    @Override
    public int hashCode() {
        return hashCode;
    }

    private int computeHashCode() {
        return Objects.hash(name, id);
    }
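The IndexId change is an instance of a common idiom for immutable value types: since the fields never change, the hash is computed once in the constructor and served from a field on every lookup. A generic sketch of the pattern (the Pair type is hypothetical, not from the codebase):

import java.util.Objects;

// Immutable value type that precomputes its hash code once.
public final class Pair {
    private final String left;
    private final String right;
    private final int hashCode; // cached because the fields are final

    public Pair(String left, String right) {
        this.left = left;
        this.right = right;
        this.hashCode = Objects.hash(left, right);
    }

    @Override
    public int hashCode() {
        return hashCode; // O(1), no recomputation on every map lookup
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        Pair that = (Pair) o;
        return left.equals(that.left) && right.equals(that.right);
    }
}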
@ -101,6 +101,7 @@ import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Collection;

@ -406,7 +407,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
        }
        try {
            // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
            writeIndexGen(repositoryData.removeSnapshot(snapshotId));
            final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
            writeIndexGen(updatedRepositoryData);

            // delete the snapshot file
            safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());

@ -436,6 +438,27 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                }
            }
        }

        // cleanup indices that are no longer part of the repository
        final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
        indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
        final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
        for (final IndexId indexId : indicesToCleanUp) {
            try {
                indicesBlobContainer.deleteBlob(indexId.getId());
            } catch (DirectoryNotEmptyException dnee) {
                // if the directory isn't empty for some reason, it will fail to clean up;
                // we'll ignore that and accept that cleanup didn't fully succeed.
                // since we are using UUIDs for path names, this won't be an issue for
                // snapshotting indices of the same name
                logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
                    "its index folder due to the directory not being empty.", dnee, metadata.name(), indexId);
            } catch (IOException ioe) {
                // a different IOException occurred while trying to delete - will just log the issue for now
                logger.debug("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " +
                    "its index folder.", ioe, metadata.name(), indexId);
            }
        }
    } catch (IOException ex) {
        throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);
    }
@ -30,9 +30,9 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.Table;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.rest.RestChannel;
@ -47,75 +47,26 @@ import org.elasticsearch.threadpool.ThreadPoolStats;

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import static org.elasticsearch.rest.RestRequest.Method.GET;

public class RestThreadPoolAction extends AbstractCatAction {

    private static final String[] SUPPORTED_NAMES = new String[]{
        ThreadPool.Names.BULK,
        ThreadPool.Names.FLUSH,
        ThreadPool.Names.GENERIC,
        ThreadPool.Names.GET,
        ThreadPool.Names.INDEX,
        ThreadPool.Names.MANAGEMENT,
        ThreadPool.Names.FORCE_MERGE,
        ThreadPool.Names.REFRESH,
        ThreadPool.Names.SEARCH,
        ThreadPool.Names.SNAPSHOT,
        ThreadPool.Names.WARMER
    };

    private static final String[] SUPPORTED_ALIASES = new String[]{
        "b",
        "f",
        "ge",
        "g",
        "i",
        "ma",
        "fm",
        "r",
        "s",
        "sn",
        "w"
    };

    static {
        assert SUPPORTED_ALIASES.length == SUPPORTED_NAMES.length: "SUPPORTED_NAMES/ALIASES mismatch";
    }

    private static final String[] DEFAULT_THREAD_POOLS = new String[]{
        ThreadPool.Names.BULK,
        ThreadPool.Names.INDEX,
        ThreadPool.Names.SEARCH,
    };

    private static final Map<String, String> ALIAS_TO_THREAD_POOL;
    private static final Map<String, String> THREAD_POOL_TO_ALIAS;

    static {
        ALIAS_TO_THREAD_POOL = new HashMap<>(SUPPORTED_NAMES.length);
        for (String supportedThreadPool : SUPPORTED_NAMES) {
            ALIAS_TO_THREAD_POOL.put(supportedThreadPool.substring(0, 3), supportedThreadPool);
        }
        THREAD_POOL_TO_ALIAS = new HashMap<>(SUPPORTED_NAMES.length);
        for (int i = 0; i < SUPPORTED_NAMES.length; i++) {
            THREAD_POOL_TO_ALIAS.put(SUPPORTED_NAMES[i], SUPPORTED_ALIASES[i]);
        }
    }

    @Inject
    public RestThreadPoolAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(GET, "/_cat/thread_pool", this);
        controller.registerHandler(GET, "/_cat/thread_pool/{thread_pool_patterns}", this);
    }

    @Override
    protected void documentation(StringBuilder sb) {
        sb.append("/_cat/thread_pool\n");
        sb.append("/_cat/thread_pool/{thread_pools}");
    }

    @Override
@ -149,96 +100,55 @@ public class RestThreadPoolAction extends AbstractCatAction {

    @Override
    protected Table getTableWithHeader(final RestRequest request) {
        Table table = new Table();
        final Table table = new Table();
        table.startHeaders();
        table.addCell("id", "default:false;alias:nodeId;desc:unique node id");
        table.addCell("node_name", "default:true;alias:nn;desc:node name");
        table.addCell("node_id", "default:false;alias:id;desc:persistent node id");
        table.addCell("ephemeral_node_id", "default:false;alias:eid;desc:ephemeral node id");
        table.addCell("pid", "default:false;alias:p;desc:process id");
        table.addCell("host", "alias:h;desc:host name");
        table.addCell("ip", "alias:i;desc:ip address");
        table.addCell("host", "default:false;alias:h;desc:host name");
        table.addCell("ip", "default:false;alias:i;desc:ip address");
        table.addCell("port", "default:false;alias:po;desc:bound transport port");

        final String[] requestedPools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
        for (String pool : SUPPORTED_NAMES) {
            String poolAlias = THREAD_POOL_TO_ALIAS.get(pool);
            boolean display = false;
            for (String requestedPool : requestedPools) {
                if (pool.equals(requestedPool)) {
                    display = true;
                    break;
                }
            }

            String defaultDisplayVal = Boolean.toString(display);
            table.addCell(
                pool + ".type",
                "alias:" + poolAlias + "t;default:false;desc:" + pool + " thread pool type"
            );
            table.addCell(
                pool + ".active",
                "alias:" + poolAlias + "a;default:" + defaultDisplayVal + ";text-align:right;desc:number of active " + pool + " threads"
            );
            table.addCell(
                pool + ".size",
                "alias:" + poolAlias + "s;default:false;text-align:right;desc:number of " + pool + " threads"
            );
            table.addCell(
                pool + ".queue",
                "alias:" + poolAlias + "q;default:" + defaultDisplayVal + ";text-align:right;desc:number of " + pool + " threads in queue"
            );
            table.addCell(
                pool + ".queueSize",
                "alias:" + poolAlias + "qs;default:false;text-align:right;desc:maximum number of " + pool + " threads in queue"
            );
            table.addCell(
                pool + ".rejected",
                "alias:" + poolAlias + "r;default:" + defaultDisplayVal + ";text-align:right;desc:number of rejected " + pool + " threads"
            );
            table.addCell(
                pool + ".largest",
                "alias:" + poolAlias + "l;default:false;text-align:right;desc:highest number of seen active " + pool + " threads"
            );
            table.addCell(
                pool + ".completed",
                "alias:" + poolAlias + "c;default:false;text-align:right;desc:number of completed " + pool + " threads"
            );
            table.addCell(
                pool + ".min",
                "alias:" + poolAlias + "mi;default:false;text-align:right;desc:minimum number of " + pool + " threads"
            );
            table.addCell(
                pool + ".max",
                "alias:" + poolAlias + "ma;default:false;text-align:right;desc:maximum number of " + pool + " threads"
            );
            table.addCell(
                pool + ".keepAlive",
                "alias:" + poolAlias + "k;default:false;text-align:right;desc:" + pool + " thread keep alive time"
            );
        }

        table.addCell("name", "default:true;alias:n;desc:thread pool name");
        table.addCell("type", "alias:t;default:false;desc:thread pool type");
        table.addCell("active", "alias:a;default:true;text-align:right;desc:number of active threads");
        table.addCell("size", "alias:s;default:false;text-align:right;desc:number of threads");
        table.addCell("queue", "alias:q;default:true;text-align:right;desc:number of tasks currently in queue");
        table.addCell("queue_size", "alias:qs;default:false;text-align:right;desc:maximum number of tasks permitted in queue");
        table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks");
        table.addCell("largest", "alias:l;default:false;text-align:right;desc:highest number of seen active threads");
        table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks");
        table.addCell("min", "alias:mi;default:false;text-align:right;desc:minimum number of threads");
        table.addCell("max", "alias:ma;default:false;text-align:right;desc:maximum number of threads");
        table.addCell("keep_alive", "alias:ka;default:false;text-align:right;desc:thread keep alive time");
        table.endHeaders();
        return table;
    }

    private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
        boolean fullId = req.paramAsBoolean("full_id", false);
        DiscoveryNodes nodes = state.getState().nodes();
        Table table = getTableWithHeader(req);
        final String[] threadPools = req.paramAsStringArray("thread_pool_patterns", new String[] { "*" });
        final DiscoveryNodes nodes = state.getState().nodes();
        final Table table = getTableWithHeader(req);

        for (DiscoveryNode node : nodes) {
            NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
            NodeStats stats = nodesStats.getNodesMap().get(node.getId());
            table.startRow();

            table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
            table.addCell(info == null ? null : info.getProcess().getId());
            table.addCell(node.getHostName());
            table.addCell(node.getHostAddress());
            if (node.getAddress() instanceof InetSocketTransportAddress) {
                table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort());
            } else {
                table.addCell("-");
        // collect all thread pool names that we see across the nodes
        final Set<String> candidates = new HashSet<>();
        for (final NodeStats nodeStats : nodesStats.getNodes()) {
            for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) {
                candidates.add(threadPoolStats.getName());
            }
        }

        // collect all thread pool names that match the specified thread pool patterns
        final Set<String> included = new HashSet<>();
        for (final String candidate : candidates) {
            if (Regex.simpleMatch(threadPools, candidate)) {
                included.add(candidate);
            }
        }

        for (final DiscoveryNode node : nodes) {
            final NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
            final NodeStats stats = nodesStats.getNodesMap().get(node.getId());

            final Map<String, ThreadPoolStats.Stats> poolThreadStats;
            final Map<String, ThreadPool.Info> poolThreadInfo;
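The rewritten buildTable discovers pool names dynamically from node stats and keeps only those matching the thread_pool_patterns parameter via simple wildcard matching. A minimal stand-alone sketch of that filtering step (a plain-Java reimplementation shown only to illustrate the semantics of Regex.simpleMatch; names and output here are assumptions):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PoolPatternFilter {

    // Simple wildcard match: '*' matches any substring, similar to Regex.simpleMatch.
    static boolean simpleMatch(String pattern, String value) {
        return value.matches(pattern.replace(".", "\\.").replace("*", ".*"));
    }

    public static void main(String[] args) {
        List<String> candidates = Arrays.asList("bulk", "search", "fetch_shard_started", "fetch_shard_store");
        String[] patterns = {"fetch_shard*", "bulk"};

        Set<String> included = new HashSet<>();
        for (String candidate : candidates) {
            for (String pattern : patterns) {
                if (simpleMatch(pattern, candidate)) {
                    included.add(candidate);
                }
            }
        }
        // e.g. [bulk, fetch_shard_started, fetch_shard_store] (set order may vary)
        System.out.println(included);
    }
}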
@ -247,8 +157,9 @@ public class RestThreadPoolAction extends AbstractCatAction {
                poolThreadStats = Collections.emptyMap();
                poolThreadInfo = Collections.emptyMap();
            } else {
                poolThreadStats = new HashMap<>(14);
                poolThreadInfo = new HashMap<>(14);
                // we use a sorted map to ensure that thread pools are sorted by name
                poolThreadStats = new TreeMap<>();
                poolThreadInfo = new HashMap<>();

                ThreadPoolStats threadPoolStats = stats.getThreadPool();
                for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) {
@ -260,9 +171,25 @@ public class RestThreadPoolAction extends AbstractCatAction {
                }
            }
        }
            for (String pool : SUPPORTED_NAMES) {
                ThreadPoolStats.Stats poolStats = poolThreadStats.get(pool);
                ThreadPool.Info poolInfo = poolThreadInfo.get(pool);
            for (Map.Entry<String, ThreadPoolStats.Stats> entry : poolThreadStats.entrySet()) {

                if (!included.contains(entry.getKey())) continue;

                table.startRow();

                table.addCell(node.getName());
                table.addCell(node.getId());
                table.addCell(node.getEphemeralId());
                table.addCell(info == null ? null : info.getProcess().getId());
                table.addCell(node.getHostName());
                table.addCell(node.getHostAddress());
                if (node.getAddress() instanceof InetSocketTransportAddress) {
                    table.addCell(((InetSocketTransportAddress) node.getAddress()).address().getPort());
                } else {
                    table.addCell("-");
                }
                final ThreadPoolStats.Stats poolStats = entry.getValue();
                final ThreadPool.Info poolInfo = poolThreadInfo.get(entry.getKey());

                Long maxQueueSize = null;
                String keepAlive = null;
@ -284,6 +211,7 @@ public class RestThreadPoolAction extends AbstractCatAction {
                    }
                }

                table.addCell(entry.getKey());
                table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType());
                table.addCell(poolStats == null ? null : poolStats.getActive());
                table.addCell(poolStats == null ? null : poolStats.getThreads());
@ -295,34 +223,11 @@ public class RestThreadPoolAction extends AbstractCatAction {
                table.addCell(minThreads);
                table.addCell(maxThreads);
                table.addCell(keepAlive);
            }

            table.endRow();
                table.endRow();
            }
        }

        return table;
    }

    // The thread pool columns should always be in the same order.
    private String[] fetchSortedPools(RestRequest request, String[] defaults) {
        String[] headers = request.paramAsStringArray("h", null);
        if (headers == null) {
            return defaults;
        } else {
            Set<String> requestedPools = new LinkedHashSet<>(headers.length);
            for (String header : headers) {
                int dotIndex = header.indexOf('.');
                if (dotIndex != -1) {
                    String headerPrefix = header.substring(0, dotIndex);
                    if (THREAD_POOL_TO_ALIAS.containsKey(headerPrefix)) {
                        requestedPools.add(headerPrefix);
                    }
                } else if (ALIAS_TO_THREAD_POOL.containsKey(header)) {
                    requestedPools.add(ALIAS_TO_THREAD_POOL.get(header));
                }
            }
            return requestedPools.toArray(new String[requestedPools.size()]);
        }
    }
}
@ -21,6 +21,7 @@ package org.elasticsearch.search;

import com.carrotsearch.hppc.ObjectFloatHashMap;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;

@ -87,6 +88,8 @@ import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.searchafter.SearchAfterBuilder;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Cancellable;
import org.elasticsearch.threadpool.ThreadPool.Names;

@ -94,6 +97,7 @@ import org.elasticsearch.threadpool.ThreadPool.Names;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
@ -265,7 +269,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv

            loadOrExecuteQueryPhase(request, context);

            if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
            if (hasHits(context.queryResult()) == false && context.scrollContext() == null) {
                freeContext(context.id());
            } else {
                contextProcessedSuccessfully(context);

@ -320,7 +324,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
            operationListener.onPreQueryPhase(context);
            long time = System.nanoTime();
            queryPhase.execute(context);
            if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
            if (hasHits(context.queryResult()) == false && context.scrollContext() == null) {
                // no hits, we can release the context since there will be no fetch phase
                freeContext(context.id());
            } else {
@ -811,40 +815,55 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
        }
    }

    private static final int[] EMPTY_DOC_IDS = new int[0];

    /**
     * Shortcut ids to load, we load only "from" and up to "size". The phase controller
     * handles this as well since the result is always size * shards for Q_A_F
     */
    private void shortcutDocIdsToLoad(SearchContext context) {
        final int[] docIdsToLoad;
        int docsOffset = 0;
        final Suggest suggest = context.queryResult().suggest();
        int numSuggestDocs = 0;
        final List<CompletionSuggestion> completionSuggestions;
        if (suggest != null && suggest.hasScoreDocs()) {
            completionSuggestions = suggest.filter(CompletionSuggestion.class);
            for (CompletionSuggestion completionSuggestion : completionSuggestions) {
                numSuggestDocs += completionSuggestion.getOptions().size();
            }
        } else {
            completionSuggestions = Collections.emptyList();
        }
        if (context.request().scroll() != null) {
            TopDocs topDocs = context.queryResult().topDocs();
            int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
            docIdsToLoad = new int[topDocs.scoreDocs.length + numSuggestDocs];
            for (int i = 0; i < topDocs.scoreDocs.length; i++) {
                docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
                docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc;
            }
            context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
        } else {
            TopDocs topDocs = context.queryResult().topDocs();
            if (topDocs.scoreDocs.length < context.from()) {
                // no more docs...
                context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
                return;
            }
            int totalSize = context.from() + context.size();
            int[] docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())];
            int counter = 0;
            for (int i = context.from(); i < totalSize; i++) {
                if (i < topDocs.scoreDocs.length) {
                    docIdsToLoad[counter] = topDocs.scoreDocs[i].doc;
                } else {
                    break;
                docIdsToLoad = new int[numSuggestDocs];
            } else {
                int totalSize = context.from() + context.size();
                docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size()) +
                    numSuggestDocs];
                for (int i = context.from(); i < Math.min(totalSize, topDocs.scoreDocs.length); i++) {
                    docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc;
                }
                counter++;
            }
            context.docIdsToLoad(docIdsToLoad, 0, counter);
        }
        for (CompletionSuggestion completionSuggestion : completionSuggestions) {
            for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) {
                docIdsToLoad[docsOffset++] = option.getDoc().doc;
            }
        }
        context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
    }

    private static boolean hasHits(final QuerySearchResult searchResult) {
        return searchResult.topDocs().scoreDocs.length > 0 ||
            (searchResult.suggest() != null && searchResult.suggest().hasScoreDocs());
    }

    private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
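The reworked shortcutDocIdsToLoad sizes one array for both kinds of hits: the regular top docs are packed first, then the completion-suggester option docs are appended at the running offset, so the fetch phase loads everything in one pass. A simplified sketch of that packing (hypothetical types, from/size handling omitted):

import java.util.Arrays;

public class DocIdPacking {

    static int[] packDocIds(int[] topDocIds, int[] suggestDocIds) {
        int[] docIdsToLoad = new int[topDocIds.length + suggestDocIds.length];
        int docsOffset = 0;
        for (int doc : topDocIds) {
            docIdsToLoad[docsOffset++] = doc; // query-phase hits first
        }
        for (int doc : suggestDocIds) {
            docIdsToLoad[docsOffset++] = doc; // completion suggestion docs appended
        }
        return docIdsToLoad;
    }

    public static void main(String[] args) {
        // Prints [3, 7, 12, 42, 99]: both hit kinds end up in a single fetch list.
        System.out.println(Arrays.toString(packDocIds(new int[]{3, 7, 12}, new int[]{42, 99})));
    }
}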
@ -24,7 +24,6 @@ import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.rounding.TimeZoneRounding;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;

@ -45,8 +44,9 @@ import java.util.Map;

/**
 * An aggregator for date values. Every date is rounded down using a configured
 * {@link TimeZoneRounding}.
 * @see TimeZoneRounding
 * {@link Rounding}.
 *
 * @see Rounding
 */
class DateHistogramAggregator extends BucketsAggregator {

@ -60,14 +60,17 @@ class DateHistogramAggregator extends BucketsAggregator {
    private final ExtendedBounds extendedBounds;

    private final LongHash bucketOrds;
    private long offset;

    public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,
    public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order,
            boolean keyed,
            long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,
            DocValueFormat formatter, AggregationContext aggregationContext,
            Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {

        super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
        this.rounding = rounding;
        this.offset = offset;
        this.order = order;
        this.keyed = keyed;
        this.minDocCount = minDocCount;
@ -100,7 +103,7 @@ class DateHistogramAggregator extends BucketsAggregator {
                long previousRounded = Long.MIN_VALUE;
                for (int i = 0; i < valuesCount; ++i) {
                    long value = values.valueAt(i);
                    long rounded = rounding.round(value);
                    long rounded = rounding.round(value - offset) + offset;
                    assert rounded >= previousRounded;
                    if (rounded == previousRounded) {
                        continue;
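The rounding.round(value - offset) + offset idiom shifts the timestamp into the un-offset coordinate system, floors it to a bucket key, and shifts the key back, so buckets start at the offset rather than at the interval boundary. A worked sketch with hour-sized buckets and a +15 minute offset (plain arithmetic standing in for the ES Rounding class):

public class OffsetRounding {

    static long round(long value, long interval, long offset) {
        // Shift, floor to the interval, shift back.
        long shifted = value - offset;
        long rounded = Math.floorDiv(shifted, interval) * interval;
        return rounded + offset;
    }

    public static void main(String[] args) {
        long hour = 3_600_000L;  // bucket interval in millis
        long offset = 900_000L;  // +15 minutes
        // 10:20 falls into the bucket starting at 10:15 rather than 10:00.
        long tenTwenty = 10 * hour + 20 * 60_000L;
        System.out.println(round(tenTwenty, hour, offset)); // prints 36900000, i.e. 10:15
    }
}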
@ -133,7 +136,7 @@ class DateHistogramAggregator extends BucketsAggregator {
        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
                ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
                : null;
        return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed,
        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
                pipelineAggregators(), metaData());
    }

@ -142,7 +145,7 @@ class DateHistogramAggregator extends BucketsAggregator {
        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
                ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
                : null;
        return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed,
        return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
                pipelineAggregators(), metaData());
    }

@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.rounding.TimeZoneRounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;

@ -95,24 +94,24 @@ public final class DateHistogramAggregatorFactory
    }

    private Rounding createRounding() {
        TimeZoneRounding.Builder tzRoundingBuilder;
        Rounding.Builder tzRoundingBuilder;
        if (dateHistogramInterval != null) {
            DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString());
            if (dateTimeUnit != null) {
                tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
                tzRoundingBuilder = Rounding.builder(dateTimeUnit);
            } else {
                // the interval is a time value?
                tzRoundingBuilder = TimeZoneRounding.builder(
                tzRoundingBuilder = Rounding.builder(
                    TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"));
            }
        } else {
            // the interval is an integer time value in millis?
            tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.timeValueMillis(interval));
            tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval));
        }
        if (timeZone() != null) {
            tzRoundingBuilder.timeZone(timeZone());
        }
        Rounding rounding = tzRoundingBuilder.offset(offset).build();
        Rounding rounding = tzRoundingBuilder.build();
        return rounding;
    }

@ -138,7 +137,7 @@ public final class DateHistogramAggregatorFactory
            // parse any string bounds to longs and round them
            roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding);
        }
        return new DateHistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,
        return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, roundedBounds, valuesSource,
            config.format(), context, parent, pipelineAggregators, metaData);
    }

@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;

@ -45,7 +44,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
    protected DateHistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
            ValueType targetValueType, Map<ParseField, Object> otherOptions) {
        DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder(aggregationName);
        Object interval = otherOptions.get(Rounding.Interval.INTERVAL_FIELD);
        Object interval = otherOptions.get(Histogram.INTERVAL_FIELD);
        if (interval == null) {
            throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
        } else if (interval instanceof Long) {

@ -55,7 +54,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
        } else {
            throw new IllegalStateException("Unexpected interval class: " + interval.getClass());
        }
        Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);
        Long offset = (Long) otherOptions.get(Histogram.OFFSET_FIELD);
        if (offset != null) {
            factory.offset(offset);
        }

@ -83,12 +82,12 @@ public class DateHistogramParser extends NumericValuesSourceParser {
    protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
            ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
        if (token.isValue()) {
            if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {
            if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
                if (token == XContentParser.Token.VALUE_STRING) {
                    otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
                    otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
                    return true;
                } else {
                    otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());
                    otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue());
                    return true;
                }
            } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {

@ -97,13 +96,13 @@ public class DateHistogramParser extends NumericValuesSourceParser {
            } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
                otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
                return true;
            } else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {
            } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
                if (token == XContentParser.Token.VALUE_STRING) {
                    otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD,
                    otherOptions.put(Histogram.OFFSET_FIELD,
                        DateHistogramAggregationBuilder.parseStringOffset(parser.text()));
                    return true;
                } else {
                    otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());
                    otherOptions.put(Histogram.OFFSET_FIELD, parser.longValue());
                    return true;
                }
            } else {
@ -47,8 +47,8 @@ public class HistogramAggregationBuilder

    private double interval;
    private double offset = 0;
    private double minBound = Double.MAX_VALUE;
    private double maxBound = Double.MIN_VALUE;
    private double minBound = Double.POSITIVE_INFINITY;
    private double maxBound = Double.NEGATIVE_INFINITY;
    private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
    private boolean keyed = false;
    private long minDocCount = 0;
@ -122,17 +122,24 @@ public class HistogramAggregationBuilder
        return maxBound;
    }

    /** Set extended bounds on this builder: buckets between {@code minBound}
     * and {@code maxBound} will be created even if no documents fell into
     * these buckets. It is possible to create half-open bounds by providing
     * {@link Double#POSITIVE_INFINITY} as a {@code minBound} or
     * {@link Double#NEGATIVE_INFINITY} as a {@code maxBound}. */
    /**
     * Set extended bounds on this builder: buckets between {@code minBound} and
     * {@code maxBound} will be created even if no documents fell into these
     * buckets.
     *
     * @throws IllegalArgumentException
     *             if maxBound is less than minBound, or if either of the bounds
     *             are not finite.
     */
    public HistogramAggregationBuilder extendedBounds(double minBound, double maxBound) {
        if (minBound == Double.NEGATIVE_INFINITY) {
            throw new IllegalArgumentException("minBound must not be -Infinity, got: " + minBound);
        if (Double.isFinite(minBound) == false) {
            throw new IllegalArgumentException("minBound must be finite, got: " + minBound);
        }
        if (maxBound == Double.POSITIVE_INFINITY) {
            throw new IllegalArgumentException("maxBound must not be +Infinity, got: " + maxBound);
        if (Double.isFinite(maxBound) == false) {
            throw new IllegalArgumentException("maxBound must be finite, got: " + maxBound);
        }
        if (maxBound < minBound) {
            throw new IllegalArgumentException("maxBound [" + maxBound + "] must be greater than minBound [" + minBound + "]");
        }
        this.minBound = minBound;
        this.maxBound = maxBound;
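A hedged usage sketch of the tightened validation: bounds must now be finite and ordered, so the previously documented half-open bounds via infinities are rejected up front. The builder calls below are assumed from the diff context; treat this as an illustration rather than a verified snippet:

import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;

public class ExtendedBoundsExample {
    public static void main(String[] args) {
        HistogramAggregationBuilder histogram = new HistogramAggregationBuilder("prices");
        histogram.field("price");
        histogram.interval(50);
        histogram.extendedBounds(0, 500); // ok: finite, and maxBound >= minBound

        // Each of the following now fails fast with IllegalArgumentException:
        // histogram.extendedBounds(Double.NEGATIVE_INFINITY, 500); // minBound must be finite
        // histogram.extendedBounds(0, Double.POSITIVE_INFINITY);   // maxBound must be finite
        // histogram.extendedBounds(500, 0);                        // maxBound < minBound
    }
}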
@ -178,14 +178,17 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
    private final DocValueFormat format;
    private final boolean keyed;
    private final long minDocCount;
    private final long offset;
    private final EmptyBucketInfo emptyBucketInfo;

    InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
    InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, long offset,
            EmptyBucketInfo emptyBucketInfo,
            DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,
            Map<String, Object> metaData) {
        super(name, pipelineAggregators, metaData);
        this.buckets = buckets;
        this.order = order;
        this.offset = offset;
        assert (minDocCount == 0) == (emptyBucketInfo != null);
        this.minDocCount = minDocCount;
        this.emptyBucketInfo = emptyBucketInfo;

@ -205,6 +208,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
        } else {
            emptyBucketInfo = null;
        }
        offset = in.readLong();
        format = in.readNamedWriteable(DocValueFormat.class);
        keyed = in.readBoolean();
        buckets = in.readList(stream -> new Bucket(stream, keyed, format));

@ -217,6 +221,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
        if (minDocCount == 0) {
            emptyBucketInfo.writeTo(out);
        }
        out.writeLong(offset);
        out.writeNamedWriteable(format);
        out.writeBoolean(keyed);
        out.writeList(buckets);

@ -234,7 +239,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<

    @Override
    public InternalDateHistogram create(List<Bucket> buckets) {
        return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format,
        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format,
                keyed, pipelineAggregators(), metaData);
    }

@ -328,7 +333,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
                long max = bounds.getMax();
                while (key <= max) {
                    iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    key = nextKey(key).longValue();
                }
            }
        } else {

@ -337,7 +342,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
            if (key < firstBucket.key) {
                while (key < firstBucket.key) {
                    iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    key = nextKey(key).longValue();
                }
            }
        }

@ -349,10 +354,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
        while (iter.hasNext()) {
            Bucket nextBucket = list.get(iter.nextIndex());
            if (lastBucket != null) {
                long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                long key = nextKey(lastBucket.key).longValue();
                while (key < nextBucket.key) {
                    iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    key = nextKey(key).longValue();
                }
                assert key == nextBucket.key;
            }

@ -393,7 +398,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
            CollectionUtil.introSort(reducedBuckets, order.comparator());
        }

        return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo,
        return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo,
                format, keyed, pipelineAggregators(), getMetaData());
    }

@ -424,7 +429,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<

    @Override
    public Number nextKey(Number key) {
        return emptyBucketInfo.rounding.nextRoundingValue(key.longValue());
        return emptyBucketInfo.rounding.nextRoundingValue(key.longValue() - offset) + offset;
    }

    @Override

@ -435,7 +440,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
            buckets2.add((Bucket) b);
        }
        buckets2 = Collections.unmodifiableList(buckets2);
        return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,
        return new InternalDateHistogram(name, buckets2, order, minDocCount, offset, emptyBucketInfo, format,
                keyed, pipelineAggregators(), getMetaData());
    }

@ -30,7 +30,6 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.component.AbstractComponent;

@ -53,18 +52,22 @@ import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.Suggest.Suggestion;
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

@ -154,6 +157,10 @@ public class SearchPhaseController extends AbstractComponent {
    }

    /**
     * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each
     * named completion suggestion across all shards. If more than one named completion suggestion is specified in the
     * request, the suggest docs for a named suggestion are ordered by the suggestion name.
     *
     * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
     *                   Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
     * @param resultsArr Shard result holder
@@ -191,19 +198,40 @@ public class SearchPhaseController extends AbstractComponent {
             offset = 0;
         }
         ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
+        ScoreDoc[] docs;
+        int numSuggestDocs = 0;
+        final Suggest suggest = result.queryResult().suggest();
+        final List<CompletionSuggestion> completionSuggestions;
+        if (suggest != null) {
+            completionSuggestions = suggest.filter(CompletionSuggestion.class);
+            for (CompletionSuggestion suggestion : completionSuggestions) {
+                numSuggestDocs += suggestion.getOptions().size();
+            }
+        } else {
+            completionSuggestions = Collections.emptyList();
+        }
+        int docsOffset = 0;
         if (scoreDocs.length == 0 || scoreDocs.length < offset) {
-            return EMPTY_DOCS;
-        }
-        int resultDocsSize = result.size();
-        if ((scoreDocs.length - offset) < resultDocsSize) {
-            resultDocsSize = scoreDocs.length - offset;
-        }
-        ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
-        for (int i = 0; i < resultDocsSize; i++) {
-            ScoreDoc scoreDoc = scoreDocs[offset + i];
-            scoreDoc.shardIndex = shardIndex;
-            docs[i] = scoreDoc;
+            docs = new ScoreDoc[numSuggestDocs];
+        } else {
+            int resultDocsSize = result.size();
+            if ((scoreDocs.length - offset) < resultDocsSize) {
+                resultDocsSize = scoreDocs.length - offset;
+            }
+            docs = new ScoreDoc[resultDocsSize + numSuggestDocs];
+            for (int i = 0; i < resultDocsSize; i++) {
+                ScoreDoc scoreDoc = scoreDocs[offset + i];
+                scoreDoc.shardIndex = shardIndex;
+                docs[i] = scoreDoc;
+                docsOffset++;
+            }
+        }
+        for (CompletionSuggestion suggestion: completionSuggestions) {
+            for (CompletionSuggestion.Entry.Option option : suggestion.getOptions()) {
+                ScoreDoc doc = option.getDoc();
+                doc.shardIndex = shardIndex;
+                docs[docsOffset++] = doc;
+            }
         }
         return docs;
     }
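For orientation, here is a minimal standalone sketch (not part of the commit) of the per-shard array layout the new code builds: top search docs first, then one slot per completion-suggest option, each slot stamped with the owning shard's index. `Doc` and all the values are illustrative stand-ins for Lucene's `ScoreDoc`, not the real types.

```java
import java.util.List;

class ShardDocsLayoutSketch {
    // Stand-in for org.apache.lucene.search.ScoreDoc
    record Doc(int doc, float score, int shardIndex) {}

    // Mirrors the hunk above: search docs first, suggest docs appended after,
    // and every slot is tagged with the shard that produced it.
    static Doc[] shardDocs(List<Doc> searchDocs, List<Doc> suggestDocs, int shardIndex) {
        Doc[] docs = new Doc[searchDocs.size() + suggestDocs.size()];
        int offset = 0;
        for (Doc d : searchDocs) {
            docs[offset++] = new Doc(d.doc(), d.score(), shardIndex);
        }
        for (Doc d : suggestDocs) {
            docs[offset++] = new Doc(d.doc(), d.score(), shardIndex);
        }
        return docs;
    }

    public static void main(String[] args) {
        List<Doc> search = List.of(new Doc(3, 2.0f, -1), new Doc(7, 1.5f, -1));
        List<Doc> suggest = List.of(new Doc(9, 4.0f, -1));
        // two search docs, then one suggest doc, all with shardIndex 0
        System.out.println(List.of(shardDocs(search, suggest, 0)));
    }
}
```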
@@ -213,13 +241,7 @@ public class SearchPhaseController extends AbstractComponent {
         Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
         QuerySearchResultProvider firstResult = sortedResults[0].value;

-        int topN = firstResult.queryResult().size();
-        if (firstResult.includeFetch()) {
-            // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
-            // this is also important since we shortcut and fetch only docs from "from" and up to "size"
-            topN *= sortedResults.length;
-        }
-
+        int topN = topN(results);
         int from = firstResult.queryResult().from();
         if (ignoreFrom) {
             from = 0;
@@ -258,40 +280,86 @@ public class SearchPhaseController extends AbstractComponent {
             }
             mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
         }
-        return mergedTopDocs.scoreDocs;
-    }
-
-    public ScoreDoc[] getLastEmittedDocPerShard(SearchRequest request, ScoreDoc[] sortedShardList, int numShards) {
-        if (request.scroll() != null) {
-            return getLastEmittedDocPerShard(sortedShardList, numShards);
-        } else {
-            return null;
-        }
-    }
+        ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
+        final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
+        // group suggestions and assign shard index
+        for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : sortedResults) {
+            Suggest shardSuggest = sortedResult.value.queryResult().suggest();
+            if (shardSuggest != null) {
+                for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
+                    suggestion.setShardIndex(sortedResult.index);
+                    List<Suggestion<CompletionSuggestion.Entry>> suggestions =
+                        groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
+                    suggestions.add(suggestion);
+                }
+            }
+        }
+        if (groupedCompletionSuggestions.isEmpty() == false) {
+            int numSuggestDocs = 0;
+            List<Suggestion<? extends Entry<? extends Entry.Option>>> completionSuggestions =
+                new ArrayList<>(groupedCompletionSuggestions.size());
+            for (List<Suggestion<CompletionSuggestion.Entry>> groupedSuggestions : groupedCompletionSuggestions.values()) {
+                final CompletionSuggestion completionSuggestion = CompletionSuggestion.reduceTo(groupedSuggestions);
+                assert completionSuggestion != null;
+                numSuggestDocs += completionSuggestion.getOptions().size();
+                completionSuggestions.add(completionSuggestion);
+            }
+            scoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length + numSuggestDocs];
+            System.arraycopy(mergedTopDocs.scoreDocs, 0, scoreDocs, 0, mergedTopDocs.scoreDocs.length);
+            int offset = mergedTopDocs.scoreDocs.length;
+            Suggest suggestions = new Suggest(completionSuggestions);
+            for (CompletionSuggestion completionSuggestion : suggestions.filter(CompletionSuggestion.class)) {
+                for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) {
+                    scoreDocs[offset++] = option.getDoc();
+                }
+            }
+        }
+        return scoreDocs;
+    }

-    public ScoreDoc[] getLastEmittedDocPerShard(ScoreDoc[] sortedShardList, int numShards) {
+    public ScoreDoc[] getLastEmittedDocPerShard(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults,
+                                                ScoreDoc[] sortedScoreDocs, int numShards) {
         ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
-        for (ScoreDoc scoreDoc : sortedShardList) {
-            lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc;
+        if (queryResults.isEmpty() == false) {
+            long fetchHits = 0;
+            for (AtomicArray.Entry<? extends QuerySearchResultProvider> queryResult : queryResults) {
+                fetchHits += queryResult.value.queryResult().topDocs().scoreDocs.length;
+            }
+            // from is always zero as when we use scroll, we ignore from
+            long size = Math.min(fetchHits, topN(queryResults));
+            for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) {
+                ScoreDoc scoreDoc = sortedScoreDocs[sortedDocsIndex];
+                lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc;
+            }
         }
         return lastEmittedDocPerShard;
     }

     /**
      * Builds an array, with potential null elements, with docs to load.
      */
-    public void fillDocIdsToLoad(AtomicArray<IntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
+    public void fillDocIdsToLoad(AtomicArray<IntArrayList> docIdsToLoad, ScoreDoc[] shardDocs) {
         for (ScoreDoc shardDoc : shardDocs) {
-            IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
-            if (list == null) {
-                list = new IntArrayList(); // can't be shared!, uses unsafe on it later on
-                docsIdsToLoad.set(shardDoc.shardIndex, list);
+            IntArrayList shardDocIdsToLoad = docIdsToLoad.get(shardDoc.shardIndex);
+            if (shardDocIdsToLoad == null) {
+                shardDocIdsToLoad = new IntArrayList(); // can't be shared!, uses unsafe on it later on
+                docIdsToLoad.set(shardDoc.shardIndex, shardDocIdsToLoad);
             }
-            list.add(shardDoc.doc);
+            shardDocIdsToLoad.add(shardDoc.doc);
         }
     }

-    public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray<? extends QuerySearchResultProvider> queryResultsArr,
+    /**
+     * Enriches search hits and completion suggestion hits from <code>sortedDocs</code> using <code>fetchResultsArr</code>,
+     * merges suggestions, aggregations and profile results
+     *
+     * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named
+     * completion suggestion ordered by suggestion name
+     */
+    public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs,
+                                        AtomicArray<? extends QuerySearchResultProvider> queryResultsArr,
                                         AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {

         List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults = queryResultsArr.asList();
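The grouping step above leans on `Map.computeIfAbsent` to bucket per-shard suggestions by name before each bucket is reduced to a single suggestion. A toy sketch of that idiom (not part of the commit; plain strings stand in for `Suggestion` objects and the names are invented):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupByNameSketch {
    public static void main(String[] args) {
        // per-shard (suggestionName, payload) pairs, as if several shards answered
        String[][] perShard = { {"song-suggest", "shard0"}, {"artist-suggest", "shard1"}, {"song-suggest", "shard2"} };
        Map<String, List<String>> grouped = new HashMap<>();
        for (String[] entry : perShard) {
            // computeIfAbsent creates the bucket the first time a name is seen, as in the hunk above
            grouped.computeIfAbsent(entry[0], name -> new ArrayList<>()).add(entry[1]);
        }
        // {artist-suggest=[shard1], song-suggest=[shard0, shard2]}; each bucket is then reduced to one suggestion
        System.out.println(grouped);
    }
}
```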
@@ -317,6 +385,7 @@ public class SearchPhaseController extends AbstractComponent {

         // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
         long totalHits = 0;
+        long fetchHits = 0;
         float maxScore = Float.NEGATIVE_INFINITY;
         boolean timedOut = false;
         Boolean terminatedEarly = null;
@@ -333,6 +402,7 @@ public class SearchPhaseController extends AbstractComponent {
             }
         }
         totalHits += result.topDocs().totalHits;
+        fetchHits += result.topDocs().scoreDocs.length;
         if (!Float.isNaN(result.topDocs().getMaxScore())) {
             maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
         }
@@ -345,11 +415,13 @@ public class SearchPhaseController extends AbstractComponent {
         for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
             entry.value.fetchResult().initCounter();
         }

+        int from = ignoreFrom ? 0 : firstResult.queryResult().from();
+        int numSearchHits = (int) Math.min(fetchHits - from, topN(queryResults));
         // merge hits
         List<InternalSearchHit> hits = new ArrayList<>();
         if (!fetchResults.isEmpty()) {
-            for (ScoreDoc shardDoc : sortedDocs) {
+            for (int i = 0; i < numSearchHits; i++) {
+                ScoreDoc shardDoc = sortedDocs[i];
                 FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
                 if (fetchResultProvider == null) {
                     continue;
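The new `numSearchHits` bound caps the hit-merging loop: it can consume at most `topN` docs, and never more than remain after skipping `from`. A throwaway sketch of the arithmetic (not part of the commit; all numbers are made up):

```java
class NumSearchHitsSketch {
    public static void main(String[] args) {
        long fetchHits = 23;  // total score docs fetched across all shards
        int from = 10;        // hits already served to the client (0 when ignoreFrom, i.e. scroll)
        int topN = 20;        // size, or size * shards when query+fetch ran in one round-trip
        int numSearchHits = (int) Math.min(fetchHits - from, topN);
        System.out.println(numSearchHits); // 13: no more hits than remain after 'from'
    }
}
```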
@@ -360,7 +432,6 @@ public class SearchPhaseController extends AbstractComponent {
                 InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
                 searchHit.score(shardDoc.score);
                 searchHit.shard(fetchResult.shardTarget());
-
                 if (sorted) {
                     FieldDoc fieldDoc = (FieldDoc) shardDoc;
                     searchHit.sortValues(fieldDoc.fields, firstResult.sortValueFormats());
@@ -368,7 +439,6 @@ public class SearchPhaseController extends AbstractComponent {
                     searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
                 }
             }
-
             hits.add(searchHit);
         }
     }
@@ -376,38 +446,72 @@ public class SearchPhaseController extends AbstractComponent {

         // merge suggest results
         Suggest suggest = null;
-        if (!queryResults.isEmpty()) {
-            final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>();
-            boolean hasSuggestions = false;
-            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
-                Suggest shardResult = entry.value.queryResult().queryResult().suggest();
-
-                if (shardResult == null) {
-                    continue;
-                }
-                hasSuggestions = true;
-                Suggest.group(groupedSuggestions, shardResult);
-            }
-
-            suggest = hasSuggestions ? new Suggest(Suggest.reduce(groupedSuggestions)) : null;
+        if (firstResult.suggest() != null) {
+            final Map<String, List<Suggestion>> groupedSuggestions = new HashMap<>();
+            for (AtomicArray.Entry<? extends QuerySearchResultProvider> queryResult : queryResults) {
+                Suggest shardSuggest = queryResult.value.queryResult().suggest();
+                if (shardSuggest != null) {
+                    for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : shardSuggest) {
+                        List<Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
+                        suggestionList.add(suggestion);
+                    }
+                }
+            }
+            if (groupedSuggestions.isEmpty() == false) {
+                suggest = new Suggest(Suggest.reduce(groupedSuggestions));
+                if (!fetchResults.isEmpty()) {
+                    int currentOffset = numSearchHits;
+                    for (CompletionSuggestion suggestion : suggest.filter(CompletionSuggestion.class)) {
+                        final List<CompletionSuggestion.Entry.Option> suggestionOptions = suggestion.getOptions();
+                        for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) {
+                            ScoreDoc shardDoc = sortedDocs[scoreDocIndex];
+                            FetchSearchResultProvider fetchSearchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
+                            if (fetchSearchResultProvider == null) {
+                                continue;
+                            }
+                            FetchSearchResult fetchResult = fetchSearchResultProvider.fetchResult();
+                            int fetchResultIndex = fetchResult.counterGetAndIncrement();
+                            if (fetchResultIndex < fetchResult.hits().internalHits().length) {
+                                InternalSearchHit hit = fetchResult.hits().internalHits()[fetchResultIndex];
+                                CompletionSuggestion.Entry.Option suggestOption =
+                                    suggestionOptions.get(scoreDocIndex - currentOffset);
+                                hit.score(shardDoc.score);
+                                hit.shard(fetchResult.shardTarget());
+                                suggestOption.setHit(hit);
+                            }
+                        }
+                        currentOffset += suggestionOptions.size();
+                    }
+                    assert currentOffset == sortedDocs.length : "expected no more score doc slices";
+                }
+            }
         }

-        // merge addAggregation
+        // merge Aggregation
         InternalAggregations aggregations = null;
-        if (!queryResults.isEmpty()) {
-            if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
-                List<InternalAggregations> aggregationsList = new ArrayList<>(queryResults.size());
-                for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
-                    aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
-                }
-                ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state());
-                aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
+        if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
+            List<InternalAggregations> aggregationsList = new ArrayList<>(queryResults.size());
+            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+                aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
+            }
+            ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state());
+            aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
+            List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
+            if (pipelineAggregators != null) {
+                List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false)
+                    .map((p) -> (InternalAggregation) p)
+                    .collect(Collectors.toList());
+                for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
+                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext);
+                    newAggs.add(newAgg);
+                }
+                aggregations = new InternalAggregations(newAggs);
             }
         }

         //Collect profile results
         SearchProfileShardResults shardResults = null;
-        if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
+        if (firstResult.profileResults() != null) {
             Map<String, ProfileShardResult> profileResults = new HashMap<>(queryResults.size());
             for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
                 String key = entry.value.queryResult().shardTarget().toString();
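Note the reduce order the rewritten aggregation block settles on: shard aggregations are reduced once, then each sibling pipeline aggregator runs on the already-reduced list and appends its output; the commit also folds the pipeline pass into the same block so one `ReduceContext` is reused instead of being rebuilt per aggregator. A plain-function sketch of that ordering (not part of the commit; the lambda is a stand-in for the real `SiblingPipelineAggregator`):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class PipelineReduceSketch {
    public static void main(String[] args) {
        List<List<Integer>> shardAggs = List.of(List.of(1, 2), List.of(3, 4));
        // step 1: reduce the per-shard aggregations into one list
        List<Integer> reduced = new ArrayList<>();
        shardAggs.forEach(reduced::addAll);
        // step 2: sibling pipelines see the reduced list and append their results
        List<Function<List<Integer>, Integer>> pipelines =
            List.of(aggs -> aggs.stream().mapToInt(Integer::intValue).sum());
        for (Function<List<Integer>, Integer> pipeline : pipelines) {
            reduced.add(pipeline.apply(reduced));
        }
        System.out.println(reduced); // [1, 2, 3, 4, 10]
    }
}
```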
@@ -416,24 +520,22 @@ public class SearchPhaseController extends AbstractComponent {
             shardResults = new SearchProfileShardResults(profileResults);
         }

-        if (aggregations != null) {
-            List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
-            if (pipelineAggregators != null) {
-                List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> {
-                    return (InternalAggregation) p;
-                }).collect(Collectors.toList());
-                for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
-                    ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, clusterService.state());
-                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext);
-                    newAggs.add(newAgg);
-                }
-                aggregations = new InternalAggregations(newAggs);
-            }
-        }
-
         InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
-
         return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
     }

+    /**
+     * returns the number of top results to be considered across all shards
+     */
+    private static int topN(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
+        QuerySearchResultProvider firstResult = queryResults.get(0).value;
+        int topN = firstResult.queryResult().size();
+        if (firstResult.includeFetch()) {
+            // if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
+            // this is also important since we shortcut and fetch only docs from "from" and up to "size"
+            topN *= queryResults.size();
+        }
+        return topN;
+    }
 }
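For reference, the extracted `topN()` helper centralizes a bound that `sortDocs`, `getLastEmittedDocPerShard`, and `merge` now all share. A standalone sketch of the same arithmetic (not part of the commit; the numbers are invented):

```java
class TopNSketch {
    static int topN(int size, boolean includeFetch, int numShards) {
        // when query and fetch ran in one round-trip, every shard already fetched up to
        // 'size' docs, so the merged candidate pool is size * numShards rather than size
        return includeFetch ? size * numShards : size;
    }

    public static void main(String[] args) {
        System.out.println(topN(10, false, 5)); // 10
        System.out.println(topN(10, true, 5));  // 50
    }
}
```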
@@ -39,10 +39,7 @@ public class ShardFetchSearchRequest extends ShardFetchRequest implements IndicesRequest {
     private OriginalIndices originalIndices;

     public ShardFetchSearchRequest() {
     }

-    public ShardFetchSearchRequest(SearchRequest request, long id, IntArrayList list) {
-        this(request, id, list, null);
-    }
-
     public ShardFetchSearchRequest(SearchRequest request, long id, IntArrayList list, ScoreDoc lastEmittedDoc) {