Merge branch 'master' into docs/add_autosense_to_query_dsl

commit 2d402c732c
CONTRIBUTING.md
@@ -71,6 +71,17 @@ Once your changes and tests are ready to submit for review:

 Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.

+Please adhere to the general guideline that you should never force push
+to a publicly shared branch. Once you have opened your pull request, you
+should consider your branch publicly shared. Instead of force pushing
+you can just add incremental commits; this is generally easier on your
+reviewers. If you need to pick up changes from master, you can merge
+master into your branch. A reviewer might ask you to rebase a
+long-running pull request in which case force pushing is okay for that
+request. Note that squashing at the end of the review process should
+also not be done, that can be done when the pull request is [integrated
+via GitHub](https://github.com/blog/2141-squash-your-commits).
+
 Contributing to the Elasticsearch codebase
 ------------------------------------------

@@ -85,7 +96,7 @@ option `Search for nested projects`. Additionally you will want to
 ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini`
 accordingly to avoid GC overhead errors.

-IntelliJ users acn automatically configure their IDE: `gradle idea`
+IntelliJ users can automatically configure their IDE: `gradle idea`
 then `File->New Project From Existing Sources`. Point to the root of
 the source directory, select
 `Import project from external model->Gradle`, enable
build.gradle
@@ -27,6 +27,7 @@ import org.apache.tools.ant.taskdefs.condition.Os
 subprojects {
   group = 'org.elasticsearch'
   version = org.elasticsearch.gradle.VersionProperties.elasticsearch
   description = "Elasticsearch subproject ${project.path}"

+  // we only use maven publish to add tasks for pom generation
   plugins.withType(MavenPublishPlugin).whenPluginAdded {

@@ -42,6 +43,10 @@ subprojects {
       license.appendNode('name', 'The Apache Software License, Version 2.0')
       license.appendNode('url', 'http://www.apache.org/licenses/LICENSE-2.0.txt')
       license.appendNode('distribution', 'repo')
+
+      Node developer = node.appendNode('developers').appendNode('developer')
+      developer.appendNode('name', 'Elastic')
+      developer.appendNode('url', 'http://www.elastic.co')
     }
   }
 }
BuildPlugin.groovy
@@ -19,6 +19,7 @@
 package org.elasticsearch.gradle

 import nebula.plugin.extraconfigurations.ProvidedBasePlugin
+import nebula.plugin.publishing.maven.MavenBasePublishPlugin
 import org.elasticsearch.gradle.precommit.PrecommitTasks
 import org.gradle.api.GradleException
 import org.gradle.api.JavaVersion

@@ -34,7 +35,6 @@ import org.gradle.api.artifacts.ResolvedArtifact
 import org.gradle.api.artifacts.dsl.RepositoryHandler
 import org.gradle.api.artifacts.maven.MavenPom
 import org.gradle.api.publish.maven.MavenPublication
-import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
 import org.gradle.api.publish.maven.tasks.GenerateMavenPom
 import org.gradle.api.tasks.bundling.Jar
 import org.gradle.api.tasks.compile.JavaCompile

@@ -343,8 +343,8 @@ class BuildPlugin implements Plugin<Project> {
     }

     /**Configuration generation of maven poms. */
-    private static void configurePomGeneration(Project project) {
-        project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded {
+    public static void configurePomGeneration(Project project) {
+        project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
             project.publishing {
                 publications {
                     all { MavenPublication publication -> // we only deal with maven
RestTestsFromSnippetsTask.groovy
@@ -199,7 +199,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {

             // Now setup the writer
             Files.createDirectories(dest.parent)
-            current = dest.newPrintWriter()
+            current = dest.newPrintWriter('UTF-8')
         }

         void finishLastTest() {
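The `newPrintWriter('UTF-8')` change above pins the generated test files to UTF-8 instead of the JVM's platform default encoding. A minimal plain-Java sketch of the pitfall being fixed (the temp file name and sample text are illustrative):

```java
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class CharsetPinning {
    public static void main(String[] args) throws Exception {
        Path dest = Files.createTempFile("snippet", ".yaml");
        // new PrintWriter(file) alone would use the platform default encoding,
        // so the same build could emit different bytes on different machines.
        try (PrintWriter writer = new PrintWriter(dest.toFile(), StandardCharsets.UTF_8.name())) {
            writer.println("description: caf\u00e9"); // non-ASCII only round-trips with a pinned charset
        }
        System.out.println(Files.readAllLines(dest, StandardCharsets.UTF_8));
    }
}
```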
PluginBuildPlugin.groovy
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.plugin

+import nebula.plugin.publishing.maven.MavenBasePublishPlugin
 import nebula.plugin.publishing.maven.MavenManifestPlugin
 import nebula.plugin.publishing.maven.MavenScmPlugin
 import org.elasticsearch.gradle.BuildPlugin

@@ -51,7 +52,7 @@ public class PluginBuildPlugin extends BuildPlugin {
         } else {
             project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
             project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
-            configurePomGeneration(project)
+            addPomGeneration(project)
         }

         project.namingConventions {

@@ -131,9 +132,9 @@ public class PluginBuildPlugin extends BuildPlugin {
     /**
      * Adds the plugin jar and zip as publications.
      */
-    private static void configurePomGeneration(Project project) {
+    protected static void addPomGeneration(Project project) {
         project.plugins.apply(MavenBasePublishPlugin.class)
         project.plugins.apply(MavenScmPlugin.class)
         project.plugins.apply(MavenManifestPlugin.class)

         project.publishing {
             publications {
ClusterFormationTasks.groovy
@@ -418,8 +418,7 @@ class ClusterFormationTasks {
             // argument are wrapped in an ExecArgWrapper that escapes commas
             args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
         } else {
-            executable 'sh'
-            args execArgs
+            commandLine execArgs
         }
     }
 }
checkstyle_suppressions.xml
@@ -7,8 +7,8 @@
   <!-- On Windows, Checkstyle matches files using \ path separator -->

   <!-- These files are generated by ANTLR so its silly to hold them to our rules. -->
-  <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]PainlessLexer\.java" checks="." />
-  <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
+  <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
+  <suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />

   <!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when we the
        files start to pass. -->

@@ -364,7 +364,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]ESFileStore.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]AsyncShardFetch.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]DanglingIndicesState.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayAllocator.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />
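The suppression paths use the character class `[/\\]` so the same rule matches `/` on Unix and `\` on Windows (the first comment in the file). A small sketch of how that class behaves; the sample paths mirror the suppressions above, and note the extra escaping a Java string literal requires:

```java
import java.util.regex.Pattern;

public class PathSeparatorRegex {
    public static void main(String[] args) {
        // In the XML the class is written [/\\]; inside a Java string literal each
        // backslash is escaped again, hence the four backslashes below.
        Pattern suppressed = Pattern.compile(
            "org[/\\\\]elasticsearch[/\\\\]painless[/\\\\]antlr[/\\\\]PainlessLexer\\.java");
        System.out.println(suppressed.matcher(
            "org/elasticsearch/painless/antlr/PainlessLexer.java").matches());   // true
        System.out.println(suppressed.matcher(
            "org\\elasticsearch\\painless\\antlr\\PainlessLexer.java").matches()); // true
    }
}
```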
TransportClusterAllocationExplainAction.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.admin.cluster.allocation;

-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.lucene.index.CorruptIndexException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;

@@ -30,24 +29,19 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStores
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaData.Custom;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;

@@ -60,7 +54,6 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;

@@ -72,7 +65,6 @@ import java.util.Set;
 public class TransportClusterAllocationExplainAction
     extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {

-    private final AllocationService allocationService;
     private final ClusterInfoService clusterInfoService;
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;

@@ -82,12 +74,10 @@ public class TransportClusterAllocationExplainAction
     public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
-                                                   AllocationService allocationService, ClusterInfoService clusterInfoService,
-                                                   AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
-                                                   TransportIndicesShardStoresAction shardStoresAction) {
+                                                   ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
+                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
         super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
             indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
-        this.allocationService = allocationService;
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
TransportClusterHealthAction.java
@@ -54,7 +54,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
     public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                         ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) {
-        super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterHealthRequest::new);
+        super(settings, ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters,
+            indexNameExpressionResolver, ClusterHealthRequest::new);
         this.clusterName = clusterName;
         this.gatewayAllocator = gatewayAllocator;
     }
ClusterSearchShardsGroup.java
@@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;

@@ -33,16 +34,14 @@ import java.io.IOException;
  */
 public class ClusterSearchShardsGroup implements Streamable, ToXContent {

-    private Index index;
-    private int shardId;
+    private ShardId shardId;
     ShardRouting[] shards;

     ClusterSearchShardsGroup() {

     }

-    public ClusterSearchShardsGroup(Index index, int shardId, ShardRouting[] shards) {
-        this.index = index;
+    public ClusterSearchShardsGroup(ShardId shardId, ShardRouting[] shards) {
         this.shardId = shardId;
         this.shards = shards;
     }

@@ -54,11 +53,11 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
     }

     public String getIndex() {
-        return index.getName();
+        return shardId.getIndexName();
     }

     public int getShardId() {
-        return shardId;
+        return shardId.id();
     }

     public ShardRouting[] getShards() {

@@ -67,18 +66,16 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {

     @Override
     public void readFrom(StreamInput in) throws IOException {
-        index = new Index(in);
-        shardId = in.readVInt();
+        shardId = ShardId.readShardId(in);
         shards = new ShardRouting[in.readVInt()];
         for (int i = 0; i < shards.length; i++) {
-            shards[i] = ShardRouting.readShardRoutingEntry(in, index, shardId);
+            shards[i] = new ShardRouting(shardId, in);
         }
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        index.writeTo(out);
-        out.writeVInt(shardId);
+        shardId.writeTo(out);
         out.writeVInt(shards.length);
         for (ShardRouting shardRouting : shards) {
             shardRouting.writeToThin(out);
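The change above collapses the parallel `Index index` / `int shardId` fields into a single `ShardId`, so the index name and the shard number travel, compare, and serialize together. A plain-Java analogue of that refactor with hypothetical names (`ShardKey` stands in for ES's `ShardId`):

```java
import java.util.Objects;

// Hypothetical stand-in for org.elasticsearch.index.shard.ShardId: one value
// object instead of an (index, int) pair threaded through every signature.
final class ShardKey {
    private final String indexName;
    private final int id;

    ShardKey(String indexName, int id) {
        this.indexName = Objects.requireNonNull(indexName);
        this.id = id;
    }

    String getIndexName() { return indexName; }
    int id() { return id; }

    @Override public boolean equals(Object o) {
        return o instanceof ShardKey && ((ShardKey) o).indexName.equals(indexName) && ((ShardKey) o).id == id;
    }
    @Override public int hashCode() { return 31 * indexName.hashCode() + id; }
    @Override public String toString() { return "[" + indexName + "][" + id + "]"; }
}

public class ShardKeyDemo {
    public static void main(String[] args) {
        ShardKey key = new ShardKey("twitter", 0);
        // callers derive both pieces from the one object, as the new
        // getIndex()/getShardId() accessors do via shardId.getIndexName() and shardId.id()
        System.out.println(key.getIndexName() + " / " + key.id());
    }
}
```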
TransportClusterSearchShardsAction.java
@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -78,8 +79,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
         ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
         int currentGroup = 0;
         for (ShardIterator shardIt : groupShardsIterator) {
-            Index index = shardIt.shardId().getIndex();
-            int shardId = shardIt.shardId().getId();
+            ShardId shardId = shardIt.shardId();
             ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
             int currentShard = 0;
             shardIt.reset();

@@ -87,7 +87,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
                 shardRoutings[currentShard++] = shard;
                 nodeIds.add(shard.currentNodeId());
             }
-            groupResponses[currentGroup++] = new ClusterSearchShardsGroup(index, shardId, shardRoutings);
+            groupResponses[currentGroup++] = new ClusterSearchShardsGroup(shardId, shardRoutings);
         }
         DiscoveryNode[] nodes = new DiscoveryNode[nodeIds.size()];
         int currentNode = 0;
TransportClusterStateAction.java
@@ -47,7 +47,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
     @Inject
     public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
                                        ClusterName clusterName, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, ClusterStateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterStateRequest::new);
+        super(settings, ClusterStateAction.NAME, false, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterStateRequest::new);
         this.clusterName = clusterName;
     }

ShardSegments.java
@@ -31,8 +31,6 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;

-import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-
 public class ShardSegments implements Streamable, Iterable<Segment> {

     private ShardRouting shardRouting;

@@ -88,7 +86,7 @@ public class ShardSegments implements Streamable, Iterable<Segment> {

     @Override
     public void readFrom(StreamInput in) throws IOException {
-        shardRouting = readShardRoutingEntry(in);
+        shardRouting = new ShardRouting(in);
         int size = in.readVInt();
         if (size == 0) {
             segments = Collections.emptyList();

@@ -108,4 +106,4 @@ public class ShardSegments implements Streamable, Iterable<Segment> {
             segment.writeTo(out);
         }
     }
 }
ShardStats.java
@@ -31,8 +31,6 @@ import org.elasticsearch.index.shard.ShardPath;

 import java.io.IOException;

-import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-
 /**
  */
 public class ShardStats implements Streamable, ToXContent {

@@ -91,7 +89,7 @@ public class ShardStats implements Streamable, ToXContent {

     @Override
     public void readFrom(StreamInput in) throws IOException {
-        shardRouting = readShardRoutingEntry(in);
+        shardRouting = new ShardRouting(in);
         commonStats = CommonStats.readCommonStats(in);
         commitStats = CommitStats.readOptionalCommitStatsFrom(in);
         statePath = in.readString();
ShardUpgradeStatus.java
@@ -26,8 +26,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;

 import java.io.IOException;

-import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-
 public class ShardUpgradeStatus extends BroadcastShardResponse {

     private ShardRouting shardRouting;

@@ -75,7 +73,7 @@ public class ShardUpgradeStatus extends BroadcastShardResponse {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        shardRouting = readShardRoutingEntry(in);
+        shardRouting = new ShardRouting(in);
         totalBytes = in.readLong();
         toUpgradeBytes = in.readLong();
         toUpgradeBytesAncient = in.readLong();

@@ -89,4 +87,4 @@ public class ShardUpgradeStatus extends BroadcastShardResponse {
         out.writeLong(toUpgradeBytes);
         out.writeLong(toUpgradeBytesAncient);
     }
 }
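Across ShardSegments, ShardStats, and ShardUpgradeStatus the static factory `readShardRoutingEntry(in)` becomes `new ShardRouting(in)`: deserialization moves into a constructor, which lets the deserialized fields be final instead of being assigned through a mutable blank instance. A self-contained sketch of the two styles, with illustrative class and field names:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class Routing {
    private final String nodeId; // final: can only be assigned in a constructor

    Routing(String nodeId) { this.nodeId = nodeId; }

    // Constructor-based deserialization: the object is fully built in one step.
    Routing(DataInput in) throws IOException {
        this.nodeId = in.readUTF();
    }

    // The old style needed a mutable blank instance plus a readFrom(), roughly:
    //   static Routing readRoutingEntry(DataInput in) { Routing r = new Routing(); r.readFrom(in); return r; }

    String nodeId() { return nodeId; }
}

public class StreamConstructorDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new DataOutputStream(bytes).writeUTF("node-1");
        Routing routing = new Routing(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(routing.nodeId()); // node-1
    }
}
```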
BulkRequest.java
@@ -43,6 +43,7 @@ import org.elasticsearch.index.VersionType;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;

 import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -125,6 +126,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     }

     BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
+        Objects.requireNonNull(request, "'request' must not be null");
         requests.add(request);
         addPayload(payload);
         // lack of source is validated in validate() method

@@ -144,6 +146,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     }

     BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) {
+        Objects.requireNonNull(request, "'request' must not be null");
         requests.add(request);
         addPayload(payload);
         if (request.doc() != null) {
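The added `Objects.requireNonNull(request, ...)` guards make a null request fail immediately with a descriptive message rather than as an anonymous NullPointerException later in the bulk machinery. A minimal demo of the idiom (the `internalAdd` here is a simplified stand-in, not BulkRequest itself):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

public class RequireNonNullDemo {
    private final List<String> requests = new ArrayList<>();

    // mirrors the guard added to BulkRequest.internalAdd: fail fast, with context
    void internalAdd(String request) {
        Objects.requireNonNull(request, "'request' must not be null");
        requests.add(request);
    }

    public static void main(String[] args) {
        RequireNonNullDemo bulk = new RequireNonNullDemo();
        bulk.internalAdd("index {...}");
        try {
            bulk.internalAdd(null);
        } catch (NullPointerException e) {
            System.out.println(e.getMessage()); // 'request' must not be null
        }
    }
}
```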
HandledTransportAction.java
@@ -36,9 +36,18 @@ import java.util.function.Supplier;
  */
 public abstract class HandledTransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
     extends TransportAction<Request, Response> {
-    protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
+    protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
+                                     ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+                                     Supplier<Request> request) {
+        this(settings, actionName, true, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
+    }
+
+    protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool,
+                                     TransportService transportService, ActionFilters actionFilters,
+                                     IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
         super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
-        transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());
+        transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker,
+            new TransportHandler());
     }

     class TransportHandler implements TransportRequestHandler<Request> {
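This hunk, and the TransportMasterNodeAction/TransportMasterNodeReadAction hunks below, all use the same overloading pattern: the old constructor signature delegates to a new one with the added boolean defaulted to `true`, so every existing subclass keeps its behavior while specific actions can opt out. A stripped-down sketch of the pattern (the `Handler` class and action names are illustrative, not ES code):

```java
public class DelegatingCtorDemo {
    static class Handler {
        private final String actionName;
        private final boolean canTripCircuitBreaker;

        // legacy signature: delegates with the safe default, so no caller breaks
        Handler(String actionName) {
            this(actionName, true);
        }

        // new signature: lets specific actions (e.g. cluster health/state) opt out
        Handler(String actionName, boolean canTripCircuitBreaker) {
            this.actionName = actionName;
            this.canTripCircuitBreaker = canTripCircuitBreaker;
        }

        @Override public String toString() {
            return actionName + " canTripCircuitBreaker=" + canTripCircuitBreaker;
        }
    }

    public static void main(String[] args) {
        System.out.println(new Handler("indices:data/write/bulk"));       // defaults to true
        System.out.println(new Handler("cluster:monitor/health", false)); // opted out
    }
}
```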
TransportBroadcastByNodeAction.java
@@ -475,7 +475,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
             int size = in.readVInt();
             shards = new ArrayList<>(size);
             for (int i = 0; i < size; i++) {
-                shards.add(ShardRouting.readShardRoutingEntry(in));
+                shards.add(new ShardRouting(in));
             }
             nodeId = in.readString();
         }
TransportMasterNodeAction.java
@@ -58,7 +58,15 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
     protected TransportMasterNodeAction(Settings settings, String actionName, TransportService transportService,
                                         ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
-        super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
+        this(settings, actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request);
+    }
+
+    protected TransportMasterNodeAction(Settings settings, String actionName, boolean canTripCircuitBreaker,
+                                        TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+                                        ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+                                        Supplier<Request> request) {
+        super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+            request);
         this.transportService = transportService;
         this.clusterService = clusterService;
         this.executor = executor();
TransportMasterNodeReadAction.java
@@ -46,7 +46,14 @@ public abstract class TransportMasterNodeReadAction<Request extends MasterNodeRe
     protected TransportMasterNodeReadAction(Settings settings, String actionName, TransportService transportService,
                                             ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,
                                             IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
-        super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request);
+        this(settings, actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request);
+    }
+
+    protected TransportMasterNodeReadAction(Settings settings, String actionName, boolean checkSizeLimit, TransportService transportService,
+                                            ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,
+                                            IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
+        super(settings, actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters,
+            indexNameExpressionResolver,request);
         this.forceLocal = FORCE_LOCAL_SETTING.get(settings);
     }

TransportNodesAction.java
@@ -80,7 +80,8 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

         this.transportNodeAction = actionName + "[n]";

-        transportService.registerRequestHandler(transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler());
+        transportService.registerRequestHandler(
+            transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler());
     }

     @Override
TransportReplicationAction.java
@@ -92,8 +92,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     final private WriteConsistencyLevel defaultWriteConsistencyLevel;
     final private TransportRequestOptions transportOptions;

-    final private String transportReplicaAction;
-    final private String transportPrimaryAction;
+    // package private for testing
+    final String transportReplicaAction;
+    final String transportPrimaryAction;
     final private ReplicasProxy replicasProxy;

     protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,

@@ -113,7 +114,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler());
         transportService.registerRequestHandler(transportPrimaryAction, request, executor, new PrimaryOperationTransportHandler());
         // we must never reject on because of thread pool capacity on replicas
-        transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true,
+        transportService.registerRequestHandler(transportReplicaAction, replicaRequest, executor, true, true,
             new ReplicaOperationTransportHandler());

         this.transportOptions = transportOptions();
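The boolean pair passed to the replica handler corresponds to force-execution plus circuit-breaker flags; per the comment, replica operations must never be rejected because of thread-pool capacity (a rejected replica write would have to fail the shard). A hedged, simplified analogue of the "force execution" idea; this is not how Elasticsearch's own EsThreadPoolExecutor implements it, only an illustration of the contract:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ForceExecutionDemo {
    public static void main(String[] args) throws Exception {
        // one worker, one queue slot: the third submission would normally be rejected
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(1));
        CountDownLatch gate = new CountDownLatch(1);
        Runnable blocker = () -> { try { gate.await(); } catch (InterruptedException ignored) { } };
        pool.execute(blocker); // occupies the single worker
        pool.execute(blocker); // occupies the single queue slot
        submit(pool, () -> System.out.println("replica op ran despite a full pool"), true);
        gate.countDown();
        pool.shutdown();
    }

    // illustrative only: a forced task falls back to the caller thread instead of
    // being rejected; the contract being modeled is "never drop replica operations"
    static void submit(ThreadPoolExecutor pool, Runnable task, boolean forceExecution) {
        try {
            pool.execute(task);
        } catch (RejectedExecutionException e) {
            if (forceExecution) {
                task.run();
            } else {
                throw e;
            }
        }
    }
}
```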
BootstrapCheck.java
@@ -335,7 +335,7 @@ final class BootstrapCheck {
         @Override
         public String errorMessage() {
             return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
-                "] to a majority of the number of master eligible nodes in your cluster.";
+                "] to a majority of the number of master eligible nodes in your cluster";
         }

         @Override
ShardStateAction.java
@@ -69,8 +69,6 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.stream.Collectors;

-import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-
 public class ShardStateAction extends AbstractComponent {

     public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";

@@ -418,8 +416,8 @@ public class ShardStateAction extends AbstractComponent {
         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
-            shardRouting = readShardRoutingEntry(in);
-            sourceShardRouting = readShardRoutingEntry(in);
+            shardRouting = new ShardRouting(in);
+            sourceShardRouting = new ShardRouting(in);
             message = in.readString();
             failure = in.readThrowable();
         }
IndexGraveyard.java
@@ -123,6 +123,18 @@ final public class IndexGraveyard implements MetaData.Custom {
         return tombstones;
     }

+    /**
+     * Returns true if the graveyard contains a tombstone for the given index.
+     */
+    public boolean containsIndex(final Index index) {
+        for (Tombstone tombstone : tombstones) {
+            if (tombstone.getIndex().equals(index)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     @Override
     public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
         builder.startArray(TOMBSTONES_FIELD.getPreferredName());
MetaDataIndexUpgradeService.java
@@ -95,7 +95,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
     private void checkSupportedVersion(IndexMetaData indexMetaData) {
         if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
             throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded."
-                + " This index should be open using a version before " + Version.CURRENT.minimumCompatibilityVersion()
+                + " This index should be opened using a version before " + Version.CURRENT.minimumCompatibilityVersion()
                 + " and upgraded using the upgrade API.");
         }
     }
AllocationId.java
@@ -26,6 +26,7 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -43,7 +44,7 @@ import java.util.Objects;
 * relocationId. Once relocation is done, the new allocation id is set to the relocationId. This is similar
 * behavior to how ShardRouting#currentNodeId is used.
 */
-public class AllocationId implements ToXContent {
+public class AllocationId implements ToXContent, Writeable {
     private static final String ID_KEY = "id";
     private static final String RELOCATION_ID_KEY = "relocation_id";

@@ -81,6 +82,7 @@ public class AllocationId implements ToXContent {
         this.relocationId = in.readOptionalString();
     }

+    @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(this.id);
         out.writeOptionalString(this.relocationId);
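AllocationId now declares `Writeable`, so its existing `writeTo` becomes an `@Override` of a shared serialization contract instead of an ad-hoc method, and the compiler enforces the signature. A sketch of the interface-extraction pattern; the nested `Writeable` here is a hypothetical stand-in for ES's interface, and the optional-string encoding is an assumption about how `writeOptionalString` behaves:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteableDemo {
    // stand-in for org.elasticsearch.common.io.stream.Writeable
    interface Writeable {
        void writeTo(DataOutput out) throws IOException;
    }

    static final class AllocationTag implements Writeable {
        private final String id;
        private final String relocationId; // may be null

        AllocationTag(String id, String relocationId) {
            this.id = id;
            this.relocationId = relocationId;
        }

        @Override
        public void writeTo(DataOutput out) throws IOException {
            out.writeUTF(id);
            // emulate writeOptionalString: a presence flag, then the value
            out.writeBoolean(relocationId != null);
            if (relocationId != null) {
                out.writeUTF(relocationId);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new AllocationTag("a1b2", null).writeTo(new DataOutputStream(bytes));
        System.out.println(bytes.size() + " bytes written");
    }
}
```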
IllegalShardRoutingStateException.java
@@ -42,7 +42,7 @@ public class IllegalShardRoutingStateException extends RoutingException {

     public IllegalShardRoutingStateException(StreamInput in) throws IOException {
         super(in);
-        shard = ShardRouting.readShardRoutingEntry(in);
+        shard = new ShardRouting(in);
     }

     @Override
IndexRoutingTable.java
@@ -76,7 +76,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
         List<ShardRouting> allActiveShards = new ArrayList<>();
         for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
             for (ShardRouting shardRouting : cursor.value) {
-                shardRouting.freeze();
                 if (shardRouting.active()) {
                     allActiveShards.add(shardRouting);
                 }

@@ -395,17 +394,18 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             if (!shards.isEmpty()) {
                 throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
             }
-            for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
-                IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId));
+            for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) {
+                ShardId shardId = new ShardId(index, shardNumber);
+                IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
                 for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
-                    if (asNew && ignoreShards.contains(shardId)) {
+                    if (asNew && ignoreShards.contains(shardNumber)) {
                         // This shards wasn't completely snapshotted - restore it as new shard
-                        indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
+                        indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
                     } else {
-                        indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
+                        indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
                     }
                 }
-                shards.put(shardId, indexShardRoutingBuilder.build());
+                shards.put(shardNumber, indexShardRoutingBuilder.build());
             }
             return this;
         }

@@ -418,22 +418,24 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             if (!shards.isEmpty()) {
                 throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
             }
-            for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
-                IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId));
+            for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) {
+                ShardId shardId = new ShardId(index, shardNumber);
+                IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
                 for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
-                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
+                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
                 }
-                shards.put(shardId, indexShardRoutingBuilder.build());
+                shards.put(shardNumber, indexShardRoutingBuilder.build());
             }
             return this;
         }

         public Builder addReplica() {
             for (IntCursor cursor : shards.keys()) {
-                int shardId = cursor.value;
+                int shardNumber = cursor.value;
+                ShardId shardId = new ShardId(index, shardNumber);
                 // version 0, will get updated when reroute will happen
-                ShardRouting shard = ShardRouting.newUnassigned(index, shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
-                shards.put(shardId,
+                ShardRouting shard = ShardRouting.newUnassigned(shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
+                shards.put(shardNumber,
                     new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
                 );
             }

@@ -451,7 +453,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             // re-add all the current ones
             IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(indexShard.shardId());
             for (ShardRouting shardRouting : indexShard) {
-                builder.addShard(new ShardRouting(shardRouting));
+                builder.addShard(shardRouting);
             }
             // first check if there is one that is not assigned to a node, and remove it
             boolean removed = false;

@@ -487,9 +489,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
         public Builder addShard(IndexShardRoutingTable refData, ShardRouting shard) {
             IndexShardRoutingTable indexShard = shards.get(shard.id());
             if (indexShard == null) {
-                indexShard = new IndexShardRoutingTable.Builder(refData.shardId()).addShard(new ShardRouting(shard)).build();
+                indexShard = new IndexShardRoutingTable.Builder(refData.shardId()).addShard(shard).build();
             } else {
-                indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ShardRouting(shard)).build();
+                indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(shard).build();
             }
             shards.put(indexShard.shardId().id(), indexShard);
             return this;
IndexShardRoutingTable.java
@@ -590,11 +590,12 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

     public static IndexShardRoutingTable readFromThin(StreamInput in, Index index) throws IOException {
         int iShardId = in.readVInt();
-        Builder builder = new Builder(new ShardId(index, iShardId));
+        ShardId shardId = new ShardId(index, iShardId);
+        Builder builder = new Builder(shardId);

         int size = in.readVInt();
         for (int i = 0; i < size; i++) {
-            ShardRouting shard = ShardRouting.readShardRoutingEntry(in, index, iShardId);
+            ShardRouting shard = new ShardRouting(shardId, in);
             builder.addShard(shard);
         }

RoutingNode.java
@@ -20,12 +20,15 @@
 package org.elasticsearch.cluster.routing;

 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.shard.ShardId;

 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;

 /**
  * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards

@@ -37,25 +40,37 @@ public class RoutingNode implements Iterable<ShardRouting> {

     private final DiscoveryNode node;

-    private final List<ShardRouting> shards;
+    private final LinkedHashMap<ShardId, ShardRouting> shards; // LinkedHashMap to preserve order

-    public RoutingNode(String nodeId, DiscoveryNode node) {
-        this(nodeId, node, new ArrayList<ShardRouting>());
+    public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shards) {
+        this(nodeId, node, buildShardRoutingMap(shards));
     }

-    public RoutingNode(String nodeId, DiscoveryNode node, List<ShardRouting> shards) {
+    RoutingNode(String nodeId, DiscoveryNode node, LinkedHashMap<ShardId, ShardRouting> shards) {
         this.nodeId = nodeId;
         this.node = node;
         this.shards = shards;
     }

+    private static LinkedHashMap<ShardId, ShardRouting> buildShardRoutingMap(ShardRouting... shardRoutings) {
+        final LinkedHashMap<ShardId, ShardRouting> shards = new LinkedHashMap<>();
+        for (ShardRouting shardRouting : shardRoutings) {
+            ShardRouting previousValue = shards.put(shardRouting.shardId(), shardRouting);
+            if (previousValue != null) {
+                throw new IllegalArgumentException("Cannot have two different shards with same shard id " + shardRouting.shardId() +
+                    " on same node ");
+            }
+        }
+        return shards;
+    }
+
     @Override
     public Iterator<ShardRouting> iterator() {
-        return Collections.unmodifiableCollection(shards).iterator();
+        return Collections.unmodifiableCollection(shards.values()).iterator();
     }

     Iterator<ShardRouting> mutableIterator() {
-        return shards.iterator();
+        return shards.values().iterator();
     }

     /**

@@ -67,6 +82,10 @@ public class RoutingNode implements Iterable<ShardRouting> {
         return this.node;
     }

+    public @Nullable ShardRouting getByShardId(ShardId id) {
+        return shards.get(id);
+    }
+
     /**
      * Get the id of this node
      * @return id of the node

@@ -84,13 +103,20 @@ public class RoutingNode implements Iterable<ShardRouting> {
      * @param shard Shard to crate on this Node
      */
     void add(ShardRouting shard) {
-        // TODO use Set with ShardIds for faster lookup.
-        for (ShardRouting shardRouting : shards) {
-            if (shardRouting.isSameShard(shard)) {
-                throw new IllegalStateException("Trying to add a shard [" + shard.shardId().getIndex().getName() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
-            }
+        if (shards.containsKey(shard.shardId())) {
+            throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId + "] where it already exists");
         }
-        shards.add(shard);
+        shards.put(shard.shardId(), shard);
     }

+    void update(ShardRouting oldShard, ShardRouting newShard) {
+        if (shards.containsKey(oldShard.shardId()) == false) {
+            // Shard was already removed by routing nodes iterator
+            // TODO: change caller logic in RoutingNodes so that this check can go away
+            return;
+        }
+        ShardRouting previousValue = shards.put(newShard.shardId(), newShard);
+        assert previousValue == oldShard : "expected shard " + previousValue + " but was " + oldShard;
+    }
+
     /**

@@ -166,7 +192,7 @@ public class RoutingNode implements Iterable<ShardRouting> {
     public String prettyPrint() {
         StringBuilder sb = new StringBuilder();
         sb.append("-----node_id[").append(nodeId).append("][" + (node == null ? "X" : "V") + "]\n");
-        for (ShardRouting entry : shards) {
+        for (ShardRouting entry : shards.values()) {
             sb.append("--------").append(entry.shortSummary()).append('\n');
         }
         return sb.toString();

@@ -188,12 +214,8 @@ public class RoutingNode implements Iterable<ShardRouting> {
         return sb.toString();
     }

-    public ShardRouting get(int i) {
-        return shards.get(i) ;
-    }
-
-    public Collection<ShardRouting> copyShards() {
-        return new ArrayList<>(shards);
+    public List<ShardRouting> copyShards() {
+        return new ArrayList<>(shards.values());
     }

     public boolean isEmpty() {
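Replacing `List<ShardRouting>` with `LinkedHashMap<ShardId, ShardRouting>` gives RoutingNode O(1) `getByShardId` lookups and duplicate detection through `put()`'s return value, while iteration keeps insertion order (the "LinkedHashMap to preserve order" comments). A self-contained demo of those three properties, using plain strings in place of ShardId/ShardRouting:

```java
import java.util.LinkedHashMap;

public class LinkedHashMapDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> shards = new LinkedHashMap<>();
        shards.put("[twitter][0]", "primary");
        shards.put("[twitter][1]", "replica");

        // O(1) lookup by key replaces the old linear scan over a List
        System.out.println(shards.get("[twitter][1]"));

        // put() returns the previous mapping, which is how buildShardRoutingMap
        // detects two shards with the same shard id on one node
        String previous = shards.put("[twitter][0]", "duplicate");
        if (previous != null) {
            System.out.println("Cannot have two different shards with same shard id on same node");
        }

        // unlike HashMap, iteration preserves insertion order
        shards.keySet().forEach(System.out::println);
    }
}
```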
@ -29,16 +29,22 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Randomness;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Map;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Queue;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
|
@ -71,7 +77,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
private int relocatingShards = 0;
|
||||
|
||||
private final Map<String, ObjectIntHashMap<String>> nodesPerAttributeNames = new HashMap<>();
|
||||
private final Map<String, Recoveries> recoveryiesPerNode = new HashMap<>();
|
||||
private final Map<String, Recoveries> recoveriesPerNode = new HashMap<>();
|
||||
|
||||
public RoutingNodes(ClusterState clusterState) {
|
||||
this(clusterState, true);
|
||||
|
@ -84,10 +90,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
this.routingTable = clusterState.routingTable();
|
||||
this.customs = clusterState.customs();
|
||||
|
||||
Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
|
||||
Map<String, LinkedHashMap<ShardId, ShardRouting>> nodesToShards = new HashMap<>();
|
||||
// fill in the nodeToShards with the "live" nodes
|
||||
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().getDataNodes().values()) {
|
||||
nodesToShards.put(cursor.value.getId(), new ArrayList<>());
|
||||
nodesToShards.put(cursor.value.getId(), new LinkedHashMap<>()); // LinkedHashMap to preserve order
|
||||
}
|
||||
|
||||
// fill in the inverse of node -> shards allocated
|
||||
|
@ -101,21 +107,25 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
// by the ShardId, as this is common for primary and replicas.
|
||||
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
|
||||
if (shard.assignedToNode()) {
|
||||
List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>());
|
||||
final ShardRouting sr = getRouting(shard, readOnly);
|
||||
entries.add(sr);
|
||||
assignedShardsAdd(sr);
|
||||
Map<ShardId, ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(),
|
||||
k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order
|
||||
ShardRouting previousValue = entries.put(shard.shardId(), shard);
|
||||
if (previousValue != null) {
|
||||
throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node");
|
||||
}
|
||||
assignedShardsAdd(shard);
|
||||
if (shard.relocating()) {
|
||||
relocatingShards++;
|
||||
entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>());
|
||||
entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(),
|
||||
k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order
|
||||
// add the counterpart shard with relocatingNodeId reflecting the source from which
|
||||
// it's relocating from.
|
||||
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
|
||||
addInitialRecovery(targetShardRouting);
|
||||
if (readOnly) {
|
||||
targetShardRouting.freeze();
|
||||
previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting);
|
||||
if (previousValue != null) {
|
||||
throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node");
|
||||
}
|
||||
entries.add(targetShardRouting);
|
||||
assignedShardsAdd(targetShardRouting);
|
||||
} else if (shard.active() == false) { // shards that are initializing without being relocated
|
||||
if (shard.primary()) {
|
||||
|
@ -125,14 +135,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
addInitialRecovery(shard);
|
||||
}
|
||||
} else {
|
||||
final ShardRouting sr = getRouting(shard, readOnly);
|
||||
assignedShardsAdd(sr);
|
||||
unassignedShards.add(sr);
|
||||
unassignedShards.add(shard);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (Map.Entry<String, List<ShardRouting>> entry : nodesToShards.entrySet()) {
|
||||
for (Map.Entry<String, LinkedHashMap<ShardId, ShardRouting>> entry : nodesToShards.entrySet()) {
|
||||
String nodeId = entry.getKey();
|
||||
this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue()));
|
||||
}
|
||||
|
@ -153,7 +161,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
private void addRecovery(final ShardRouting routing, final boolean increment, final boolean initializing) {
|
||||
final int howMany = increment ? 1 : -1;
|
||||
assert routing.initializing() : "routing must be initializing: " + routing;
|
||||
Recoveries.getOrAdd(recoveryiesPerNode, routing.currentNodeId()).addIncoming(howMany);
|
||||
Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany);
|
||||
final String sourceNodeId;
|
||||
if (routing.relocatingNodeId() != null) { // this is a relocation-target
|
||||
sourceNodeId = routing.relocatingNodeId();
|
||||
|
@ -165,8 +173,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
}
|
||||
}
|
||||
// we transfer the recoveries to the relocated primary
|
||||
recoveryiesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas);
|
||||
recoveryiesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas);
|
||||
recoveriesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas);
|
||||
recoveriesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas);
|
||||
}
|
||||
} else if (routing.primary() == false) { // primary without relocationID is initial recovery
|
||||
ShardRouting primary = findPrimary(routing);
|
||||
|
@ -180,16 +188,16 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
sourceNodeId = null;
|
||||
}
|
||||
if (sourceNodeId != null) {
|
||||
Recoveries.getOrAdd(recoveryiesPerNode, sourceNodeId).addOutgoing(howMany);
|
||||
Recoveries.getOrAdd(recoveriesPerNode, sourceNodeId).addOutgoing(howMany);
|
||||
}
|
||||
}
|
||||
|
||||
public int getIncomingRecoveries(String nodeId) {
|
||||
return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming();
|
||||
return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming();
|
||||
}
|
||||
|
||||
public int getOutgoingRecoveries(String nodeId) {
|
||||
return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing();
|
||||
return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing();
|
||||
}
|
||||
|
||||
private ShardRouting findPrimary(ShardRouting routing) {
|
||||
|
@ -211,15 +219,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
return primary;
|
||||
}
|
||||
|
||||
private static ShardRouting getRouting(ShardRouting src, boolean readOnly) {
|
||||
if (readOnly) {
|
||||
src.freeze(); // we just freeze and reuse this instance if we are read only
|
||||
} else {
|
||||
src = new ShardRouting(src);
|
||||
}
|
||||
return src;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<RoutingNode> iterator() {
|
||||
return Collections.unmodifiableCollection(nodesToShards.values()).iterator();
|
||||
|
@ -423,40 +422,46 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
* Moves a shard from unassigned to initialize state
|
||||
*
|
||||
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
|
||||
* @return the initialized shard
|
||||
*/
|
||||
public void initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) {
|
||||
public ShardRouting initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) {
|
||||
ensureMutable();
|
||||
assert shard.unassigned() : shard;
|
||||
shard.initialize(nodeId, existingAllocationId, expectedSize);
|
||||
node(nodeId).add(shard);
|
||||
assert shard.unassigned() : "expected an unassigned shard " + shard;
|
||||
ShardRouting initializedShard = shard.initialize(nodeId, existingAllocationId, expectedSize);
|
||||
node(nodeId).add(initializedShard);
|
||||
inactiveShardCount++;
|
||||
if (shard.primary()) {
|
||||
if (initializedShard.primary()) {
|
||||
inactivePrimaryCount++;
|
||||
}
|
||||
addRecovery(shard);
|
||||
assignedShardsAdd(shard);
|
||||
addRecovery(initializedShard);
|
||||
assignedShardsAdd(initializedShard);
|
||||
return initializedShard;
|
||||
}
|
||||
|
||||
/**
|
||||
* Relocate a shard to another node, adding the target initializing
|
||||
* shard as well as assigning it. And returning the target initializing
|
||||
* shard.
|
||||
* shard as well as assigning it.
|
||||
*
|
||||
* @return pair of source relocating and target initializing shards.
|
||||
*/
|
||||
public ShardRouting relocate(ShardRouting shard, String nodeId, long expectedShardSize) {
|
||||
public Tuple<ShardRouting,ShardRouting> relocate(ShardRouting shard, String nodeId, long expectedShardSize) {
|
||||
ensureMutable();
|
||||
relocatingShards++;
|
||||
shard.relocate(nodeId, expectedShardSize);
|
||||
ShardRouting target = shard.buildTargetRelocatingShard();
|
||||
ShardRouting source = shard.relocate(nodeId, expectedShardSize);
|
||||
ShardRouting target = source.buildTargetRelocatingShard();
|
||||
updateAssigned(shard, source);
|
||||
node(target.currentNodeId()).add(target);
|
||||
assignedShardsAdd(target);
|
||||
addRecovery(target);
|
||||
return target;
|
||||
return Tuple.tuple(source, target);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a shard as started and adjusts internal statistics.
|
||||
*
|
||||
* @return the started shard
|
||||
*/
|
||||
public void started(ShardRouting shard) {
|
||||
public ShardRouting started(ShardRouting shard) {
|
||||
ensureMutable();
|
||||
assert !shard.active() : "expected an initializing shard " + shard;
|
||||
if (shard.relocatingNodeId() == null) {
|
||||
|
@ -467,40 +472,39 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
}
|
||||
}
|
||||
removeRecovery(shard);
|
||||
shard.moveToStarted();
|
||||
ShardRouting startedShard = shard.moveToStarted();
|
||||
updateAssigned(shard, startedShard);
|
||||
return startedShard;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Cancels a relocation of a shard that shard must relocating.
|
||||
*
|
||||
* @return the shard after cancelling relocation
|
||||
*/
|
||||
public void cancelRelocation(ShardRouting shard) {
|
||||
public ShardRouting cancelRelocation(ShardRouting shard) {
|
||||
ensureMutable();
|
||||
relocatingShards--;
|
||||
shard.cancelRelocation();
|
||||
ShardRouting cancelledShard = shard.cancelRelocation();
|
||||
updateAssigned(shard, cancelledShard);
|
||||
return cancelledShard;
|
||||
}
|
||||
|
||||
/**
|
||||
* swaps the status of a shard, making replicas primary and vice versa.
|
||||
* moves the assigned replica shard to primary.
|
||||
*
|
||||
* @param shards the shard to have its primary status swapped.
|
||||
* @param replicaShard the replica shard to be promoted to primary
|
||||
* @return the resulting primary shard
|
||||
*/
|
||||
public void swapPrimaryFlag(ShardRouting... shards) {
|
||||
public ShardRouting promoteAssignedReplicaShardToPrimary(ShardRouting replicaShard) {
|
||||
ensureMutable();
|
||||
for (ShardRouting shard : shards) {
|
||||
if (shard.primary()) {
|
||||
shard.moveFromPrimary();
|
||||
if (shard.unassigned()) {
|
||||
unassignedShards.primaries--;
|
||||
}
|
||||
} else {
|
||||
shard.moveToPrimary();
|
||||
if (shard.unassigned()) {
|
||||
unassignedShards.primaries++;
|
||||
}
|
||||
}
|
||||
}
|
||||
assert replicaShard.unassigned() == false : "unassigned shard cannot be promoted to primary: " + replicaShard;
|
||||
assert replicaShard.primary() == false : "primary shard cannot be promoted to primary: " + replicaShard;
|
||||
ShardRouting primaryShard = replicaShard.moveToPrimary();
|
||||
updateAssigned(replicaShard, primaryShard);
|
||||
return primaryShard;
|
||||
}
|
||||
|
||||
private static final List<ShardRouting> EMPTY = Collections.emptyList();
|
||||
|
@@ -523,7 +527,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                inactivePrimaryCount--;
            }
        } else if (shard.relocating()) {
-           cancelRelocation(shard);
+           shard = cancelRelocation(shard);
        }
        assignedShardsRemove(shard);
        if (shard.initializing()) {

@@ -532,12 +536,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
    }

    private void assignedShardsAdd(ShardRouting shard) {
-       if (shard.unassigned()) {
-           // no unassigned
-           return;
-       }
+       assert shard.unassigned() == false : "unassigned shard " + shard + " cannot be added to list of assigned shards";
        List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
-       assert assertInstanceNotInList(shard, shards);
+       assert assertInstanceNotInList(shard, shards) : "shard " + shard + " cannot appear twice in list of assigned shards";
        shards.add(shard);
    }

@@ -560,8 +561,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                    return;
                }
            }
-           assert false : "Illegal state";
        }
+       assert false : "No shard found to remove";
    }

    public boolean isKnown(DiscoveryNode node) {

@@ -586,15 +587,30 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        return nodesToShards.values().toArray(new RoutingNode[nodesToShards.size()]);
    }

-   public void reinitShadowPrimary(ShardRouting candidate) {
+   public ShardRouting reinitShadowPrimary(ShardRouting candidate) {
        ensureMutable();
        if (candidate.relocating()) {
            cancelRelocation(candidate);
        }
-       candidate.reinitializeShard();
+       ShardRouting reinitializedShard = candidate.reinitializeShard();
+       updateAssigned(candidate, reinitializedShard);
        inactivePrimaryCount++;
        inactiveShardCount++;
+       return reinitializedShard;
    }

+   private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) {
+       assert oldShard.shardId().equals(newShard.shardId()) :
+           "can only update " + oldShard + " by shard with same shard id but was " + newShard;
+       assert oldShard.unassigned() == false && newShard.unassigned() == false :
+           "only assigned shards can be updated in list of assigned shards (prev: " + oldShard + ", new: " + newShard + ")";
+       assert oldShard.currentNodeId().equals(newShard.currentNodeId()) : "shard to update " + oldShard +
+           " can only update " + oldShard + " by shard assigned to same node but was " + newShard;
+       node(oldShard.currentNodeId()).update(oldShard, newShard);
+       List<ShardRouting> shardsWithMatchingShardId = assignedShards.computeIfAbsent(oldShard.shardId(), k -> new ArrayList<>());
+       int previousShardIndex = shardsWithMatchingShardId.indexOf(oldShard);
+       assert previousShardIndex >= 0 : "shard to update " + oldShard + " does not exist in list of assigned shards";
+       shardsWithMatchingShardId.set(previousShardIndex, newShard);
+   }

    /**

@@ -683,11 +699,11 @@ public class RoutingNodes implements Iterable<RoutingNode> {

    public class UnassignedIterator implements Iterator<ShardRouting> {

-       private final Iterator<ShardRouting> iterator;
+       private final ListIterator<ShardRouting> iterator;
        private ShardRouting current;

        public UnassignedIterator() {
-           this.iterator = unassigned.iterator();
+           this.iterator = unassigned.listIterator();
        }

        @Override

@@ -705,9 +721,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         *
         * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
         */
-       public void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
+       public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
            innerRemove();
-           nodes.initialize(new ShardRouting(current), nodeId, existingAllocationId, expectedShardSize);
+           return nodes.initialize(current, nodeId, existingAllocationId, expectedShardSize);
        }

        /**

@@ -721,6 +737,35 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            ignoreShard(current);
        }

+       private void updateShardRouting(ShardRouting shardRouting) {
+           current = shardRouting;
+           iterator.set(shardRouting);
+       }
+
+       /**
+        * updates the unassigned info on the current unassigned shard
+        *
+        * @param unassignedInfo the new unassigned info to use
+        * @return the shard with unassigned info updated
+        */
+       public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
+           ShardRouting updatedShardRouting = current.updateUnassignedInfo(unassignedInfo);
+           updateShardRouting(updatedShardRouting);
+           return updatedShardRouting;
+       }
+
+       /**
+        * marks the current primary shard as replica
+        *
+        * @return the shard with primary status swapped
+        */
+       public ShardRouting demotePrimaryToReplicaShard() {
+           assert current.primary() : "non-primary shard " + current + " cannot be demoted";
+           updateShardRouting(current.moveFromPrimary());
+           primaries--;
+           return current;
+       }
+
        /**
         * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
         * {@link #initialize(String, String, long)}.

@@ -847,7 +892,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            }
        }

-       for (Map.Entry<String, Recoveries> recoveries : routingNodes.recoveryiesPerNode.entrySet()) {
+       for (Map.Entry<String, Recoveries> recoveries : routingNodes.recoveriesPerNode.entrySet()) {
            String node = recoveries.getKey();
            final Recoveries value = recoveries.getValue();
            int incoming = 0;

@@ -962,14 +1007,14 @@ public class RoutingNodes implements Iterable<RoutingNode> {
            return iterable.iterator();
        }

-       public void moveToUnassigned(UnassignedInfo unassignedInfo) {
+       public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
            ensureMutable();
            if (isRemoved() == false) {
                remove();
            }
-           ShardRouting unassigned = new ShardRouting(shard); // protective copy of the mutable shard
-           unassigned.moveToUnassigned(unassignedInfo);
-           unassigned().add(unassigned);
+           ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo);
+           unassignedShards.add(unassigned);
+           return unassigned;
        }

        public ShardRouting current() {

@@ -983,6 +1028,44 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        }
    }

+   /**
+    * Creates an iterator over shards interleaving between nodes: The iterator returns the first shard from
+    * the first node, then the first shard of the second node, etc. until one shard from each node has been returned.
+    * The iterator then resumes on the first node by returning the second shard and continues until all shards from
+    * all the nodes have been returned.
+    */
+   public Iterator<ShardRouting> nodeInterleavedShardIterator() {
+       final Queue<Iterator<ShardRouting>> queue = new ArrayDeque<>();
+       for (Map.Entry<String, RoutingNode> entry : nodesToShards.entrySet()) {
+           queue.add(entry.getValue().copyShards().iterator());
+       }
+       return new Iterator<ShardRouting>() {
+           public boolean hasNext() {
+               while (!queue.isEmpty()) {
+                   if (queue.peek().hasNext()) {
+                       return true;
+                   }
+                   queue.poll();
+               }
+               return false;
+           }
+
+           public ShardRouting next() {
+               if (hasNext() == false) {
+                   throw new NoSuchElementException();
+               }
+               Iterator<ShardRouting> iter = queue.poll();
+               ShardRouting result = iter.next();
+               queue.offer(iter);
+               return result;
+           }
+
+           public void remove() {
+               throw new UnsupportedOperationException();
+           }
+       };
+   }
+
    private static final class Recoveries {
        private static final Recoveries EMPTY = new Recoveries();
        private int incoming = 0;
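
The queue-of-iterators trick in nodeInterleavedShardIterator generalizes to any round-robin merge of per-node collections. A standalone sketch of the same idea; the class, its names, and the demo are illustrative, not part of the commit:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Queue;

final class RoundRobin {
    // Interleaves: a1, b1, c1, a2, ... by cycling iterators through a queue.
    static <T> Iterator<T> interleave(List<? extends Iterable<T>> sources) {
        final Queue<Iterator<T>> queue = new ArrayDeque<>();
        for (Iterable<T> source : sources) {
            queue.add(source.iterator());
        }
        return new Iterator<T>() {
            @Override
            public boolean hasNext() {
                while (!queue.isEmpty()) {
                    if (queue.peek().hasNext()) {
                        return true;
                    }
                    queue.poll(); // drop exhausted iterators
                }
                return false;
            }

            @Override
            public T next() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                Iterator<T> it = queue.poll();
                T result = it.next();
                queue.offer(it); // move this source to the back of the rotation
                return result;
            }
        };
    }

    public static void main(String[] args) {
        Iterator<String> it = interleave(Arrays.asList(
            Arrays.asList("a1", "a2"), Arrays.asList("b1"), Arrays.asList("c1", "c2", "c3")));
        it.forEachRemaining(System.out::println); // a1 b1 c1 a2 c2 c3
    }
}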

@@ -21,10 +21,11 @@ package org.elasticsearch.cluster.routing;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

@@ -38,43 +39,31 @@ import java.util.List;
 * {@link ShardRouting} immutably encapsulates information about shard
 * routings like id, state, version, etc.
 */
-public final class ShardRouting implements Streamable, ToXContent {
+public final class ShardRouting implements Writeable, ToXContent {

    /**
     * Used if shard size is not available
     */
    public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1;

-   private Index index;
-   private int shardId;
-   private String currentNodeId;
-   private String relocatingNodeId;
-   private boolean primary;
-   private ShardRoutingState state;
-   private RestoreSource restoreSource;
-   private UnassignedInfo unassignedInfo;
-   private AllocationId allocationId;
+   private final ShardId shardId;
+   private final String currentNodeId;
+   private final String relocatingNodeId;
+   private final boolean primary;
+   private final ShardRoutingState state;
+   private final RestoreSource restoreSource;
+   private final UnassignedInfo unassignedInfo;
+   private final AllocationId allocationId;
    private final transient List<ShardRouting> asList;
-   private transient ShardId shardIdentifier;
-   private boolean frozen = false;
-   private long expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
-
-   private ShardRouting() {
-       this.asList = Collections.singletonList(this);
-   }
-
-   public ShardRouting(ShardRouting copy) {
-       this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
-   }
+   private final long expectedShardSize;

    /**
     * A constructor to internally create shard routing instances, note, the internal flag should only be set to true
     * by either this class or tests. Visible for testing.
     */
-   ShardRouting(Index index, int shardId, String currentNodeId,
+   ShardRouting(ShardId shardId, String currentNodeId,
                 String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
-                UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
-       this.index = index;
+                UnassignedInfo unassignedInfo, AllocationId allocationId, long expectedShardSize) {
        this.shardId = shardId;
        this.currentNodeId = currentNodeId;
        this.relocatingNodeId = relocatingNodeId;

@@ -88,38 +77,31 @@ public final class ShardRouting implements Streamable, ToXContent {
        assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
+       assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
        assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
-       if (!internal) {
-           assert state == ShardRoutingState.UNASSIGNED;
-           assert currentNodeId == null;
-           assert relocatingNodeId == null;
-           assert allocationId == null;
-       }
    }

    /**
     * Creates a new unassigned shard.
     */
-   public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
-       return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
+   public static ShardRouting newUnassigned(ShardId shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
+       return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    public Index index() {
-       return this.index;
+       return shardId.getIndex();
    }

    /**
     * The index name.
     */
    public String getIndexName() {
-       return index().getName();
+       return shardId.getIndexName();
    }

    /**
     * The shard id.
     */
    public int id() {
-       return this.shardId;
+       return shardId.id();
    }

    /**

@@ -201,8 +183,8 @@ public final class ShardRouting implements Streamable, ToXContent {
     */
    public ShardRouting buildTargetRelocatingShard() {
        assert relocating();
-       return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
-           AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
+       return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
+           AllocationId.newTargetRelocation(allocationId), expectedShardSize);
    }

    /**

@@ -247,11 +229,7 @@ public final class ShardRouting implements Streamable, ToXContent {
     * The shard id.
     */
    public ShardId shardId() {
-       if (shardIdentifier != null) {
-           return shardIdentifier;
-       }
-       shardIdentifier = new ShardId(index, shardId);
-       return shardIdentifier;
+       return shardId;
    }

    public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {

@@ -278,57 +256,30 @@ public final class ShardRouting implements Streamable, ToXContent {
     * A shard iterator with just this shard in it.
     */
    public ShardIterator shardsIt() {
-       return new PlainShardIterator(shardId(), asList);
+       return new PlainShardIterator(shardId, asList);
    }

-   public static ShardRouting readShardRoutingEntry(StreamInput in) throws IOException {
-       ShardRouting entry = new ShardRouting();
-       entry.readFrom(in);
-       return entry;
-   }
-
-   public static ShardRouting readShardRoutingEntry(StreamInput in, Index index, int shardId) throws IOException {
-       ShardRouting entry = new ShardRouting();
-       entry.readFrom(in, index, shardId);
-       return entry;
-   }
-
-   public void readFrom(StreamInput in, Index index, int shardId) throws IOException {
-       this.index = index;
+   public ShardRouting(ShardId shardId, StreamInput in) throws IOException {
        this.shardId = shardId;
-       readFromThin(in);
-   }
-
-   public void readFromThin(StreamInput in) throws IOException {
-       if (in.readBoolean()) {
-           currentNodeId = in.readString();
-       }
-
-       if (in.readBoolean()) {
-           relocatingNodeId = in.readString();
-       }
-
+       currentNodeId = in.readOptionalString();
+       relocatingNodeId = in.readOptionalString();
        primary = in.readBoolean();
        state = ShardRoutingState.fromValue(in.readByte());

        restoreSource = RestoreSource.readOptionalRestoreSource(in);
-       if (in.readBoolean()) {
-           unassignedInfo = new UnassignedInfo(in);
-       }
-       if (in.readBoolean()) {
-           allocationId = new AllocationId(in);
-       }
-       if (relocating() || initializing()) {
-           expectedShardSize = in.readLong();
+       unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+       allocationId = in.readOptionalWriteable(AllocationId::new);
+       final long shardSize;
+       if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) {
+           shardSize = in.readLong();
        } else {
-           expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
+           shardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
        }
-       freeze();
+       expectedShardSize = shardSize;
+       asList = Collections.singletonList(this);
    }

-   @Override
-   public void readFrom(StreamInput in) throws IOException {
-       readFrom(in, new Index(in), in.readVInt());
+   public ShardRouting(StreamInput in) throws IOException {
+       this(ShardId.readShardId(in), in);
    }

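The writeOptional*/readOptional* calls that replace the hand-rolled writeBoolean framing in this file all follow the same presence-flag convention. A sketch of that convention using plain java.io streams, assuming nothing about Elasticsearch's StreamInput/StreamOutput beyond what the diff itself shows:

import java.io.*;

final class OptionalFraming {
    // One presence bit, then the payload; both sides must agree on field order.
    static void writeOptionalString(DataOutput out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, "node-1");
        writeOptionalString(out, null);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // node-1
        System.out.println(readOptionalString(in)); // null
    }
}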
    /**

@@ -338,74 +289,37 @@ public final class ShardRouting implements Streamable, ToXContent {
     * @throws IOException if something happens during write
     */
    public void writeToThin(StreamOutput out) throws IOException {
-       if (currentNodeId != null) {
-           out.writeBoolean(true);
-           out.writeString(currentNodeId);
-       } else {
-           out.writeBoolean(false);
-       }
-
-       if (relocatingNodeId != null) {
-           out.writeBoolean(true);
-           out.writeString(relocatingNodeId);
-       } else {
-           out.writeBoolean(false);
-       }
-
+       out.writeOptionalString(currentNodeId);
+       out.writeOptionalString(relocatingNodeId);
        out.writeBoolean(primary);
        out.writeByte(state.value());

-       if (restoreSource != null) {
-           out.writeBoolean(true);
-           restoreSource.writeTo(out);
-       } else {
-           out.writeBoolean(false);
-       }
-       if (unassignedInfo != null) {
-           out.writeBoolean(true);
-           unassignedInfo.writeTo(out);
-       } else {
-           out.writeBoolean(false);
-       }
-       if (allocationId != null) {
-           out.writeBoolean(true);
-           allocationId.writeTo(out);
-       } else {
-           out.writeBoolean(false);
-       }
-       if (relocating() || initializing()) {
+       out.writeOptionalStreamable(restoreSource);
+       out.writeOptionalWriteable(unassignedInfo);
+       out.writeOptionalWriteable(allocationId);
+       if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) {
            out.writeLong(expectedShardSize);
        }

    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       index.writeTo(out);
-       out.writeVInt(shardId);
+       shardId.writeTo(out);
        writeToThin(out);
    }

-   public void updateUnassignedInfo(UnassignedInfo unassignedInfo) {
-       ensureNotFrozen();
+   public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
        assert this.unassignedInfo != null : "can only update unassigned info if it is already set";
-       this.unassignedInfo = unassignedInfo;
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state,
+           unassignedInfo, allocationId, expectedShardSize);
    }

-   // package private mutators start here
-
    /**
     * Moves the shard to unassigned state.
     */
-   void moveToUnassigned(UnassignedInfo unassignedInfo) {
-       ensureNotFrozen();
+   public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
        assert state != ShardRoutingState.UNASSIGNED : this;
-       state = ShardRoutingState.UNASSIGNED;
-       currentNodeId = null;
-       relocatingNodeId = null;
-       this.unassignedInfo = unassignedInfo;
-       allocationId = null;
-       expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
+       return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED,
+           unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**

@@ -413,18 +327,17 @@ public final class ShardRouting implements Streamable, ToXContent {
     *
     * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
     */
-   void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
-       ensureNotFrozen();
+   public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
        assert state == ShardRoutingState.UNASSIGNED : this;
        assert relocatingNodeId == null : this;
-       state = ShardRoutingState.INITIALIZING;
-       currentNodeId = nodeId;
+       final AllocationId allocationId;
        if (existingAllocationId == null) {
            allocationId = AllocationId.newInitializing();
        } else {
            allocationId = AllocationId.newInitializing(existingAllocationId);
        }
-       this.expectedShardSize = expectedShardSize;
+       return new ShardRouting(shardId, nodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
+           unassignedInfo, allocationId, expectedShardSize);
    }

    /**

@@ -432,39 +345,31 @@ public final class ShardRouting implements Streamable, ToXContent {
     *
     * @param relocatingNodeId id of the node to relocate the shard
     */
-   void relocate(String relocatingNodeId, long expectedShardSize) {
-       ensureNotFrozen();
+   public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) {
        assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
-       state = ShardRoutingState.RELOCATING;
-       this.relocatingNodeId = relocatingNodeId;
-       this.allocationId = AllocationId.newRelocation(allocationId);
-       this.expectedShardSize = expectedShardSize;
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, ShardRoutingState.RELOCATING,
+           null, AllocationId.newRelocation(allocationId), expectedShardSize);
    }

    /**
     * Cancel relocation of a shard. The shard's state must be set
     * to <code>RELOCATING</code>.
     */
-   void cancelRelocation() {
-       ensureNotFrozen();
+   public ShardRouting cancelRelocation() {
        assert state == ShardRoutingState.RELOCATING : this;
        assert assignedToNode() : this;
        assert relocatingNodeId != null : this;
-       expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
-       state = ShardRoutingState.STARTED;
-       relocatingNodeId = null;
-       allocationId = AllocationId.cancelRelocation(allocationId);
+       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED,
+           null, AllocationId.cancelRelocation(allocationId), UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**
     * Moves the shard from started to initializing
     */
-   void reinitializeShard() {
-       ensureNotFrozen();
+   public ShardRouting reinitializeShard() {
        assert state == ShardRoutingState.STARTED;
-       state = ShardRoutingState.INITIALIZING;
-       allocationId = AllocationId.newInitializing();
-       this.unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null);
+       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
+           new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**

@@ -472,46 +377,46 @@ public final class ShardRouting implements Streamable, ToXContent {
     * <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
     * canceled.
     */
-   void moveToStarted() {
-       ensureNotFrozen();
+   public ShardRouting moveToStarted() {
        assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this;
-       relocatingNodeId = null;
-       restoreSource = null;
-       unassignedInfo = null; // we keep the unassigned data until the shard is started
+       AllocationId allocationId = this.allocationId;
        if (allocationId.getRelocationId() != null) {
            // relocation target
            allocationId = AllocationId.finishRelocation(allocationId);
        }
-       expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
-       state = ShardRoutingState.STARTED;
+       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED, null, allocationId,
+           UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**
     * Make the shard a primary.
-    * //TODO: doc exception
+    *
+    * @throws IllegalShardRoutingStateException if shard is already a primary
     */
-   void moveToPrimary() {
-       ensureNotFrozen();
+   public ShardRouting moveToPrimary() {
        if (primary) {
            throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
        }
-       primary = true;
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, true, state, unassignedInfo, allocationId,
+           expectedShardSize);
    }

    /**
     * Set the primary shard to non-primary
     *
     * @throws IllegalShardRoutingStateException if shard is already a replica
     */
-   void moveFromPrimary() {
-       ensureNotFrozen();
+   public ShardRouting moveFromPrimary() {
        if (!primary) {
            throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
        }
-       primary = false;
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, false, state, unassignedInfo, allocationId,
+           expectedShardSize);
    }

    /** returns true if this routing has the same shardId as another */
    public boolean isSameShard(ShardRouting other) {
-       return index.equals(other.index) && shardId == other.shardId;
+       return getIndexName().equals(other.getIndexName()) && id() == other.id();
    }

    /**

@@ -592,15 +497,12 @@ public final class ShardRouting implements Streamable, ToXContent {
        if (primary != other.primary) {
            return false;
        }
-       if (shardId != other.shardId) {
+       if (shardId != null ? !shardId.equals(other.shardId) : other.shardId != null) {
            return false;
        }
        if (currentNodeId != null ? !currentNodeId.equals(other.currentNodeId) : other.currentNodeId != null) {
            return false;
        }
-       if (index != null ? !index.equals(other.index) : other.index != null) {
-           return false;
-       }
        if (relocatingNodeId != null ? !relocatingNodeId.equals(other.relocatingNodeId) : other.relocatingNodeId != null) {
            return false;
        }

@@ -631,27 +533,27 @@ public final class ShardRouting implements Streamable, ToXContent {
        return equalsIgnoringMetaData(that);
    }

-   private boolean usePreComputedHashCode = false;
-   private int hashCode = 0;
+   /**
+    * Cache the hash code in the same way as {@link String#hashCode()}, using the racy single-check idiom,
+    * as it is mainly used in single-threaded code ({@link BalancedShardsAllocator}).
+    */
+   private int hashCode; // default to 0

    @Override
    public int hashCode() {
-       if (frozen && usePreComputedHashCode) {
-           return hashCode;
+       int h = hashCode;
+       if (h == 0) {
+           h = shardId.hashCode();
+           h = 31 * h + (currentNodeId != null ? currentNodeId.hashCode() : 0);
+           h = 31 * h + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
+           h = 31 * h + (primary ? 1 : 0);
+           h = 31 * h + (state != null ? state.hashCode() : 0);
+           h = 31 * h + (restoreSource != null ? restoreSource.hashCode() : 0);
+           h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0);
+           h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
+           hashCode = h;
        }
-       int result = index != null ? index.hashCode() : 0;
-       result = 31 * result + shardId;
-       result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
-       result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
-       result = 31 * result + (primary ? 1 : 0);
-       result = 31 * result + (state != null ? state.hashCode() : 0);
-       result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
-       result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
-       result = 31 * result + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
-       if (frozen) {
-           usePreComputedHashCode = true;
-       }
-       return hashCode = result;
+       return h;
    }

    @Override

@@ -664,7 +566,7 @@ public final class ShardRouting implements Streamable, ToXContent {
     */
    public String shortSummary() {
        StringBuilder sb = new StringBuilder();
-       sb.append('[').append(index).append(']').append('[').append(shardId).append(']');
+       sb.append('[').append(shardId.getIndexName()).append(']').append('[').append(shardId.getId()).append(']');
        sb.append(", node[").append(currentNodeId).append("], ");
        if (relocatingNodeId != null) {
            sb.append("relocating [").append(relocatingNodeId).append("], ");

@@ -697,8 +599,8 @@ public final class ShardRouting implements Streamable, ToXContent {
            .field("primary", primary())
            .field("node", currentNodeId())
            .field("relocating_node", relocatingNodeId())
-           .field("shard", shardId().id())
-           .field("index", shardId().getIndex().getName());
+           .field("shard", id())
+           .field("index", getIndexName());
        if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
            builder.field("expected_shard_size_in_bytes", expectedShardSize);
        }

@@ -716,20 +618,6 @@ public final class ShardRouting implements Streamable, ToXContent {
        return builder.endObject();
    }

-   private void ensureNotFrozen() {
-       if (frozen) {
-           throw new IllegalStateException("ShardRouting can't be modified anymore - already frozen");
-       }
-   }
-
-   void freeze() {
-       frozen = true;
-   }
-
-   boolean isFrozen() {
-       return frozen;
-   }
-
    /**
     * Returns the expected shard size for {@link ShardRoutingState#RELOCATING} and {@link ShardRoutingState#INITIALIZING}
     * shards. If its size is not available {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} will be returned.
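
The hashCode change above uses the racy single-check caching its javadoc mentions. A standalone sketch of the idiom on a hypothetical Point class; it is safe only because the cached value is an int (writes are atomic in Java) and recomputation is idempotent:

final class Point {
    private final int x;
    private final int y;
    private int hash; // 0 doubles as the "unset" sentinel, like String.hashCode()

    Point(int x, int y) {
        this.x = x;
        this.y = y;
    }

    @Override
    public int hashCode() {
        int h = hash;   // single read of the cache field
        if (h == 0) {
            h = 31 * x + y;
            hash = h;   // racy publish: another thread may redo the work, but never sees a wrong value
        }
        return h;
    }
}

Note the same caveat as String: an object whose true hash is 0 is recomputed on every call, which is harmless.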
@@ -40,7 +40,7 @@ import java.io.IOException;
/**
 * Holds additional information as to why the shard is in unassigned state.
 */
-public class UnassignedInfo implements ToXContent, Writeable {
+public final class UnassignedInfo implements ToXContent, Writeable {

    public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
    private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);

@@ -109,7 +109,7 @@ public class UnassignedInfo implements ToXContent, Writeable {
    private final Reason reason;
    private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
    private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
-   private volatile long lastComputedLeftDelayNanos = 0L; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
+   private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
    private final String message;
    private final Throwable failure;

@@ -134,17 +134,28 @@ public class UnassignedInfo implements ToXContent, Writeable {
        this.reason = reason;
        this.unassignedTimeMillis = unassignedTimeMillis;
        this.unassignedTimeNanos = unassignedTimeNanos;
+       this.lastComputedLeftDelayNanos = 0L;
        this.message = message;
        this.failure = failure;
        assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
    }

+   public UnassignedInfo(UnassignedInfo unassignedInfo, long newComputedLeftDelayNanos) {
+       this.reason = unassignedInfo.reason;
+       this.unassignedTimeMillis = unassignedInfo.unassignedTimeMillis;
+       this.unassignedTimeNanos = unassignedInfo.unassignedTimeNanos;
+       this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
+       this.message = unassignedInfo.message;
+       this.failure = unassignedInfo.failure;
+   }
+
    public UnassignedInfo(StreamInput in) throws IOException {
        this.reason = Reason.values()[(int) in.readByte()];
        this.unassignedTimeMillis = in.readLong();
        // As System.nanoTime() cannot be compared across different JVMs, reset it to now.
        // This means that in master failover situations, elapsed delay time is forgotten.
        this.unassignedTimeNanos = System.nanoTime();
+       this.lastComputedLeftDelayNanos = 0L;
        this.message = in.readOptionalString();
        this.failure = in.readThrowable();
    }

@@ -247,14 +258,16 @@ public class UnassignedInfo implements ToXContent, Writeable {
    }

    /**
-    * Updates delay left based on current time (in nanoseconds) and index/node settings.
+    * Creates a new UnassignedInfo object if the delay needs updating.
     *
-    * @return updated delay in nanoseconds
+    * @return a new UnassignedInfo with the updated delay, or this if the delay is unchanged
     */
-   public long updateDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
+   public UnassignedInfo updateDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) {
        final long newComputedLeftDelayNanos = getRemainingDelay(nanoTimeNow, settings, indexSettings);
-       lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
-       return newComputedLeftDelayNanos;
+       if (lastComputedLeftDelayNanos == newComputedLeftDelayNanos) {
+           return this;
+       }
+       return new UnassignedInfo(this, newComputedLeftDelayNanos);
    }

    /**
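
updateDelay now follows a copy-on-change convention: it returns this when nothing changed, so callers can detect a change by reference equality, as AllocationService does below. A generic sketch of the convention, with illustrative names:

final class Delayed {
    final long delayNanos;

    Delayed(long delayNanos) {
        this.delayNanos = delayNanos;
    }

    // Returns this when the value is unchanged, so `updated != original` means "changed".
    Delayed withDelay(long newDelayNanos) {
        return newDelayNanos == delayNanos ? this : new Delayed(newDelayNanos);
    }
}

class CopyOnChangeDemo {
    public static void main(String[] args) {
        Delayed original = new Delayed(1_000L);
        Delayed same = original.withDelay(1_000L);
        Delayed changed = original.withDelay(2_000L);
        System.out.println(same == original);    // true: no allocation, nothing to re-register
        System.out.println(changed == original); // false: caller must swap in the new instance
    }
}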
@@ -353,10 +353,17 @@ public class AllocationService extends AbstractComponent {

    // public for testing
    public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
-       for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) {
-           final MetaData metaData = allocation.metaData();
+       final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
+       final MetaData metaData = allocation.metaData();
+       while (unassignedIterator.hasNext()) {
+           ShardRouting shardRouting = unassignedIterator.next();
            final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
-           shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings());
+           UnassignedInfo previousUnassignedInfo = shardRouting.unassignedInfo();
+           UnassignedInfo updatedUnassignedInfo = previousUnassignedInfo.updateDelay(allocation.getCurrentNanoTime(), settings,
+               indexMetaData.getSettings());
+           if (updatedUnassignedInfo != previousUnassignedInfo) { // reference equality!
+               unassignedIterator.updateUnassignedInfo(updatedUnassignedInfo);
+           }
        }
    }

@@ -369,29 +376,32 @@ public class AllocationService extends AbstractComponent {
        }
        // now, go over and elect a new primary if possible, note, from this code block on, if one is elected,
        // routingNodes.hasUnassignedPrimaries() will potentially be false
-       for (ShardRouting shardEntry : routingNodes.unassigned()) {
+       final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+       while (unassignedIterator.hasNext()) {
+           ShardRouting shardEntry = unassignedIterator.next();
            if (shardEntry.primary()) {
                // remove dangling replicas that are initializing for primary shards
                changed |= failReplicasForUnassignedPrimary(allocation, shardEntry);
                ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
                if (candidate != null) {
-                   routingNodes.swapPrimaryFlag(shardEntry, candidate);
-                   if (candidate.relocatingNodeId() != null) {
+                   shardEntry = unassignedIterator.demotePrimaryToReplicaShard();
+                   ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate);
+                   if (primarySwappedCandidate.relocatingNodeId() != null) {
                        changed = true;
                        // it's also relocating, make sure to move the other routing to primary
-                       RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
+                       RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId());
                        if (node != null) {
                            for (ShardRouting shardRouting : node) {
-                               if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
-                                   routingNodes.swapPrimaryFlag(shardRouting);
+                               if (shardRouting.shardId().equals(primarySwappedCandidate.shardId()) && !shardRouting.primary()) {
+                                   routingNodes.promoteAssignedReplicaShardToPrimary(shardRouting);
                                    break;
                                }
                            }
                        }
                    }
-                   IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index());
+                   IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index());
                    if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
-                       routingNodes.reinitShadowPrimary(candidate);
+                       routingNodes.reinitShadowPrimary(primarySwappedCandidate);
                        changed = true;
                    }
                }

@@ -466,24 +476,26 @@ public class AllocationService extends AbstractComponent {
        }

-       RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
+       RoutingNode currentRoutingNode = routingNodes.node(startedShard.currentNodeId());
        if (currentRoutingNode == null) {
            logger.debug("{} failed to find shard in order to start it [failed to find node], ignoring (routing: {})", startedShard.shardId(), startedShard);
            continue;
        }

-       for (ShardRouting shard : currentRoutingNode) {
-           if (shard.isSameAllocation(startedShard)) {
-               if (shard.active()) {
-                   logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard);
-               } else {
-                   dirty = true;
-                   // override started shard with the latest copy. Capture it now, before starting the shard destroys it...
-                   startedShard = new ShardRouting(shard);
-                   routingNodes.started(shard);
-                   logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);
-               }
-               break;
+       ShardRouting matchingShard = currentRoutingNode.getByShardId(startedShard.shardId());
+       if (matchingShard == null) {
+           logger.debug("{} failed to find shard in order to start it [failed to find shard], ignoring (routing: {})", startedShard.shardId(), startedShard);
+       } else if (matchingShard.isSameAllocation(startedShard) == false) {
+           logger.debug("{} failed to find shard with matching allocation id in order to start it [failed to find matching shard], ignoring (routing: {}, matched shard routing: {})", startedShard.shardId(), startedShard, matchingShard);
+       } else {
+           if (matchingShard.active()) {
+               logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard);
+           } else {
+               dirty = true;
+               // override started shard with the latest copy.
+               startedShard = matchingShard;
+               routingNodes.started(matchingShard);
+               logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);
+           }
        }

@@ -545,8 +557,8 @@ public class AllocationService extends AbstractComponent {
            // fail replicas first otherwise we move RoutingNodes into an inconsistent state
            failReplicasForUnassignedPrimary(allocation, failedShard);
        }
-       // replace incoming instance to make sure we work on the latest one. Copy it to maintain information during modifications.
-       failedShard = new ShardRouting(matchedNode.current());
+       // replace incoming instance to make sure we work on the latest one
+       failedShard = matchedNode.current();

        // remove the current copy of the shard
        matchedNode.remove();
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;

@@ -512,28 +513,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
        // offloading the shards.
        boolean changed = false;
-       int index = 0;
-       boolean found = true;
        final NodeSorter sorter = newNodeSorter();
-       while (found) {
-           found = false;
-           for (RoutingNode routingNode : allocation.routingNodes()) {
-               if (index >= routingNode.size()) {
-                   continue;
-               }
-               found = true;
-               ShardRouting shardRouting = routingNode.get(index);
-               // we can only move started shards...
-               if (shardRouting.started()) {
-                   final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
-                   assert sourceNode != null && sourceNode.containsShard(shardRouting);
-                   Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
-                   if (decision.type() == Decision.Type.NO) {
-                       changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
-                   }
+       for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) {
+           ShardRouting shardRouting = it.next();
+           // we can only move started shards...
+           if (shardRouting.started()) {
+               final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
+               assert sourceNode != null && sourceNode.containsShard(shardRouting);
+               RoutingNode routingNode = sourceNode.getRoutingNode();
+               Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
+               if (decision.type() == Decision.Type.NO) {
+                   changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
                }
            }
-           index++;
        }

        return changed;

@@ -560,8 +552,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
        if (allocationDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
            sourceNode.removeShard(shardRouting);
-           ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-           currentNode.addShard(targetRelocatingShard);
+           Tuple<ShardRouting, ShardRouting> relocatingShards = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+           currentNode.addShard(relocatingShards.v2());
            if (logger.isTraceEnabled()) {
                logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
            }

@@ -729,15 +721,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        }
        assert decision != null && minNode != null || decision == null && minNode == null;
        if (minNode != null) {
-           minNode.addShard(shard);
+           long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
            if (decision.type() == Type.YES) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
                }
-               routingNodes.initialize(shard, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+               shard = routingNodes.initialize(shard, minNode.getNodeId(), null, shardSize);
+               minNode.addShard(shard);
                changed = true;
                continue; // don't add to ignoreUnassigned
            } else {
+               minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
                final RoutingNode node = minNode.getRoutingNode();
                if (deciders.canAllocate(node, allocation).type() != Type.YES) {
                    if (logger.isTraceEnabled()) {

@@ -810,15 +804,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        if (candidate != null) {
            /* allocate on the model even if not throttled */
            maxNode.removeShard(candidate);
-           minNode.addShard(candidate);
+           long shardSize = allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);

            if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
                if (logger.isTraceEnabled()) {
                    logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
                        minNode.getNodeId());
                }
                /* now allocate on the cluster */
-               routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+               minNode.addShard(routingNodes.relocate(candidate, minNode.getNodeId(), shardSize).v1());
                return true;
+           } else {
+               assert decision.type() == Type.THROTTLE;
+               minNode.addShard(candidate.relocate(minNode.getNodeId(), shardSize));
            }
        }
    }
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

@@ -38,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
-import java.util.function.Consumer;
import java.util.function.Function;

/**
 * Abstract base class for allocating an unassigned shard to a node

@@ -196,17 +198,17 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
     * @param routingNodes the routing nodes
     * @param routingNode the node to initialize it to
     * @param shardRouting the shard routing that is to be matched in unassigned shards
-    * @param shardRoutingChanges changes to apply for shard routing in unassigned shards before initialization
+    * @param unassignedInfo unassigned info to override
     */
    protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
-                                            ShardRouting shardRouting, @Nullable Consumer<ShardRouting> shardRoutingChanges) {
+                                            ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo) {
        for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
            ShardRouting unassigned = it.next();
            if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
                continue;
            }
-           if (shardRoutingChanges != null) {
-               shardRoutingChanges.accept(unassigned);
+           if (unassignedInfo != null) {
+               unassigned = it.updateUnassignedInfo(unassignedInfo);
            }
            it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
            return;

@@ -120,15 +120,16 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
                "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
        }

-       initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting,
-           shr -> {
-               if (shr.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
-                   // we need to move the unassigned info back to treat it as if it was index creation
-                   shr.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
-                       "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
-                       shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
-               }
-           });
+       UnassignedInfo unassignedInfoToUpdate = null;
+       if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
+           // we need to move the unassigned info back to treat it as if it was index creation
+           unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
+               "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
+               shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());
+       }
+
+       initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);

        return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
    }
}

@@ -155,7 +155,7 @@ public class CancelAllocationCommand implements AllocationCommand {
            throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
                discoNode + ", shard is primary and initializing its state");
        }
-       it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null));
+       shardRouting = it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null));
        // now, go and find the shard that is initializing on the target node, and cancel it as well...
        RoutingNodes.RoutingNodeIterator initializingNode = allocation.routingNodes().routingNodeIter(shardRouting.relocatingNodeId());
        if (initializingNode != null) {
@@ -37,7 +37,6 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
-import java.util.Properties;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;

@@ -243,45 +242,6 @@ public class Strings {
        return hasText((CharSequence) str);
    }

-   /**
-    * Check whether the given CharSequence contains any whitespace characters.
-    *
-    * @param str the CharSequence to check (may be <code>null</code>)
-    * @return <code>true</code> if the CharSequence is not empty and
-    *         contains at least 1 whitespace character
-    * @see java.lang.Character#isWhitespace
-    */
-   public static boolean containsWhitespace(CharSequence str) {
-       if (!hasLength(str)) {
-           return false;
-       }
-       int strLen = str.length();
-       for (int i = 0; i < strLen; i++) {
-           if (Character.isWhitespace(str.charAt(i))) {
-               return true;
-           }
-       }
-       return false;
-   }
-
-   /**
-    * Trim leading whitespace from the given String.
-    *
-    * @param str the String to check
-    * @return the trimmed String
-    * @see java.lang.Character#isWhitespace
-    */
-   public static String trimLeadingWhitespace(String str) {
-       if (!hasLength(str)) {
-           return str;
-       }
-       StringBuilder sb = new StringBuilder(str);
-       while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) {
-           sb.deleteCharAt(0);
-       }
-       return sb.toString();
-   }
-
    /**
     * Trim all occurrences of the supplied leading character from the given String.
     *

@@ -416,17 +376,6 @@ public class Strings {
        return (str != null ? "'" + str + "'" : null);
    }

-   /**
-    * Unqualify a string qualified by a separator character. For example,
-    * "this:name:is:qualified" returns "qualified" if using a ':' separator.
-    *
-    * @param qualifiedName the qualified name
-    * @param separator the separator
-    */
-   public static String unqualify(String qualifiedName, char separator) {
-       return qualifiedName.substring(qualifiedName.lastIndexOf(separator) + 1);
-   }
-
    /**
     * Capitalize a <code>String</code>, changing the first letter to
     * upper case as per {@link Character#toUpperCase(char)}.

@@ -611,41 +560,6 @@ public class Strings {
        return new String[]{beforeDelimiter, afterDelimiter};
    }

-   /**
-    * Take an array Strings and split each element based on the given delimiter.
-    * A <code>Properties</code> instance is then generated, with the left of the
-    * delimiter providing the key, and the right of the delimiter providing the value.
-    * <p>Will trim both the key and value before adding them to the
-    * <code>Properties</code> instance.
-    *
-    * @param array the array to process
-    * @param delimiter to split each element using (typically the equals symbol)
-    * @param charsToDelete one or more characters to remove from each element
-    *        prior to attempting the split operation (typically the quotation mark
-    *        symbol), or <code>null</code> if no removal should occur
-    * @return a <code>Properties</code> instance representing the array contents,
-    *         or <code>null</code> if the array to process was <code>null</code> or empty
-    */
-   public static Properties splitArrayElementsIntoProperties(
-           String[] array, String delimiter, String charsToDelete) {
-
-       if (isEmpty(array)) {
-           return null;
-       }
-       Properties result = new Properties();
-       for (String element : array) {
-           if (charsToDelete != null) {
-               element = deleteAny(element, charsToDelete);
-           }
-           String[] splittedElement = split(element, delimiter);
-           if (splittedElement == null) {
-               continue;
-           }
-           result.setProperty(splittedElement[0].trim(), splittedElement[1].trim());
-       }
-       return result;
-   }
-
    /**
     * Tokenize the given String into a String array via a StringTokenizer.
     * Trims tokens and omits empty tokens.
@@ -66,18 +66,12 @@ public class FilterableTermsEnum extends TermsEnum {
    protected long currentTotalTermFreq = 0;
    protected BytesRef current;
    protected final int docsEnumFlag;
-   protected int numDocs;

    public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter) throws IOException {
        if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
            throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
        }
        this.docsEnumFlag = docsEnumFlag;
-       if (filter == null) {
-           // Important - need to use the doc count that includes deleted docs
-           // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
-           numDocs = reader.maxDoc();
-       }
        List<LeafReaderContext> leaves = reader.leaves();
        List<Holder> enums = new ArrayList<>(leaves.size());
        final Weight weight;

@@ -118,20 +112,12 @@ public class FilterableTermsEnum extends TermsEnum {
            }

            bits = BitSet.of(docs, context.reader().maxDoc());
-
-           // Count how many docs are in our filtered set
-           // TODO make this lazy-loaded only for those that need it?
-           numDocs += bits.cardinality();
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
    }

-   public int getNumDocs() {
-       return numDocs;
-   }
-
    @Override
    public BytesRef term() throws IOException {
        return current;
@@ -151,66 +151,7 @@ public final class Fuzziness implements ToXContent, Writeable {
                return 1;
            }
        }
        return Math.min(2, asInt());
    }

    public TimeValue asTimeValue() {
        if (this.equals(AUTO)) {
            return TimeValue.timeValueMillis(1);
        } else {
            return TimeValue.parseTimeValue(fuzziness.toString(), null, "fuzziness");
        }
    }

    public long asLong() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Long.parseLong(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (long) Double.parseDouble(fuzziness.toString());
        }
    }

    public int asInt() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Integer.parseInt(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (int) Float.parseFloat(fuzziness.toString());
        }
    }

    public short asShort() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Short.parseShort(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (short) Float.parseFloat(fuzziness.toString());
        }
    }

    public byte asByte() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Byte.parseByte(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (byte) Float.parseFloat(fuzziness.toString());
        }
    }

    public double asDouble() {
        if (this.equals(AUTO)) {
            return 1d;
        }
        return Double.parseDouble(fuzziness.toString());
        return Math.min(2, (int) asFloat());
    }

    public float asFloat() {

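The hunk above removes the typed accessors (asTimeValue, asLong, asInt, asShort, asByte, asDouble) and leaves the edit-distance computation clamping through asFloat(). A minimal sketch of the surviving behaviour, assuming Fuzziness.build accepts a numeric value as it does elsewhere in this codebase:

    import org.elasticsearch.common.unit.Fuzziness;

    public class FuzzinessDemo {
        public static void main(String[] args) {
            // A numeric fuzziness is clamped into Lucene's supported
            // edit-distance range [0, 2], matching the surviving
            // `Math.min(2, (int) asFloat())` line above.
            Fuzziness fuzziness = Fuzziness.build(5);
            System.out.println(Math.min(2, (int) fuzziness.asFloat())); // 2
        }
    }
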
@@ -54,6 +54,8 @@ public class Iterables {
        }
    }

    /** Flattens the two level {@code Iterable} into a single {@code Iterable}. Note that this pre-caches the values from the outer {@code
     * Iterable}, but not the values from the inner one. */
    public static <T> Iterable<T> flatten(Iterable<? extends Iterable<T>> inputs) {
        Objects.requireNonNull(inputs);
        return new FlattenedIterables<>(inputs);

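A short usage sketch of the flatten helper added above (invented data; assumes the class lives at org.elasticsearch.common.util.iterable.Iterables):

    import java.util.Arrays;
    import java.util.List;
    import org.elasticsearch.common.util.iterable.Iterables;

    public class FlattenDemo {
        public static void main(String[] args) {
            List<List<Integer>> nested = Arrays.asList(
                    Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(4, 5, 6));
            // Prints 1 through 6; the outer iterable is pre-cached, the inner
            // ones are only consumed while iterating.
            for (int value : Iterables.flatten(nested)) {
                System.out.println(value);
            }
        }
    }
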
@@ -201,7 +201,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen

        this.joinThreadControl = new JoinThreadControl(threadPool);

        transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
        transportService.registerRequestHandler(
            DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
    }

    @Override

@@ -832,7 +833,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
        try {
            membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
        } catch (Throwable e) {
            logger.warn("failed to validate incoming join request from node [{}]", node);
            logger.warn("failed to validate incoming join request from node [{}]", e, node);
            callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
            return;
        }

@@ -81,7 +81,8 @@ public class MasterFaultDetection extends FaultDetection {

        logger.debug("[master] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount);

        transportService.registerRequestHandler(MASTER_PING_ACTION_NAME, MasterPingRequest::new, ThreadPool.Names.SAME, new MasterPingRequestHandler());
        transportService.registerRequestHandler(
            MASTER_PING_ACTION_NAME, MasterPingRequest::new, ThreadPool.Names.SAME, false, false, new MasterPingRequestHandler());
    }

    public DiscoveryNode masterNode() {

@@ -72,7 +72,8 @@ public class NodesFaultDetection extends FaultDetection {

        logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount);

        transportService.registerRequestHandler(PING_ACTION_NAME, PingRequest::new, ThreadPool.Names.SAME, new PingRequestHandler());
        transportService.registerRequestHandler(
            PING_ACTION_NAME, PingRequest::new, ThreadPool.Names.SAME, false, false, new PingRequestHandler());
    }

    public void setLocalNode(DiscoveryNode localNode) {

@@ -20,6 +20,7 @@
package org.elasticsearch.gateway;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractComponent;

@@ -68,7 +69,7 @@ public class DanglingIndicesState extends AbstractComponent {
     * Process dangling indices based on the provided meta data, handling cleanup, finding
     * new dangling indices, and allocating outstanding ones.
     */
    public void processDanglingIndices(MetaData metaData) {
    public void processDanglingIndices(final MetaData metaData) {
        if (nodeEnv.hasNodeFile() == false) {
            return;
        }

@@ -107,7 +108,7 @@ public class DanglingIndicesState extends AbstractComponent {
     * Finds ({@link #findNewAndAddDanglingIndices}) and adds the new dangling indices
     * to the currently tracked dangling indices.
     */
    void findNewAndAddDanglingIndices(MetaData metaData) {
    void findNewAndAddDanglingIndices(final MetaData metaData) {
        danglingIndices.putAll(findNewDanglingIndices(metaData));
    }

@@ -116,7 +117,7 @@ public class DanglingIndicesState extends AbstractComponent {
     * that have state on disk, but are not part of the provided meta data, or not detected
     * as dangled already.
     */
    Map<Index, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
    Map<Index, IndexMetaData> findNewDanglingIndices(final MetaData metaData) {
        final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
        for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
            excludeIndexPathIds.add(cursor.value.getIndex().getUUID());

@@ -125,13 +126,18 @@ public class DanglingIndicesState extends AbstractComponent {
        try {
            final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
            Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
            final IndexGraveyard graveyard = metaData.indexGraveyard();
            for (IndexMetaData indexMetaData : indexMetaDataList) {
                if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
                    logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
                        indexMetaData.getIndex());
                } else if (graveyard.containsIndex(indexMetaData.getIndex())) {
                    logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
                        "index tombstones. This situation is likely caused by copying over the data directory for an index " +
                        "that was previously deleted.", indexMetaData.getIndex());
                } else {
                    logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state",
                        indexMetaData.getIndex());
                    logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                        "auto import to cluster state", indexMetaData.getIndex());
                    newIndices.put(indexMetaData.getIndex(), indexMetaData);
                }
            }

@@ -151,17 +157,19 @@ public class DanglingIndicesState extends AbstractComponent {
            return;
        }
        try {
            allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), new LocalAllocateDangledIndices.Listener() {
                @Override
                public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
                    logger.trace("allocated dangled");
                }
            allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())),
                new LocalAllocateDangledIndices.Listener() {
                    @Override
                    public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
                        logger.trace("allocated dangled");
                    }

                @Override
                public void onFailure(Throwable e) {
                    logger.info("failed to send allocated dangled", e);
                    @Override
                    public void onFailure(Throwable e) {
                        logger.info("failed to send allocated dangled", e);
                    }
                }
            });
            );
        } catch (Throwable e) {
            logger.warn("failed to send allocate dangled", e);
        }

@@ -20,7 +20,6 @@
package org.elasticsearch.http.netty.cors;

import org.elasticsearch.common.Strings;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;

@@ -31,6 +30,7 @@ import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;

import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;

@@ -38,8 +38,8 @@ import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTRO
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.HOST;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ORIGIN;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.USER_AGENT;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.VARY;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;

@@ -55,8 +55,9 @@ import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
public class CorsHandler extends SimpleChannelUpstreamHandler {

    public static final String ANY_ORIGIN = "*";
    private final CorsConfig config;
    private static Pattern SCHEME_PATTERN = Pattern.compile("^https?://");

    private final CorsConfig config;
    private HttpRequest request;

    /**

@@ -73,15 +74,13 @@ public class CorsHandler extends SimpleChannelUpstreamHandler {
    public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws Exception {
        if (config.isCorsSupportEnabled() && e.getMessage() instanceof HttpRequest) {
            request = (HttpRequest) e.getMessage();
            if (RestUtils.isBrowser(request.headers().get(USER_AGENT))) {
                if (isPreflightRequest(request)) {
                    handlePreflight(ctx, request);
                    return;
                }
                if (config.isShortCircuit() && !validateOrigin()) {
                    forbidden(ctx, request);
                    return;
                }
            if (isPreflightRequest(request)) {
                handlePreflight(ctx, request);
                return;
            }
            if (config.isShortCircuit() && !validateOrigin()) {
                forbidden(ctx, request);
                return;
            }
        }
        super.messageReceived(ctx, e);

@@ -96,7 +95,7 @@ public class CorsHandler extends SimpleChannelUpstreamHandler {
        final String originHeaderVal;
        if (config.isAnyOriginSupported()) {
            originHeaderVal = ANY_ORIGIN;
        } else if (config.isOriginAllowed(originHeader)) {
        } else if (config.isOriginAllowed(originHeader) || isSameOrigin(originHeader, request.headers().get(HOST))) {
            originHeaderVal = originHeader;
        } else {
            originHeaderVal = null;

@@ -129,6 +128,17 @@
            .addListener(ChannelFutureListener.CLOSE);
    }

    private static boolean isSameOrigin(final String origin, final String host) {
        if (Strings.isNullOrEmpty(host) == false) {
            // strip protocol from origin
            final String originDomain = SCHEME_PATTERN.matcher(origin).replaceFirst("");
            if (host.equals(originDomain)) {
                return true;
            }
        }
        return false;
    }

    /**
     * This is a non CORS specification feature which enables the setting of preflight
     * response headers that might be required by intermediaries.

@@ -179,6 +189,11 @@
            return true;
        }

        // if the origin is the same as the host of the request, then allow
        if (isSameOrigin(origin, request.headers().get(HOST))) {
            return true;
        }

        return config.isOriginAllowed(origin);
    }

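The same-origin shortcut added above simply strips the scheme from the Origin header and compares the remainder to the Host header. A standalone sketch of that check in plain Java (class and method names invented):

    import java.util.regex.Pattern;

    public class SameOriginCheck {
        private static final Pattern SCHEME_PATTERN = Pattern.compile("^https?://");

        // True when the Origin header, minus its scheme, equals the Host header,
        // e.g. origin "http://localhost:9200" matches host "localhost:9200".
        static boolean isSameOrigin(String origin, String host) {
            if (host == null || host.isEmpty()) {
                return false;
            }
            return host.equals(SCHEME_PATTERN.matcher(origin).replaceFirst(""));
        }

        public static void main(String[] args) {
            System.out.println(isSameOrigin("http://localhost:9200", "localhost:9200")); // true
        }
    }
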
@@ -48,9 +48,9 @@ public final class FingerprintAnalyzer extends Analyzer {
        final Tokenizer tokenizer = new StandardTokenizer();
        TokenStream stream = tokenizer;
        stream = new LowerCaseFilter(stream);
        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
        stream = new StopFilter(stream, stopWords);
        stream = new FingerprintFilter(stream, maxOutputSize, separator);
        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
        return new TokenStreamComponents(tokenizer, stream);
    }
}

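The reorder above means stop words are now removed before the fingerprint is built, and ASCII folding happens last in the chain. For reference, this is how any Lucene analyzer's output can be inspected; StandardAnalyzer stands in here purely to keep the sketch self-contained:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class AnalyzeDemo {
        public static void main(String[] args) throws Exception {
            try (Analyzer analyzer = new StandardAnalyzer();
                 TokenStream ts = analyzer.tokenStream("field", "Héllo and the WORLD hello")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    System.out.println(term); // one token per line
                }
                ts.end();
            }
        }
    }
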
@@ -23,6 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -454,19 +455,23 @@ final class DocumentParser {

    private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException {
        assert currentFieldName != null;
        context.path().add(currentFieldName);

        ObjectMapper update = null;
        Mapper objectMapper = getMapper(mapper, currentFieldName);
        if (objectMapper != null) {
            context.path().add(currentFieldName);
            parseObjectOrField(context, objectMapper);
            context.path().remove();
        } else {
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(mapper, context);

            final String[] paths = currentFieldName.split("\\.");
            currentFieldName = paths[paths.length - 1];
            Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, mapper);
            ObjectMapper parentMapper = parentMapperTuple.v2();
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context);
            if (dynamic == ObjectMapper.Dynamic.STRICT) {
                throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName);
            } else if (dynamic == ObjectMapper.Dynamic.TRUE) {
                // remove the current field name from path, since template search and the object builder add it as well...
                context.path().remove();
                Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
                if (builder == null) {
                    builder = new ObjectMapper.Builder(currentFieldName).enabled(true);

@@ -476,13 +481,16 @@ final class DocumentParser {
            context.addDynamicMapper(objectMapper);
            context.path().add(currentFieldName);
            parseObjectOrField(context, objectMapper);
            context.path().remove();
        } else {
            // not dynamic, read everything up to end object
            context.parser().skipChildren();
        }
        for (int i = 0; i < parentMapperTuple.v1(); i++) {
            context.path().remove();
        }
    }

    context.path().remove();
    return update;
}

@@ -500,6 +508,11 @@ final class DocumentParser {
        }
    } else {

        final String[] paths = arrayFieldName.split("\\.");
        arrayFieldName = paths[paths.length - 1];
        lastFieldName = arrayFieldName;
        Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);
        parentMapper = parentMapperTuple.v2();
        ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context);
        if (dynamic == ObjectMapper.Dynamic.STRICT) {
            throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName);

@@ -507,23 +520,26 @@ final class DocumentParser {
            Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object");
            if (builder == null) {
                parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                return;
            }
            Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
            mapper = builder.build(builderContext);
            assert mapper != null;
            if (mapper instanceof ArrayValueMapperParser) {
                context.addDynamicMapper(mapper);
                context.path().add(arrayFieldName);
                parseObjectOrField(context, mapper);
                context.path().remove();
            } else {
                parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                mapper = builder.build(builderContext);
                assert mapper != null;
                if (mapper instanceof ArrayValueMapperParser) {
                    context.addDynamicMapper(mapper);
                    context.path().add(arrayFieldName);
                    parseObjectOrField(context, mapper);
                    context.path().remove();
                } else {
                    parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                }
            }
        } else {
            // TODO: shouldn't this skip, not parse?
            parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
        }
        for (int i = 0; i < parentMapperTuple.v1(); i++) {
            context.path().remove();
        }
    }
}

@@ -555,7 +571,15 @@ final class DocumentParser {
    if (mapper != null) {
        parseObjectOrField(context, mapper);
    } else {

        final String[] paths = currentFieldName.split("\\.");
        currentFieldName = paths[paths.length - 1];
        Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);
        parentMapper = parentMapperTuple.v2();
        parseDynamicValue(context, parentMapper, currentFieldName, token);
        for (int i = 0; i < parentMapperTuple.v1(); i++) {
            context.path().remove();
        }
    }
}

@@ -814,46 +838,61 @@

        final String[] paths = field.split("\\.");
        final String fieldName = paths[paths.length-1];
        ObjectMapper mapper = context.root();
        ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
        if (paths.length > 1) {
            ObjectMapper parent = context.root();
            for (int i = 0; i < paths.length-1; i++) {
                mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i]));
                if (mapper == null) {
                    // One mapping is missing, check if we are allowed to create a dynamic one.
                    ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context);

                    switch (dynamic) {
                        case STRICT:
                            throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
                        case TRUE:
                            Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
                            if (builder == null) {
                                builder = new ObjectMapper.Builder(paths[i]).enabled(true);
                            }
                            Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                            mapper = (ObjectMapper) builder.build(builderContext);
                            if (mapper.nested() != ObjectMapper.Nested.NO) {
                                throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
                            }
                            context.addDynamicMapper(mapper);
                            break;
                        case FALSE:
                            // Maybe we should log something to tell the user that the copy_to is ignored in this case.
                            break;

                    }
                }
                context.path().add(paths[i]);
                mappers[i] = mapper;
                parent = mapper;
            }
        }
        Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, null);
        ObjectMapper mapper = parentMapperTuple.v2();
        parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
        for (int i = 0; i < parentMapperTuple.v1(); i++) {
            context.path().remove();
        }
    }
}

private static Tuple<Integer, ObjectMapper> getDynamicParentMapper(ParseContext context, final String[] paths,
        ObjectMapper currentParent) {
    ObjectMapper mapper = currentParent == null ? context.root() : currentParent;
    int pathsAdded = 0;
    ObjectMapper parent = mapper;
    for (int i = 0; i < paths.length-1; i++) {
        String currentPath = context.path().pathAsText(paths[i]);
        FieldMapper existingFieldMapper = context.docMapper().mappers().getMapper(currentPath);
        if (existingFieldMapper != null) {
            throw new MapperParsingException(
                "Could not dynamically add mapping for field [{}]. Existing mapping for [{}] must be of type object but found [{}].",
                null, String.join(".", paths), currentPath, existingFieldMapper.fieldType.typeName());
        }
        mapper = context.docMapper().objectMappers().get(currentPath);
        if (mapper == null) {
            // One mapping is missing, check if we are allowed to create a dynamic one.
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context);

            switch (dynamic) {
                case STRICT:
                    throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
                case TRUE:
                    Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
                    if (builder == null) {
                        builder = new ObjectMapper.Builder(paths[i]).enabled(true);
                    }
                    Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                    mapper = (ObjectMapper) builder.build(builderContext);
                    if (mapper.nested() != ObjectMapper.Nested.NO) {
                        throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
                    }
                    context.addDynamicMapper(mapper);
                    break;
                case FALSE:
                    // Should not dynamically create any more mappers so return the last mapper
                    return new Tuple<Integer, ObjectMapper>(pathsAdded, parent);

            }
        }
        context.path().add(paths[i]);
        pathsAdded++;
        parent = mapper;
    }
    return new Tuple<Integer, ObjectMapper>(pathsAdded, mapper);
}

// find what the dynamic setting is given the current parse context and parent
private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper, ParseContext context) {
    ObjectMapper.Dynamic dynamic = parentMapper.dynamic();

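The refactor above centralizes dotted-field handling: a name such as "foo.bar.baz" is split on dots, getDynamicParentMapper resolves or dynamically creates the intermediate object mappers, and the number of path components pushed is returned so the caller can pop them again. A toy sketch of that walk, outside of any Elasticsearch types (all names invented):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class DottedPathDemo {
        public static void main(String[] args) {
            // Resolve "foo.bar.baz": push each intermediate object name, remember
            // how many were pushed, handle the leaf, then pop them again.
            String[] paths = "foo.bar.baz".split("\\.");
            Deque<String> contextPath = new ArrayDeque<>(); // stands in for context.path()
            int pathsAdded = 0;
            for (int i = 0; i < paths.length - 1; i++) {
                contextPath.addLast(paths[i]);
                pathsAdded++;
            }
            String leafField = paths[paths.length - 1];     // "baz" carries the value
            System.out.println(contextPath + " -> " + leafField);
            for (int i = 0; i < pathsAdded; i++) {
                contextPath.removeLast();                   // stands in for context.path().remove()
            }
        }
    }
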
@@ -25,21 +25,16 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -312,13 +307,6 @@ public abstract class MappedFieldType extends FieldType {
        return value;
    }

    /** Returns the indexed value used to construct search "values".
     * This method is used for the default implementations of most
     * query factory methods such as {@link #termQuery}. */
    protected BytesRef indexedValueForSearch(Object value) {
        return BytesRefs.toBytesRef(value);
    }

    /** Returns true if the field is searchable.
     *
     */

@@ -342,50 +330,33 @@
     * The default implementation returns a {@link TermQuery} over the value bytes,
     * boosted by {@link #boost()}.
     * @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */
    public Query termQuery(Object value, @Nullable QueryShardContext context) {
        failIfNotIndexed();
        TermQuery query = new TermQuery(new Term(name(), indexedValueForSearch(value)));
        if (boost == 1f ||
            (context != null && context.indexVersionCreated().before(Version.V_5_0_0_alpha1))) {
            return query;
        }
        return new BoostQuery(query, boost);
    }
    public abstract Query termQuery(Object value, @Nullable QueryShardContext context);

    public Query termsQuery(List values, @Nullable QueryShardContext context) {
        failIfNotIndexed();
        BytesRef[] bytesRefs = new BytesRef[values.size()];
        for (int i = 0; i < bytesRefs.length; i++) {
            bytesRefs[i] = indexedValueForSearch(values.get(i));
    /** Build a constant-scoring query that matches all values. The default implementation uses a
     * {@link ConstantScoreQuery} around a {@link BooleanQuery} whose {@link Occur#SHOULD} clauses
     * are generated with {@link #termQuery}. */
    public Query termsQuery(List<?> values, @Nullable QueryShardContext context) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (Object value : values) {
            builder.add(termQuery(value, context), Occur.SHOULD);
        }
        return new TermsQuery(name(), bytesRefs);
        return new ConstantScoreQuery(builder.build());
    }

    public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
        failIfNotIndexed();
        return new TermRangeQuery(name(),
            lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
            upperTerm == null ? null : indexedValueForSearch(upperTerm),
            includeLower, includeUpper);
        throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries");
    }

    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        failIfNotIndexed();
        return new FuzzyQuery(new Term(name(), indexedValueForSearch(value)),
            fuzziness.asDistance(BytesRefs.toString(value)), prefixLength, maxExpansions, transpositions);
        throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
    }

    public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
        failIfNotIndexed();
        PrefixQuery query = new PrefixQuery(new Term(name(), indexedValueForSearch(value)));
        if (method != null) {
            query.setRewriteMethod(method);
        }
        return query;
    public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
        throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
    }

    public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
        throw new QueryShardException(context, "Can only use regular expression on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
    public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
        throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
    }

    public Query nullValueQuery() {

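The new default termsQuery composes one termQuery clause per value under a constant score. An equivalent standalone Lucene shape (field and values invented):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class TermsAsShouldDemo {
        // Mirrors the new default termsQuery: one SHOULD clause per value,
        // wrapped so every match scores the same.
        static Query termsQuery(String field, String... values) {
            BooleanQuery.Builder builder = new BooleanQuery.Builder();
            for (String value : values) {
                builder.add(new TermQuery(new Term(field, value)), Occur.SHOULD);
            }
            return new ConstantScoreQuery(builder.build());
        }
    }
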
@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper;

import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.QueryShardContext;

/** Base class for {@link MappedFieldType} implementations that use the same
 * representation for internal index terms as the external representation so
 * that partial matching queries such as prefix, wildcard and fuzzy queries
 * can be implemented. */
public abstract class StringFieldType extends TermBasedFieldType {

    public StringFieldType() {}

    protected StringFieldType(MappedFieldType ref) {
        super(ref);
    }

    public Query termsQuery(List<?> values, QueryShardContext context) {
        failIfNotIndexed();
        BytesRef[] bytesRefs = new BytesRef[values.size()];
        for (int i = 0; i < bytesRefs.length; i++) {
            bytesRefs[i] = indexedValueForSearch(values.get(i));
        }
        return new TermsQuery(name(), bytesRefs);
    }

    @Override
    public final Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions,
            boolean transpositions) {
        failIfNotIndexed();
        return new FuzzyQuery(new Term(name(), indexedValueForSearch(value)),
            fuzziness.asDistance(BytesRefs.toString(value)), prefixLength, maxExpansions, transpositions);
    }

    @Override
    public final Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) {
        failIfNotIndexed();
        PrefixQuery query = new PrefixQuery(new Term(name(), indexedValueForSearch(value)));
        if (method != null) {
            query.setRewriteMethod(method);
        }
        return query;
    }

    @Override
    public final Query regexpQuery(String value, int flags, int maxDeterminizedStates,
            MultiTermQuery.RewriteMethod method, QueryShardContext context) {
        failIfNotIndexed();
        RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
        if (method != null) {
            query.setRewriteMethod(method);
        }
        return query;
    }

    @Override
    public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
        failIfNotIndexed();
        return new TermRangeQuery(name(),
            lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
            upperTerm == null ? null : indexedValueForSearch(upperTerm),
            includeLower, includeUpper);
    }
}

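Both prefixQuery and regexpQuery above honour an optional MultiTermQuery.RewriteMethod. A small Lucene sketch of what that parameter does (field and prefix invented):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.MultiTermQuery;
    import org.apache.lucene.search.PrefixQuery;

    public class PrefixRewriteDemo {
        public static void main(String[] args) {
            // Mirrors the `if (method != null)` branch above: the rewrite method
            // controls how the multi-term query expands at search time.
            PrefixQuery query = new PrefixQuery(new Term("user.name", "jo"));
            query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
            System.out.println(query);
        }
    }
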
@@ -0,0 +1,72 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper;

import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.index.query.QueryShardContext;

/** Base {@link MappedFieldType} implementation for a field that is indexed
 * with the inverted index. */
public abstract class TermBasedFieldType extends MappedFieldType {

    public TermBasedFieldType() {}

    protected TermBasedFieldType(MappedFieldType ref) {
        super(ref);
    }

    /** Returns the indexed value used to construct search "values".
     * This method is used for the default implementations of most
     * query factory methods such as {@link #termQuery}. */
    protected BytesRef indexedValueForSearch(Object value) {
        return BytesRefs.toBytesRef(value);
    }

    @Override
    public Query termQuery(Object value, QueryShardContext context) {
        failIfNotIndexed();
        TermQuery query = new TermQuery(new Term(name(), indexedValueForSearch(value)));
        if (boost() == 1f ||
            (context != null && context.indexVersionCreated().before(Version.V_5_0_0_alpha1))) {
            return query;
        }
        return new BoostQuery(query, boost());
    }

    @Override
    public Query termsQuery(List<?> values, QueryShardContext context) {
        failIfNotIndexed();
        BytesRef[] bytesRefs = new BytesRef[values.size()];
        for (int i = 0; i < bytesRefs.length; i++) {
            bytesRefs[i] = indexedValueForSearch(values.get(i));
        }
        return new TermsQuery(name(), bytesRefs);
    }

}

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.core;
import com.carrotsearch.hppc.ObjectArrayList;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;

@@ -40,6 +41,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.List;

@@ -135,6 +138,11 @@ public class BinaryFieldMapper extends FieldMapper {
            failIfNoDocValues();
            return new BytesBinaryDVIndexFieldData.Builder();
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "Binary fields do not support searching");
        }
    }

    protected BinaryFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,

@@ -22,10 +22,11 @@ package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -38,6 +39,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -48,7 +50,6 @@ import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

/**
 * A field mapper for boolean fields.

@@ -119,7 +120,7 @@ public class BooleanFieldMapper extends FieldMapper {
        }
    }

    public static final class BooleanFieldType extends MappedFieldType {
    public static final class BooleanFieldType extends TermBasedFieldType {

        public BooleanFieldType() {}

@@ -200,6 +201,15 @@ public class BooleanFieldMapper extends FieldMapper {
        }
        return DocValueFormat.BOOLEAN;
    }

    @Override
    public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
        failIfNotIndexed();
        return new TermRangeQuery(name(),
            lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
            upperTerm == null ? null : indexedValueForSearch(upperTerm),
            includeLower, includeUpper);
    }
}

protected BooleanFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,

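The new rangeQuery override works because boolean values are indexed as single terms; if memory serves these are the terms "F" and "T", so a lexicographic term range covers them. A hedged sketch (field name invented):

    import org.apache.lucene.search.TermRangeQuery;
    import org.apache.lucene.util.BytesRef;

    public class BooleanRangeDemo {
        public static void main(String[] args) {
            // Assuming booleans index as "F"/"T", the inclusive range ["F","T"]
            // matches both values of the field.
            TermRangeQuery query = new TermRangeQuery("is_published",
                    new BytesRef("F"), new BytesRef("T"), true, true);
            System.out.println(query);
        }
    }
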
@@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.search.suggest.completion.CompletionSuggester;
import org.elasticsearch.search.suggest.completion.context.ContextMapping;

@@ -178,7 +179,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
        }
    }

    public static final class CompletionFieldType extends MappedFieldType {
    public static final class CompletionFieldType extends TermBasedFieldType {

        private static PostingsFormat postingsFormat;

@@ -40,6 +40,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperException;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.search.suggest.completion2x.AnalyzingCompletionLookupProvider;
import org.elasticsearch.search.suggest.completion2x.Completion090PostingsFormat;
import org.elasticsearch.search.suggest.completion2x.CompletionTokenStream;

@@ -231,7 +232,7 @@ public class CompletionFieldMapper2x extends FieldMapper {
        }
    }

    public static final class CompletionFieldType extends MappedFieldType {
    public static final class CompletionFieldType extends TermBasedFieldType {
        private PostingsFormat postingsFormat;
        private AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
        private SortedMap<String, ContextMapping> contextMapping = ContextMapping.EMPTY_MAPPING;

@@ -316,21 +316,6 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
        return query;
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        failIfNotIndexed();
        long baseLo = parseToMilliseconds(value, false, null, dateMathParser);
        long baseHi = parseToMilliseconds(value, true, null, dateMathParser);
        long delta;
        try {
            delta = fuzziness.asTimeValue().millis();
        } catch (Exception e) {
            // not a time format
            delta = fuzziness.asLong();
        }
        return LongPoint.newRangeQuery(name(), baseLo - delta, baseHi + delta);
    }

    @Override
    public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
        failIfNotIndexed();

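The fuzzyQuery override removed above effectively turned date fuzziness into a point range: the parsed instant widened by the fuzziness expressed in milliseconds. A standalone sketch of that translation (timestamp and delta invented):

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.search.Query;

    public class DateFuzzAsRangeDemo {
        public static void main(String[] args) {
            long base = 1_464_000_000_000L;      // hypothetical parsed timestamp
            long delta = 2L * 24 * 3600 * 1000;  // fuzziness of "2d" in millis
            Query query = LongPoint.newRangeQuery("timestamp", base - delta, base + delta);
            System.out.println(query);
        }
    }
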
@@ -22,13 +22,8 @@ package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -40,8 +35,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
import java.util.Iterator;

@@ -143,7 +138,7 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
        }
    }

    public static final class KeywordFieldType extends MappedFieldType {
    public static final class KeywordFieldType extends StringFieldType {

        public KeywordFieldType() {}

@@ -173,17 +168,6 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
            failIfNoDocValues();
            return new DocValuesIndexFieldData.Builder();
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates,
                @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            failIfNotIndexed();
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

@@ -33,7 +33,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -159,16 +158,6 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        byte iValue = parseValue(value);
        byte iSim = fuzziness.asByte();
        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Long stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -358,22 +358,6 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {
        return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        long iValue = parseValue(value);
        long iSim;
        try {
            iSim = fuzziness.asTimeValue().millis();
        } catch (Exception e) {
            // not a time format
            iSim = fuzziness.asLong();
        }
        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Date stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -36,7 +36,6 @@ import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -170,16 +169,6 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        double iValue = parseDoubleValue(value);
        double iSim = fuzziness.asDouble();
        return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Double stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -35,7 +35,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -155,16 +154,6 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        float iValue = parseValue(value);
        final float iSim = fuzziness.asFloat();
        return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Double stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -34,7 +34,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -135,8 +134,7 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {

    @Override
    public String typeName() {
        // TODO: this should be the same as the mapper type name, except fielddata expects int...
        return "int";
        return "integer";
    }

    @Override

@@ -159,16 +157,6 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        int iValue = parseValue(value);
        int iSim = fuzziness.asInt();
        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Long stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -34,7 +34,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -158,16 +157,6 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        long iValue = parseLongValue(value);
        final long iSim = fuzziness.asLong();
        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -27,20 +27,19 @@ import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -121,7 +120,7 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All
        protected abstract int maxPrecisionStep();
    }

    public static abstract class NumberFieldType extends MappedFieldType {
    public static abstract class NumberFieldType extends TermBasedFieldType {

        public NumberFieldType(LegacyNumericType numericType) {
            setTokenized(false);

@@ -146,9 +145,6 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All

        public abstract NumberFieldType clone();

        @Override
        public abstract Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions);

        @Override
        public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) {
            if (timeZone != null) {

@@ -163,16 +163,6 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper {
            includeLower, includeUpper);
    }

    @Override
    public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
        short iValue = parseValue(value);
        short iSim = fuzziness.asShort();
        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
            iValue - iSim,
            iValue + iSim,
            true, true);
    }

    @Override
    public FieldStats.Long stats(IndexReader reader) throws IOException {
        int maxDoc = reader.maxDoc();

@@ -40,7 +40,6 @@ import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

@@ -233,13 +232,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return FloatPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            float base = parse(value);
            float delta = fuzziness.asFloat();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@@ -324,13 +316,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return DoublePoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            double base = parse(value);
            double delta = fuzziness.asFloat();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@@ -407,11 +392,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            return INTEGER.fuzzyQuery(field, value, fuzziness);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@@ -473,11 +453,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            return INTEGER.fuzzyQuery(field, value, fuzziness);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@@ -560,13 +535,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return IntPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            int base = parse(value);
            int delta = fuzziness.asInt();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@@ -663,13 +631,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return LongPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            long base = parse(value);
            long delta = fuzziness.asLong();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                boolean indexed, boolean docValued, boolean stored) {

@ -722,7 +683,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
abstract Query termsQuery(String field, List<Object> values);
|
||||
abstract Query rangeQuery(String field, Object lowerTerm, Object upperTerm,
|
||||
boolean includeLower, boolean includeUpper);
|
||||
abstract Query fuzzyQuery(String field, Object value, Fuzziness fuzziness);
|
||||
abstract Number parse(XContentParser parser, boolean coerce) throws IOException;
|
||||
abstract Number parse(Object value);
|
||||
public abstract List<Field> createFields(String name, Number value, boolean indexed,
|
||||
|
@ -791,13 +751,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
|
|||
return query;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength,
|
||||
int maxExpansions, boolean transpositions) {
|
||||
failIfNotIndexed();
|
||||
return type.fuzzyQuery(name(), value, fuzziness);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldStats stats(IndexReader reader) throws IOException {
|
||||
return type.stats(reader, name(), isSearchable(), isAggregatable());
|
||||
|
|
|
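Aside: every numeric `fuzzyQuery` in the hunks above reduces fuzziness to a symmetric range around the parsed value. A minimal standalone sketch of that idea, using a hypothetical helper (not code from this commit):

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.Query;

class NumericFuzzySketch {
    // For numeric fields, edit-distance style fuzziness degenerates to
    // the inclusive range [base - delta, base + delta].
    static Query fuzzyAsRange(String field, long base, long delta) {
        return LongPoint.newRangeQuery(field, base - delta, base + delta);
    }
}
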
@@ -355,7 +355,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
        }
    }

    public static final class StringFieldType extends MappedFieldType {
    public static final class StringFieldType extends org.elasticsearch.index.mapper.StringFieldType {

        private boolean fielddata;
        private double fielddataMinFrequency;

@@ -485,15 +485,6 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                    + "use significant memory.");
            }
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

@@ -21,12 +21,7 @@ package org.elasticsearch.index.mapper.core;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

@@ -39,8 +34,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
import java.util.Iterator;

@@ -172,7 +167,7 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
        }
    }

    public static final class TextFieldType extends MappedFieldType {
    public static final class TextFieldType extends StringFieldType {

        private boolean fielddata;
        private double fielddataMinFrequency;

@@ -300,17 +295,6 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
            }
            return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates,
                @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            failIfNotIndexed();
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

@@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.elasticsearch.Version;

@@ -47,6 +48,8 @@ import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -366,6 +369,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
        }
        return DocValueFormat.GEOHASH;
    }

    @Override
    public Query termQuery(Object value, QueryShardContext context) {
        throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead: [" + name() + "]");
    }
    }

    protected FieldMapper latMapper;

@@ -23,6 +23,7 @@ import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy;

@@ -46,6 +47,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.Iterator;

@@ -412,6 +415,10 @@ public class GeoShapeFieldMapper extends FieldMapper {
            throw new IllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]");
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead");
        }
    }

    protected Explicit<Boolean> coerce;

@@ -23,7 +23,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.all.AllEntries;
import org.elasticsearch.common.lucene.all.AllField;

@@ -36,6 +35,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;

@@ -177,7 +177,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class AllFieldType extends MappedFieldType {
    static final class AllFieldType extends StringFieldType {

        public AllFieldType() {
        }

@@ -23,7 +23,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -32,6 +31,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;

@@ -125,7 +125,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
        }
    }

    public static final class FieldNamesFieldType extends MappedFieldType {
    public static final class FieldNamesFieldType extends TermBasedFieldType {

        private boolean enabled = Defaults.ENABLED;

@@ -41,6 +41,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.query.QueryShardContext;

@@ -77,19 +78,6 @@ public class IdFieldMapper extends MetadataFieldMapper {
        }
    }

    public static class Builder extends MetadataFieldMapper.Builder<Builder, IdFieldMapper> {

        public Builder(MappedFieldType existing) {
            super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
        }

        @Override
        public IdFieldMapper build(BuilderContext context) {
            setupFieldType(context);
            return new IdFieldMapper(fieldType, context.indexSettings());
        }
    }

    public static class TypeParser implements MetadataFieldMapper.TypeParser {
        @Override
        public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {

@@ -102,7 +90,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class IdFieldType extends MappedFieldType {
    static final class IdFieldType extends TermBasedFieldType {

        public IdFieldType() {
        }

@@ -129,62 +117,14 @@ public class IdFieldMapper extends MetadataFieldMapper {

        @Override
        public Query termQuery(Object value, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.termQuery(value, context);
            }
            final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
            return new TermsQuery(UidFieldMapper.NAME, uids);
        }

        @Override
        public Query termsQuery(List values, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.termsQuery(values, context);
            }
            return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
        }

        @Override
        public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.prefixQuery(value, method, context);
            }
            Collection<String> queryTypes = context.queryTypes();
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (String queryType : queryTypes) {
                PrefixQuery prefixQuery = new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))));
                if (method != null) {
                    prefixQuery.setRewriteMethod(method);
                }
                query.add(prefixQuery, BooleanClause.Occur.SHOULD);
            }
            return query.build();
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.regexpQuery(value, flags, maxDeterminizedStates, method, context);
            }
            Collection<String> queryTypes = context.queryTypes();
            if (queryTypes.size() == 1) {
                RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))),
                    flags, maxDeterminizedStates);
                if (method != null) {
                    regexpQuery.setRewriteMethod(method);
                }
                return regexpQuery;
            }
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (String queryType : queryTypes) {
                RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags, maxDeterminizedStates);
                if (method != null) {
                    regexpQuery.setRewriteMethod(method);
                }
                query.add(regexpQuery, BooleanClause.Occur.SHOULD);
            }
            return query.build();
        }
    }

    private IdFieldMapper(Settings indexSettings, MappedFieldType existing) {

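Aside: the removed IdFieldType overrides above all follow one pattern: expand a query on _id into one per-type query on _uid and OR the clauses together. A minimal standalone sketch of that pattern, with hypothetical names (the "type#id" encoding matches how Uid builds _uid terms, but this is not the commit's own code):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import java.util.Collection;

class UidPrefixSketch {
    static Query prefixAcrossTypes(Collection<String> types, String idPrefix) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (String type : types) {
            // _uid terms are encoded as "type#id", so a per-type prefix
            // query is a prefix query on "type#idPrefix".
            builder.add(new PrefixQuery(new Term("_uid", type + "#" + idPrefix)),
                BooleanClause.Occur.SHOULD);
        }
        return builder.build();
    }
}
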
@@ -130,9 +130,6 @@ public class IndexFieldMapper extends MetadataFieldMapper {
     */
    @Override
    public Query termQuery(Object value, @Nullable QueryShardContext context) {
        if (context == null) {
            return super.termQuery(value, context);
        }
        if (isSameIndex(value, context.index().getName())) {
            return Queries.newMatchAllQuery();
        } else {

@@ -31,6 +31,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -194,7 +195,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
        public Query termsQuery(List values, @Nullable QueryShardContext context) {
            BytesRef[] ids = new BytesRef[values.size()];
            for (int i = 0; i < ids.length; i++) {
                ids[i] = indexedValueForSearch(values.get(i));
                ids[i] = BytesRefs.toBytesRef(values.get(i));
            }
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            query.add(new DocValuesTermsQuery(name(), ids), BooleanClause.Occur.MUST);

@@ -19,10 +19,8 @@

package org.elasticsearch.index.mapper.internal;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -31,6 +29,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;

import java.io.IOException;
import java.util.Iterator;

@@ -107,7 +106,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class RoutingFieldType extends MappedFieldType {
    static final class RoutingFieldType extends TermBasedFieldType {

        public RoutingFieldType() {
        }

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.internal;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;

@@ -39,6 +40,8 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.ArrayList;

@@ -167,6 +170,11 @@ public class SourceFieldMapper extends MetadataFieldMapper {
        public String typeName() {
            return CONTENT_TYPE;
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "The _source field is not searchable");
        }
    }

    private final boolean enabled;

@@ -40,6 +40,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;

@@ -73,19 +74,6 @@ public class TypeFieldMapper extends MetadataFieldMapper {
        }
    }

    public static class Builder extends MetadataFieldMapper.Builder<Builder, TypeFieldMapper> {

        public Builder(MappedFieldType existing) {
            super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
        }

        @Override
        public TypeFieldMapper build(BuilderContext context) {
            fieldType.setName(buildFullName(context));
            return new TypeFieldMapper(fieldType, context.indexSettings());
        }
    }

    public static class TypeParser implements MetadataFieldMapper.TypeParser {
        @Override
        public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {

@@ -98,7 +86,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class TypeFieldType extends MappedFieldType {
    static final class TypeFieldType extends StringFieldType {

        public TypeFieldType() {
        }

@@ -33,6 +33,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.index.mapper.Uid;

@@ -71,19 +72,6 @@ public class UidFieldMapper extends MetadataFieldMapper {
        }
    }

    public static class Builder extends MetadataFieldMapper.Builder<Builder, UidFieldMapper> {

        public Builder(MappedFieldType existing) {
            super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
        }

        @Override
        public UidFieldMapper build(BuilderContext context) {
            setupFieldType(context);
            return new UidFieldMapper(fieldType, defaultFieldType, context.indexSettings());
        }
    }

    public static class TypeParser implements MetadataFieldMapper.TypeParser {
        @Override
        public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {

@@ -96,7 +84,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class UidFieldType extends MappedFieldType {
    static final class UidFieldType extends TermBasedFieldType {

        public UidFieldType() {
        }

@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.internal;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;

@@ -30,6 +31,8 @@ import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.List;

@@ -54,18 +57,6 @@ public class VersionFieldMapper extends MetadataFieldMapper {
        }
    }

    public static class Builder extends MetadataFieldMapper.Builder<Builder, VersionFieldMapper> {

        public Builder() {
            super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
        }

        @Override
        public VersionFieldMapper build(BuilderContext context) {
            return new VersionFieldMapper(context.indexSettings());
        }
    }

    public static class TypeParser implements MetadataFieldMapper.TypeParser {
        @Override
        public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {

@@ -96,6 +87,11 @@ public class VersionFieldMapper extends MetadataFieldMapper {
        public String typeName() {
            return CONTENT_TYPE;
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "The _version field is not searchable");
        }
    }

    private VersionFieldMapper(Settings indexSettings) {

@@ -36,7 +36,6 @@ import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;

@@ -219,14 +218,6 @@ public class IpFieldMapper extends FieldMapper implements AllFieldMapper.Include
            return InetAddressPoint.newRangeQuery(name(), lower, upper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            failIfNotIndexed();
            InetAddress base = parse(value);
            int mask = fuzziness.asInt();
            return XInetAddressPoint.newPrefixQuery(name(), base, mask);
        }

        @Override
        public FieldStats.Ip stats(IndexReader reader) throws IOException {
            String field = name();

@@ -50,7 +50,6 @@ import org.elasticsearch.index.mapper.core.LegacyLongFieldMapper.CustomLongNumer
import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.bucket.range.ipv4.InternalIPv4Range;
import org.joda.time.DateTimeZone;

import java.io.IOException;

@@ -218,7 +217,7 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper {
            }
            if (fromTo != null) {
                return rangeQuery(fromTo[0] == 0 ? null : fromTo[0],
                    fromTo[1] == InternalIPv4Range.MAX_IP ? null : fromTo[1], true, false);
                    fromTo[1] == MAX_IP ? null : fromTo[1], true, false);
            }
        }
        return super.termQuery(value, context);

@@ -232,21 +231,6 @@ public class LegacyIpFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            long iValue = parseValue(value);
            long iSim;
            try {
                iSim = ipToLong(fuzziness.asString());
            } catch (IllegalArgumentException e) {
                iSim = fuzziness.asLong();
            }
            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.Arrays;

@@ -149,6 +150,11 @@ public class PercolatorFieldMapper extends FieldMapper {
        public String typeName() {
            return CONTENT_TYPE;
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "Percolator fields are not searchable directly, use a percolate query instead");
        }
    }

    private final boolean mapUnmappedFieldAsString;

@@ -655,6 +655,11 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
            throw new ParsingException(parser.getTokenLocation(), "No fields specified for multi_match query");
        }

        if (fuzziness != null && (type == Type.CROSS_FIELDS || type == Type.PHRASE || type == Type.PHRASE_PREFIX)) {
            throw new ParsingException(parser.getTokenLocation(),
                "Fuziness not allowed for type [" + type.parseField.getPreferredName() + "]");
        }

        return new MultiMatchQueryBuilder(value)
            .fields(fieldsBoosts)
            .type(type)

@@ -231,7 +231,7 @@ public class MatchQuery {
     */
    boolean noForcedAnalyzer = this.analyzer == null;
    if (fieldType != null && fieldType.tokenized() == false && noForcedAnalyzer) {
        return termQuery(fieldType, value);
        return blendTermQuery(new Term(fieldName, value.toString()), fieldType);
    }

    Analyzer analyzer = getAnalyzer(fieldType);

@@ -265,15 +265,6 @@ public class MatchQuery {
        }
    }

    /**
     * Creates a TermQuery-like-query for MappedFieldTypes that don't support
     * QueryBuilder which is very string-ish. Just delegates to the
     * MappedFieldType for MatchQuery but gets more complex for blended queries.
     */
    protected Query termQuery(MappedFieldType fieldType, Object value) {
        return termQuery(fieldType, value, lenient);
    }

    protected final Query termQuery(MappedFieldType fieldType, Object value, boolean lenient) {
        try {
            return fieldType.termQuery(value, context);

@@ -366,8 +357,11 @@ public class MatchQuery {
            }
            return query;
        } catch (RuntimeException e) {
            return new TermQuery(term);
            // See long comment below about why we're lenient here.
            if (lenient) {
                return new TermQuery(term);
            } else {
                throw e;
            }
        }
    }
    int edits = fuzziness.asDistance(term.text());

@@ -376,23 +370,7 @@ public class MatchQuery {
        return query;
    }
    if (fieldType != null) {
        /*
         * Its a bit weird to default to lenient here but its the backwards
         * compatible. It makes some sense when you think about what we are
         * doing here: at this point the user has forced an analyzer and
         * passed some string to the match query. We cut it up using the
         * analyzer and then tried to cram whatever we get into the field.
         * lenient=true here means that we try the terms in the query and on
         * the off chance that they are actually valid terms then we
         * actually try them. lenient=false would mean that we blow up the
         * query if they aren't valid terms. "valid" in this context means
         * "parses properly to something of the type being queried." So "1"
         * is a valid number, etc.
         *
         * We use the text form here because we we've received the term from
         * an analyzer that cut some string into text.
         */
        Query query = termQuery(fieldType, term.bytes(), true);
        Query query = termQuery(fieldType, term.bytes(), lenient);
        if (query != null) {
            return query;
        }

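Aside: the lenient handling threaded through MatchQuery above boils down to one pattern: try the field type's typed term query, and only when lenient is requested swallow the failure and fall back to a plain TermQuery on the raw term. A minimal standalone sketch with hypothetical names, not the commit's own code:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import java.util.function.Function;

class LenientTermQuerySketch {
    static Query termQueryOrFallback(Term term, Function<Term, Query> typedQuery, boolean lenient) {
        try {
            return typedQuery.apply(term);
        } catch (RuntimeException e) {
            if (lenient) {
                return new TermQuery(term); // best-effort match on the raw bytes
            }
            throw e; // strict mode: surface the parsing failure
        }
    }
}
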
@@ -303,15 +303,6 @@ public class MultiMatchQuery extends MatchQuery {
        return queryBuilder.blendTerm(term, fieldType);
    }

    @Override
    protected Query termQuery(MappedFieldType fieldType, Object value) {
        if (queryBuilder == null) {
            // Can be null when the MultiMatchQuery collapses into a MatchQuery
            return super.termQuery(fieldType, value);
        }
        return queryBuilder.termQuery(fieldType, value);
    }

    static final class FieldAndFieldType {
        final MappedFieldType fieldType;
        final float boost;

@@ -32,7 +32,7 @@ import java.io.IOException;
public class IndexAlreadyExistsException extends ElasticsearchException {

    public IndexAlreadyExistsException(Index index) {
        this(index, "already exists");
        this(index, "index " + index.toString() + " already exists");
    }

    public IndexAlreadyExistsException(Index index, String message) {

@@ -48,4 +48,4 @@ public class IndexAlreadyExistsException extends ElasticsearchException {
    public RestStatus status() {
        return RestStatus.BAD_REQUEST;
    }
}
}
}

@@ -93,7 +93,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index

    private final ShardsIndicesStatusChecker statusChecker;

    IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable<IndexShard>indexServices) {
    IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable<IndexShard> indexServices) {
        this(settings, threadPool, indexServices, JvmInfo.jvmInfo().getMem().getHeapMax().bytes());
    }

@@ -251,21 +251,29 @@ public class IndexingMemoryController extends AbstractComponent implements Index
    /** Shard calls this on each indexing/delete op */
    public void bytesWritten(int bytes) {
        long totalBytes = bytesWrittenSinceCheck.addAndGet(bytes);
        assert totalBytes >= 0;
        while (totalBytes > indexingBuffer.bytes()/30) {

            if (runLock.tryLock()) {
                try {
                    bytesWrittenSinceCheck.addAndGet(-totalBytes);
                    // NOTE: this is only an approximate check, because bytes written is to the translog, vs indexing memory buffer which is
                    // typically smaller but can be larger in extreme cases (many unique terms). This logic is here only as a safety against
                    // thread starvation or too infrequent checking, to ensure we are still checking periodically, in proportion to bytes
                    // processed by indexing:
                    runUnlocked();
                    // Must pull this again because it may have changed since we first checked:
                    totalBytes = bytesWrittenSinceCheck.get();
                    if (totalBytes > indexingBuffer.bytes()/30) {
                        bytesWrittenSinceCheck.addAndGet(-totalBytes);
                        // NOTE: this is only an approximate check, because bytes written is to the translog, vs indexing memory buffer which is
                        // typically smaller but can be larger in extreme cases (many unique terms). This logic is here only as a safety against
                        // thread starvation or too infrequent checking, to ensure we are still checking periodically, in proportion to bytes
                        // processed by indexing:
                        runUnlocked();
                    }
                } finally {
                    runLock.unlock();
                }
                // Could be while we were checking, more bytes arrived:
                totalBytes = bytesWrittenSinceCheck.addAndGet(bytes);

                // Must get it again since other threads could have increased it while we were in runUnlocked
                totalBytes = bytesWrittenSinceCheck.get();
            } else {
                // Another thread beat us to it: let them do all the work, yay!
                break;
            }
        }

@@ -314,7 +322,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index

        if (logger.isTraceEnabled()) {
            logger.trace("total indexing heap bytes used [{}] vs {} [{}], currently writing bytes [{}]",
                new ByteSizeValue(totalBytesUsed), INDEX_BUFFER_SIZE_SETTING, indexingBuffer, new ByteSizeValue(totalBytesWriting));
                new ByteSizeValue(totalBytesUsed), INDEX_BUFFER_SIZE_SETTING.getKey(), indexingBuffer, new ByteSizeValue(totalBytesWriting));
        }

        // If we are using more than 50% of our budget across both indexing buffer and bytes we are still moving to disk, then we now

@@ -354,7 +362,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
        }

        logger.debug("now write some indexing buffers: total indexing heap bytes used [{}] vs {} [{}], currently writing bytes [{}], [{}] shards with non-zero indexing buffer",
            new ByteSizeValue(totalBytesUsed), INDEX_BUFFER_SIZE_SETTING, indexingBuffer, new ByteSizeValue(totalBytesWriting), queue.size());
            new ByteSizeValue(totalBytesUsed), INDEX_BUFFER_SIZE_SETTING.getKey(), indexingBuffer, new ByteSizeValue(totalBytesWriting), queue.size());

        while (totalBytesUsed > indexingBuffer.bytes() && queue.isEmpty() == false) {
            ShardAndBytesUsed largest = queue.poll();

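Aside: the bytesWritten rework above is an instance of a tryLock throttling pattern: many writer threads account bytes, the thread that crosses the threshold runs the (possibly slow) check, and losers of the lock race move on instead of blocking. A loosely modeled, hypothetical sketch (simplified; not the commit's exact control flow):

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

class PeriodicCheckSketch {
    private final AtomicLong pending = new AtomicLong();
    private final ReentrantLock runLock = new ReentrantLock();
    private final long threshold;

    PeriodicCheckSketch(long threshold) {
        this.threshold = threshold;
    }

    void bytesWritten(int bytes, Runnable check) {
        long total = pending.addAndGet(bytes);
        while (total > threshold) {
            if (runLock.tryLock() == false) {
                return; // another thread is already running the check
            }
            try {
                pending.addAndGet(-total); // consume what we observed
                check.run();
            } finally {
                runLock.unlock();
            }
            total = pending.get(); // more bytes may have arrived meanwhile
        }
    }
}
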
@@ -181,7 +181,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
        this.namedWriteableRegistry = namedWriteableRegistry;
        clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
        clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
        indexingMemoryController = new IndexingMemoryController(settings, threadPool, Iterables.flatten(this));
        indexingMemoryController = new IndexingMemoryController(settings, threadPool,
            // ensure we pull an iter with new shards - flatten makes a copy
            () -> Iterables.flatten(this).iterator());
        this.indexScopeSetting = indexScopedSettings;
        this.circuitBreakerService = circuitBreakerService;
        this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {

@@ -527,7 +529,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
        try {
            if (clusterState.metaData().hasIndex(indexName)) {
                final IndexMetaData index = clusterState.metaData().index(indexName);
                throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
                throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of " +
                    "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
            }
            deleteIndexStore(reason, metaData, clusterState);
        } catch (IOException e) {

@@ -30,11 +30,9 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;

@@ -194,27 +192,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
    }

    private void cleanFailedShards(final ClusterChangedEvent event) {
        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().getLocalNodeId());
        RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
        if (routingNode == null) {
            failedShards.clear();
            return;
        }
        RoutingTable routingTable = event.state().routingTable();
        for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
            Map.Entry<ShardId, ShardRouting> entry = iterator.next();
            ShardId failedShardId = entry.getKey();
            ShardRouting failedShardRouting = entry.getValue();
            IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
            if (indexRoutingTable == null) {
                iterator.remove();
                continue;
            }
            IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
            if (shardRoutingTable == null) {
                iterator.remove();
                continue;
            }
            if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
            ShardRouting matchedShardRouting = routingNode.getByShardId(failedShardRouting.shardId());
            if (matchedShardRouting == null || matchedShardRouting.isSameAllocation(failedShardRouting) == false) {
                iterator.remove();
            }
        }

@@ -279,7 +266,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
    }

    private void applyDeletedShards(final ClusterChangedEvent event) {
        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().getLocalNodeId());
        RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
        if (routingNode == null) {
            return;
        }

@@ -382,7 +369,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

    private void applyNewIndices(final ClusterChangedEvent event) {
        // we only create indices for shards that are allocated
        RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().getLocalNodeId());
        RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
        if (routingNode == null) {
            return;
        }

@@ -479,7 +466,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
    }

    RoutingTable routingTable = event.state().routingTable();
    RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().getLocalNodeId());
    RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());

    if (routingNode == null) {
        failedShards.clear();

@@ -141,7 +141,7 @@ public class ShardsSyncedFlushResult implements Streamable {
        int numResponses = in.readInt();
        shardResponses = new HashMap<>();
        for (int i = 0; i < numResponses; i++) {
            ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in);
            ShardRouting shardRouting = new ShardRouting(in);
            SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in);
            shardResponses.put(shardRouting, response);
        }

@@ -23,6 +23,9 @@ import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import joptsimple.OptionSet;
import org.elasticsearch.cli.Command;

@@ -48,10 +51,17 @@ class ListPluginsCommand extends Command {
        }

        terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsFile());
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) {
            for (Path plugin : stream) {
                terminal.println(plugin.getFileName().toString());
        final List<Path> plugins = new ArrayList<>();
        try (DirectoryStream<Path> paths = Files.newDirectoryStream(env.pluginsFile())) {
            for (Path plugin : paths) {
                plugins.add(plugin);
            }
        }
        Collections.sort(plugins);
        for (final Path plugin : plugins) {
            terminal.println(plugin.getFileName().toString());
            PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
            terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
        }
    }
}

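Aside: the ListPluginsCommand change above exists because DirectoryStream gives no ordering guarantee, so entries are collected into a list and sorted before printing. A minimal standalone sketch of the same idea; the "plugins" path in main is a hypothetical example:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class SortedListingSketch {
    static List<Path> sortedEntries(Path dir) throws IOException {
        final List<Path> entries = new ArrayList<>();
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path entry : stream) {
                entries.add(entry);
            }
        }
        Collections.sort(entries); // Path implements Comparable<Path>
        return entries;
    }

    public static void main(String[] args) throws IOException {
        for (Path p : sortedEntries(Paths.get("plugins"))) {
            System.out.println(p.getFileName());
        }
    }
}
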
@@ -19,8 +19,7 @@

package org.elasticsearch.rest.action.cat;

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;

@@ -36,11 +35,6 @@ import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestResponseListener;
import org.elasticsearch.rest.action.support.RestTable;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.rest.RestRequest.Method.GET;

/**

@@ -57,7 +51,6 @@ public class RestFielddataAction extends AbstractCatAction {

    @Override
    protected void doRequest(final RestRequest request, final RestChannel channel, final Client client) {

        final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
        nodesStatsRequest.clear();
        nodesStatsRequest.indices(true);

@@ -86,56 +79,30 @@ public class RestFielddataAction extends AbstractCatAction {
            .addCell("host", "alias:h;desc:host name")
            .addCell("ip", "desc:ip address")
            .addCell("node", "alias:n;desc:node name")
            .addCell("total", "text-align:right;desc:total field data usage")
            .addCell("field", "alias:f;desc:field name")
            .addCell("size", "text-align:right;alias:s;desc:field data usage")
            .endHeaders();
        return table;
    }

    private Table buildTable(final RestRequest request, final NodesStatsResponse nodeStatses) {
        Set<String> fieldNames = new HashSet<>();
        Map<NodeStats, ObjectLongMap<String>> nodesFields = new HashMap<>();
        Table table = getTableWithHeader(request);

        // Collect all the field names so a new table can be built
        for (NodeStats ns : nodeStatses.getNodes()) {
            ObjectLongHashMap<String> fields = ns.getIndices().getFieldData().getFields();
            nodesFields.put(ns, fields);
            if (fields != null) {
                for (String key : fields.keys().toArray(String.class)) {
                    fieldNames.add(key);
        for (NodeStats nodeStats: nodeStatses.getNodes()) {
            if (nodeStats.getIndices().getFieldData().getFields() != null) {
                for (ObjectLongCursor<String> cursor : nodeStats.getIndices().getFieldData().getFields()) {
                    table.startRow();
                    table.addCell(nodeStats.getNode().getId());
                    table.addCell(nodeStats.getNode().getHostName());
                    table.addCell(nodeStats.getNode().getHostAddress());
                    table.addCell(nodeStats.getNode().getName());
                    table.addCell(cursor.key);
                    table.addCell(new ByteSizeValue(cursor.value));
                    table.endRow();
                }
            }
        }

        // The table must be rebuilt because it has dynamic headers based on the fields
        Table table = new Table();
        table.startHeaders()
            .addCell("id", "desc:node id")
            .addCell("host", "alias:h;desc:host name")
            .addCell("ip", "desc:ip address")
            .addCell("node", "alias:n;desc:node name")
            .addCell("total", "text-align:right;desc:total field data usage");
        // The table columns must be built dynamically since the number of fields is unknown
        for (String fieldName : fieldNames) {
            table.addCell(fieldName, "text-align:right;desc:" + fieldName + " field");
        }
        table.endHeaders();

        for (Map.Entry<NodeStats, ObjectLongMap<String>> statsEntry : nodesFields.entrySet()) {
            table.startRow();
            // add the node info and field data total before each individual field
            NodeStats ns = statsEntry.getKey();
            table.addCell(ns.getNode().getId());
            table.addCell(ns.getNode().getHostName());
            table.addCell(ns.getNode().getHostAddress());
            table.addCell(ns.getNode().getName());
            table.addCell(ns.getIndices().getFieldData().getMemorySize());
            ObjectLongMap<String> fields = statsEntry.getValue();
            for (String fieldName : fieldNames) {
                table.addCell(new ByteSizeValue(fields == null ? 0L : fields.getOrDefault(fieldName, 0L)));
            }
            table.endRow();
        }

        return table;
    }
}

@@ -41,17 +41,6 @@ public class RestUtils {
        }
    };

    public static boolean isBrowser(@Nullable String userAgent) {
        if (userAgent == null) {
            return false;
        }
        // chrome, safari, firefox, ie
        if (userAgent.startsWith("Mozilla")) {
            return true;
        }
        return false;
    }

    public static void decodeQueryString(String s, int fromIndex, Map<String, String> params) {
        if (fromIndex < 0) {
            return;

@@ -37,9 +37,4 @@ public abstract class AbstractDoubleSearchScript extends AbstractSearchScript {
    public long runAsLong() {
        return (long) runAsDouble();
    }

    @Override
    public float runAsFloat() {
        return (float) runAsDouble();
    }
}

@@ -37,9 +37,4 @@ public abstract class AbstractLongSearchScript extends AbstractSearchScript {
    public double runAsDouble() {
        return runAsLong();
    }

    @Override
    public float runAsFloat() {
        return runAsLong();
    }
}

@@ -34,7 +34,7 @@ import java.util.Map;
 * A base class for any script type that is used during the search process (custom score, aggs, and so on).
 * <p>
 * If the script returns a specific numeric type, consider overriding the type specific base classes
 * such as {@link AbstractDoubleSearchScript}, {@link AbstractFloatSearchScript} and {@link AbstractLongSearchScript}
 * such as {@link AbstractDoubleSearchScript} and {@link AbstractLongSearchScript}
 * for better performance.
 * <p>
 * The use is required to implement the {@link #run()} method.

@@ -120,11 +120,6 @@ public abstract class AbstractSearchScript extends AbstractExecutableScript impl
        lookup.source().setSource(source);
    }

    @Override
    public float runAsFloat() {
        return ((Number) run()).floatValue();
    }

    @Override
    public long runAsLong() {
        return ((Number) run()).longValue();

@@ -24,6 +24,14 @@ package org.elasticsearch.script;
 */
public interface ExecutableScript {

    /**
     * Sets a runtime script parameter.
     * <p>
     * Note that this method may be slow, involving put() and get() calls
     * to a hashmap or similar.
     * @param name parameter name
     * @param value parameter value
     */
    void setNextVar(String name, Object value);

    /**

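Aside: a minimal hypothetical sketch of how a caller drives the interface documented above, pushing a parameter in via setNextVar before each run; the wrapper class is invented for illustration:

import org.elasticsearch.script.ExecutableScript;

class ScriptCallerSketch {
    static Object runWithParam(ExecutableScript script, String name, Object value) {
        script.setNextVar(name, value); // may hit a hashmap under the hood
        return script.run();            // execute with the updated variable
    }
}
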