Merge branch 'master' into feature/seq_no
* master: (158 commits)
  Document the hack
  Refactor property placeholder use of env. vars
  Force java9 log4j hack in testing
  Fix log4j buggy java version detection
  Make java9 work again
  Don't mkdir directly in deb init script
  Fix env. var placeholder test so it's reproducible
  Remove ScriptMode class in favor of boolean true/false
  [rest api spec] fix doc urls
  Netty request/response tracer should wait for send
  Filter client/server VM options from jvm.options
  [rest api spec] fix url for reindex api docs
  Remove use of a Fields class in snapshot responses that contains x-content keys, in favor of declaring/using the keys directly.
  Limit retries of failed allocations per index (#18467)
  Proxy box method to use valueOf.
  Use the build-in valueOf method instead of the custom one.
  Fixed tests and added a comment to the box method.
  Fix boxing.
  Do not decode path when sending error
  Fix race condition in snapshot initialization
  ...
commit
ad7229fe72
@@ -71,6 +71,17 @@ Once your changes and tests are ready to submit for review:

Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.

Please adhere to the general guideline that you should never force push
to a publicly shared branch. Once you have opened your pull request, you
should consider your branch publicly shared. Instead of force pushing
you can just add incremental commits; this is generally easier on your
reviewers. If you need to pick up changes from master, you can merge
master into your branch. A reviewer might ask you to rebase a
long-running pull request, in which case force pushing is okay for that
request. Note that squashing at the end of the review process should
also not be done; that can be done when the pull request is [integrated
via GitHub](https://github.com/blog/2141-squash-your-commits).

Contributing to the Elasticsearch codebase
------------------------------------------
@@ -201,7 +201,7 @@ gradle test -Dtests.timeoutSuite=5000! ...

Change the logging level of ES (not gradle)

--------------------------------
gradle test -Des.logger.level=DEBUG
gradle test -Dtests.logger.level=DEBUG
--------------------------------

Print all the logging output from the test runs to the commandline
@@ -378,7 +378,7 @@ class BuildPlugin implements Plugin<Project> {
 * -serial because we don't use java serialization.
 */
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options' << '-Xdoclint:all' << '-Xdoclint:-missing'
// compile with compact 3 profile by default
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
if (project.compactProfile != 'full') {

@@ -456,7 +456,7 @@ class BuildPlugin implements Plugin<Project> {
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
// TODO: remove setting logging level via system property
systemProperty 'es.logger.level', 'WARN'
systemProperty 'tests.logger.level', 'WARN'
for (Map.Entry<String, String> property : System.properties.entrySet()) {
    if (property.getKey().startsWith('tests.') ||
            property.getKey().startsWith('es.')) {
@@ -144,6 +144,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
if (query != null) {
    for (String param: query.tokenize('&')) {
        def (String name, String value) = param.tokenize('=')
        if (value == null) {
            value = ''
        }
        current.println(" $name: \"$value\"")
    }
}
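The null check added here gives query parameters that appear without an explicit `=value` (for example `pretty`) an empty-string value instead of a null. As a rough standalone illustration of that behavior (a hypothetical Java sketch, not code from this commit):

```java
import java.util.LinkedHashMap;
import java.util.Map;

class QueryParamSketch {
    // Mirrors the Groovy logic above: "size=1&pretty" -> {size=1, pretty=}
    static Map<String, String> parse(String query) {
        Map<String, String> params = new LinkedHashMap<>();
        for (String param : query.split("&")) {
            String[] parts = param.split("=", 2);
            // a bare parameter (no '=') gets an empty value instead of null
            params.put(parts[0], parts.length > 1 ? parts[1] : "");
        }
        return params;
    }

    public static void main(String[] args) {
        System.out.println(parse("size=1&pretty")); // prints {size=1, pretty=}
    }
}
```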
@@ -129,7 +129,11 @@ class NodeInfo {
}

env = [ 'JAVA_HOME' : project.javaHome ]
args.addAll("-E", "es.node.portsfile=true")
args.addAll("-E", "node.portsfile=true")
String loggerLevel = System.getProperty("tests.logger.level")
if (loggerLevel != null) {
    args.addAll("-E", "logger.level=${loggerLevel}")
}
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
env.put('ES_JAVA_OPTS', esJavaOpts)

@@ -140,7 +144,7 @@ class NodeInfo {
    }
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
args.addAll("-E", "es.path.conf=${confDir}")
args.addAll("-E", "path.conf=${confDir}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
    args.add('"') // end the entire command, quoted
}
@@ -517,7 +517,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />

@@ -1336,7 +1335,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]ESRestTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]RestTestExecutionContext.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]RestClient.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]client[/\\]http[/\\]HttpRequestBuilder.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
@@ -1,2 +1,2 @@
#!/bin/sh -e
#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
@@ -1,2 +1,2 @@
#!/bin/sh -e
#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
@@ -13,9 +13,7 @@ jna = 4.1.0
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
httpclient = 4.3.6
httpcore = 4.3.3
httpclient = 4.5.2
httpcore = 4.4.4
commonslogging = 1.1.3
commonscodec = 1.10
@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.log4j;

import org.apache.log4j.helpers.ThreadLocalMap;

/**
 * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
 *
 * This hack fixes up the pkg private members as if it had detected the java version correctly.
 */
public class Java9Hack {

    public static void fixLog4j() {
        if (MDC.mdc.tlm == null) {
            MDC.mdc.java1 = false;
            MDC.mdc.tlm = new ThreadLocalMap();
        }
    }
}
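The class is placed in the `org.apache.log4j` package so it can reach MDC's package-private fields. The actual call site is not part of this diff (the commit list only mentions forcing the hack in testing), so the following is a hypothetical sketch of how a caller might apply it; the version guard is an assumption:

```java
public final class Log4jBootstrap {
    private Log4jBootstrap() {}

    // Hypothetical helper; call once before any log4j 1.2 logging is initialized.
    public static void applyJava9HackIfNeeded() {
        // Java 9+ reports "9", "10", ... while Java 8 and earlier report "1.8", "1.7", ...
        if (!System.getProperty("java.specification.version").startsWith("1.")) {
            org.apache.log4j.Java9Hack.fixLog4j(); // repair MDC before it is first used
        }
    }
}
```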
@@ -69,6 +69,8 @@ public class Version {
    public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_2_ID = 2030299;
    public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_3_ID = 2030399;
    public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_5_0_0_alpha1_ID = 5000001;
    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final int V_5_0_0_alpha2_ID = 5000002;

@@ -94,6 +96,8 @@ public class Version {
            return V_5_0_0_alpha2;
        case V_5_0_0_alpha1_ID:
            return V_5_0_0_alpha1;
        case V_2_3_3_ID:
            return V_2_3_3;
        case V_2_3_2_ID:
            return V_2_3_2;
        case V_2_3_1_ID:
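For context on the constants above: the numeric id packs major/minor/revision into fixed decimal fields with a trailing two-digit build field, so 2030399 reads as 2.3.3 and 5000001 as 5.0.0 alpha1. A rough decoding sketch, based on my reading of the id scheme rather than code from this commit:

```java
public final class VersionIdSketch {
    static String decode(int id) {
        int major = id / 1000000;        // 2030399 -> 2
        int minor = (id / 10000) % 100;  // 2030399 -> 3
        int revision = (id / 100) % 100; // 2030399 -> 3
        int build = id % 100;            // 99 appears to mark a GA release
        return major + "." + minor + "." + revision + (build == 99 ? "" : "-pre" + build);
    }

    public static void main(String[] args) {
        System.out.println(decode(2030399)); // 2.3.3
        System.out.println(decode(5000001)); // 5.0.0-pre1 (alpha1)
    }
}
```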
@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.allocation;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;

@@ -30,24 +29,19 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStores
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaData.Custom;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

@@ -60,7 +54,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -72,7 +65,6 @@ import java.util.Set;
public class TransportClusterAllocationExplainAction
    extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {

    private final AllocationService allocationService;
    private final ClusterInfoService clusterInfoService;
    private final AllocationDeciders allocationDeciders;
    private final ShardsAllocator shardAllocator;

@@ -82,12 +74,10 @@ public class TransportClusterAllocationExplainAction
    public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                   ThreadPool threadPool, ActionFilters actionFilters,
                                                   IndexNameExpressionResolver indexNameExpressionResolver,
                                                   AllocationService allocationService, ClusterInfoService clusterInfoService,
                                                   AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
                                                   TransportIndicesShardStoresAction shardStoresAction) {
                                                   ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
                                                   ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction) {
        super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
            indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
        this.allocationService = allocationService;
        this.clusterInfoService = clusterInfoService;
        this.allocationDeciders = allocationDeciders;
        this.shardAllocator = shardAllocator;

@@ -259,8 +249,8 @@ public class TransportClusterAllocationExplainAction
    protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                   final ActionListener<ClusterAllocationExplainResponse> listener) {
        final RoutingNodes routingNodes = state.getRoutingNodes();
        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
            clusterInfoService.getClusterInfo(), System.nanoTime());
        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
            clusterInfoService.getClusterInfo(), System.nanoTime(), false);

        ShardRouting foundShard = null;
        if (request.useAnyUnassignedShard()) {
@@ -38,9 +38,10 @@ import java.io.IOException;
 * Request to submit cluster reroute allocation commands
 */
public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> {
    AllocationCommands commands = new AllocationCommands();
    boolean dryRun;
    boolean explain;
    private AllocationCommands commands = new AllocationCommands();
    private boolean dryRun;
    private boolean explain;
    private boolean retryFailed;

    public ClusterRerouteRequest() {
    }

@@ -81,6 +82,15 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        return this;
    }

    /**
     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
     */
    public ClusterRerouteRequest setRetryFailed(boolean retryFailed) {
        this.retryFailed = retryFailed;
        return this;
    }

    /**
     * Returns the current explain flag
     */

@@ -88,6 +98,14 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        return this.explain;
    }

    /**
     * Returns the current retry failed flag
     */
    public boolean isRetryFailed() {
        return this.retryFailed;
    }

    /**
     * Set the allocation commands to execute.
     */

@@ -96,6 +114,13 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        return this;
    }

    /**
     * Returns the allocation commands to execute
     */
    public AllocationCommands getCommands() {
        return commands;
    }

    /**
     * Sets the source for the request.
     */

@@ -136,6 +161,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        commands = AllocationCommands.readFrom(in);
        dryRun = in.readBoolean();
        explain = in.readBoolean();
        retryFailed = in.readBoolean();
        readTimeout(in);
    }

@@ -145,6 +171,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
        AllocationCommands.writeTo(commands, out);
        out.writeBoolean(dryRun);
        out.writeBoolean(explain);
        out.writeBoolean(retryFailed);
        writeTimeout(out);
    }
}
@@ -60,6 +60,15 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<Clu
        return this;
    }

    /**
     * Sets the retry failed flag (defaults to <tt>false</tt>). If true, the
     * request will retry allocating shards that can't currently be allocated due to too many allocation failures.
     */
    public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) {
        request.setRetryFailed(retryFailed);
        return this;
    }

    /**
     * Sets the commands for the request to execute.
     */
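The builder method simply mirrors the new setter on the request, so callers can opt in per call. A minimal usage sketch, assuming a connected `org.elasticsearch.client.Client` named `client` (the `prepareReroute()` entry point exists on the cluster admin client; the rest is illustrative):

```java
// Ask the master to retry shard allocations that hit the per-index retry limit (#18467).
client.admin().cluster()
    .prepareReroute()
    .setRetryFailed(true) // new flag from this change; defaults to false
    .get();
```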
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -68,38 +69,55 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu

    @Override
    protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
        clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {

            private volatile ClusterState clusterStateToSend;
            private volatile RoutingExplanations explanations;

            @Override
            protected ClusterRerouteResponse newResponse(boolean acknowledged) {
                return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
            }

            @Override
            public void onAckTimeout() {
                listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
            }

            @Override
            public void onFailure(String source, Throwable t) {
                logger.debug("failed to perform [{}]", t, source);
                super.onFailure(source, t);
            }

            @Override
            public ClusterState execute(ClusterState currentState) {
                RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain());
                ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
                clusterStateToSend = newState;
                explanations = routingResult.explanations();
                if (request.dryRun) {
                    return currentState;
                }
                return newState;
            }
        });
        clusterService.submitStateUpdateTask("cluster_reroute (api)", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger,
            allocationService, request, listener));
    }
}

static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask<ClusterRerouteResponse> {

    private final ClusterRerouteRequest request;
    private final ActionListener<ClusterRerouteResponse> listener;
    private final ESLogger logger;
    private final AllocationService allocationService;
    private volatile ClusterState clusterStateToSend;
    private volatile RoutingExplanations explanations;

    ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
                                                      ActionListener<ClusterRerouteResponse> listener) {
        super(Priority.IMMEDIATE, request, listener);
        this.request = request;
        this.listener = listener;
        this.logger = logger;
        this.allocationService = allocationService;
    }

    @Override
    protected ClusterRerouteResponse newResponse(boolean acknowledged) {
        return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
    }

    @Override
    public void onAckTimeout() {
        listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
    }

    @Override
    public void onFailure(String source, Throwable t) {
        logger.debug("failed to perform [{}]", t, source);
        super.onFailure(source, t);
    }

    @Override
    public ClusterState execute(ClusterState currentState) {
        RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
            request.isRetryFailed());
        ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
        clusterStateToSend = newState;
        explanations = routingResult.explanations();
        if (request.dryRun()) {
            return currentState;
        }
        return newState;
    }
}
}
@@ -81,18 +81,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
        return snapshotInfo.status();
    }

    static final class Fields {
        static final String SNAPSHOT = "snapshot";
        static final String ACCEPTED = "accepted";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (snapshotInfo != null) {
            builder.field(Fields.SNAPSHOT);
            builder.field("snapshot");
            snapshotInfo.toExternalXContent(builder, params);
        } else {
            builder.field(Fields.ACCEPTED, true);
            builder.field("accepted", true);
        }
        return builder;
    }
@@ -74,13 +74,9 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
        }
    }

    static final class Fields {
        static final String SNAPSHOTS = "snapshots";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startArray(Fields.SNAPSHOTS);
        builder.startArray("snapshots");
        for (SnapshotInfo snapshotInfo : snapshots) {
            snapshotInfo.toExternalXContent(builder, params);
        }
@@ -73,18 +73,13 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten
        return restoreInfo.status();
    }

    static final class Fields {
        static final String SNAPSHOT = "snapshot";
        static final String ACCEPTED = "accepted";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        if (restoreInfo != null) {
            builder.field(Fields.SNAPSHOT);
            builder.field("snapshot");
            restoreInfo.toXContent(builder, params);
        } else {
            builder.field(Fields.ACCEPTED, true);
            builder.field("accepted", true);
        }
        return builder;
    }
@@ -73,13 +73,9 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
        }
    }

    static final class Fields {
        static final String SNAPSHOTS = "snapshots";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray(Fields.SNAPSHOTS);
        builder.startArray("snapshots");
        for (SnapshotStatus snapshot : snapshots) {
            snapshot.toXContent(builder, params);
        }
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;

@@ -45,7 +44,6 @@ public class ClusterStatsIndices implements ToXContent {
    private QueryCacheStats queryCache;
    private CompletionStats completion;
    private SegmentsStats segments;
    private PercolatorQueryCacheStats percolatorCache;

    public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) {
        ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>();

@@ -56,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent {
        this.queryCache = new QueryCacheStats();
        this.completion = new CompletionStats();
        this.segments = new SegmentsStats();
        this.percolatorCache = new PercolatorQueryCacheStats();

        for (ClusterStatsNodeResponse r : nodeResponses) {
            for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {

@@ -79,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent {
                queryCache.add(shardCommonStats.queryCache);
                completion.add(shardCommonStats.completion);
                segments.add(shardCommonStats.segments);
                percolatorCache.add(shardCommonStats.percolatorCache);
            }
        }

@@ -122,10 +118,6 @@ public class ClusterStatsIndices implements ToXContent {
        return segments;
    }

    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    static final class Fields {
        static final String COUNT = "count";
    }

@@ -140,7 +132,6 @@ public class ClusterStatsIndices implements ToXContent {
        queryCache.toXContent(builder, params);
        completion.toXContent(builder, params);
        segments.toXContent(builder, params);
        percolatorCache.toXContent(builder, params);
        return builder;
    }
@@ -55,8 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
    TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {

    private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
        CommonStatsFlags.Flag.PercolatorCache);
        CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments);

    private final NodeService nodeService;
    private final IndicesService indicesService;

@@ -100,10 +99,13 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        for (IndexShard indexShard : indexService) {
            if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                // only report on fully started shards
                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
                    new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(),
                        indexShard, SHARD_STATS_FLAGS),
                    indexShard.commitStats(), indexShard.seqNoStats()));
                shardsStats.add(
                    new ShardStats(
                        indexShard.routingEntry(),
                        indexShard.shardPath(),
                        new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS),
                        indexShard.commitStats(),
                        indexShard.seqNoStats()));
            }
        }
    }
@@ -54,7 +54,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
                                           TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
                                           IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
            ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);
            ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
        this.indicesService = indicesService;
    }
@@ -32,10 +32,8 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;

@@ -101,9 +99,6 @@ public class CommonStats implements Streamable, ToXContent {
            case Segments:
                segments = new SegmentsStats();
                break;
            case PercolatorCache:
                percolatorCache = new PercolatorQueryCacheStats();
                break;
            case Translog:
                translog = new TranslogStats();
                break;

@@ -123,8 +118,7 @@ public class CommonStats implements Streamable, ToXContent {
    }

    public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache,
                       IndexShard indexShard, CommonStatsFlags flags) {
    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

        CommonStatsFlags.Flag[] setFlags = flags.getFlags();

@@ -169,9 +163,6 @@ public class CommonStats implements Streamable, ToXContent {
            case Segments:
                segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                break;
            case PercolatorCache:
                percolatorCache = percolatorQueryCache.getStats(indexShard.shardId());
                break;
            case Translog:
                translog = indexShard.translogStats();
                break;

@@ -223,9 +214,6 @@ public class CommonStats implements Streamable, ToXContent {
    @Nullable
    public FieldDataStats fieldData;

    @Nullable
    public PercolatorQueryCacheStats percolatorCache;

    @Nullable
    public CompletionStats completion;

@@ -331,14 +319,6 @@ public class CommonStats implements Streamable, ToXContent {
        } else {
            fieldData.add(stats.getFieldData());
        }
        if (percolatorCache == null) {
            if (stats.getPercolatorCache() != null) {
                percolatorCache = new PercolatorQueryCacheStats();
                percolatorCache.add(stats.getPercolatorCache());
            }
        } else {
            percolatorCache.add(stats.getPercolatorCache());
        }
        if (completion == null) {
            if (stats.getCompletion() != null) {
                completion = new CompletionStats();

@@ -436,11 +416,6 @@ public class CommonStats implements Streamable, ToXContent {
        return this.fieldData;
    }

    @Nullable
    public PercolatorQueryCacheStats getPercolatorCache() {
        return percolatorCache;
    }

    @Nullable
    public CompletionStats getCompletion() {
        return completion;

@@ -528,9 +503,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
        if (in.readBoolean()) {
            percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }

@@ -610,12 +582,6 @@ public class CommonStats implements Streamable, ToXContent {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
        if (percolatorCache == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            percolatorCache.writeTo(out);
        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {

@@ -669,9 +635,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (fieldData != null) {
            fieldData.toXContent(builder, params);
        }
        if (percolatorCache != null) {
            percolatorCache.toXContent(builder, params);
        }
        if (completion != null) {
            completion.toXContent(builder, params);
        }
@@ -240,7 +240,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        FieldData("fielddata"),
        Docs("docs"),
        Warmer("warmer"),
        PercolatorCache("percolator_cache"),
        Completion("completion"),
        Segments("segments"),
        Translog("translog"),
@@ -184,15 +184,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.FieldData);
    }

    public IndicesStatsRequest percolate(boolean percolate) {
        flags.set(Flag.PercolatorCache, percolate);
        return this;
    }

    public boolean percolate() {
        return flags.isSet(Flag.PercolatorCache);
    }

    public IndicesStatsRequest segments(boolean segments) {
        flags.set(Flag.Segments, segments);
        return this;
@@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        return this;
    }

    public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
        request.percolate(percolate);
        return this;
    }

    public IndicesStatsRequestBuilder setSegments(boolean segments) {
        request.segments(segments);
        return this;
@@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.fieldDataFields());
        }
        if (request.percolate()) {
            flags.set(CommonStatsFlags.Flag.PercolatorCache);
        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());

@@ -163,8 +160,9 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.Recovery);
        }

        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
            new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags),
            indexShard.commitStats(), indexShard.seqNoStats());
        return new ShardStats(
            indexShard.routingEntry(),
            indexShard.shardPath(),
            new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
    }
}
@@ -169,6 +169,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
    }

    public BulkRequest add(DeleteRequest request, @Nullable Object payload) {
        Objects.requireNonNull(request, "'request' must not be null");
        requests.add(request);
        addPayload(payload);
        sizeInBytes += REQUEST_OVERHEAD;
@@ -27,7 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortBuilder;

@@ -165,9 +165,9 @@ public class PercolateRequestBuilder extends ActionRequestBuilder<PercolateReque

    /**
     * Delegates to
     * {@link PercolateSourceBuilder#addAggregation(AggregatorBuilder)}
     * {@link PercolateSourceBuilder#addAggregation(AggregationBuilder)}
     */
    public PercolateRequestBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
    public PercolateRequestBuilder addAggregation(AggregationBuilder<?> aggregationBuilder) {
        sourceBuilder().addAggregation(aggregationBuilder);
        return this;
    }
@@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;

@@ -53,7 +53,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
    private List<SortBuilder<?>> sorts;
    private Boolean trackScores;
    private HighlightBuilder highlightBuilder;
    private List<AggregatorBuilder<?>> aggregationBuilders;
    private List<AggregationBuilder<?>> aggregationBuilders;
    private List<PipelineAggregatorBuilder<?>> pipelineAggregationBuilders;

    /**

@@ -126,7 +126,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
    /**
     * Add an aggregation definition.
     */
    public PercolateSourceBuilder addAggregation(AggregatorBuilder<?> aggregationBuilder) {
    public PercolateSourceBuilder addAggregation(AggregationBuilder<?> aggregationBuilder) {
        if (aggregationBuilders == null) {
            aggregationBuilders = new ArrayList<>();
        }

@@ -175,7 +175,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
        builder.field("aggregations");
        builder.startObject();
        if (aggregationBuilders != null) {
            for (AggregatorBuilder<?> aggregation : aggregationBuilders) {
            for (AggregationBuilder<?> aggregation : aggregationBuilders) {
                aggregation.toXContent(builder, params);
            }
        }
@@ -28,7 +28,7 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;

@@ -373,7 +373,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
    /**
     * Adds an aggregation to the search operation.
     */
    public SearchRequestBuilder addAggregation(AggregatorBuilder<?> aggregation) {
    public SearchRequestBuilder addAggregation(AggregationBuilder<?> aggregation) {
        sourceBuilder().aggregation(aggregation);
        return this;
    }
@@ -84,6 +84,20 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe

    final String transportNodeBroadcastAction;

    public TransportBroadcastByNodeAction(
            Settings settings,
            String actionName,
            ThreadPool threadPool,
            ClusterService clusterService,
            TransportService transportService,
            ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<Request> request,
            String executor) {
        this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request,
            executor, true);
    }

    public TransportBroadcastByNodeAction(
            Settings settings,
            String actionName,

@@ -93,7 +107,8 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
            ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver,
            Supplier<Request> request,
            String executor) {
            String executor,
            boolean canTripCircuitBreaker) {
        super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);

        this.clusterService = clusterService;

@@ -101,7 +116,8 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe

        transportNodeBroadcastAction = actionName + "[n]";

        transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, new BroadcastByNodeTransportRequestHandler());
        transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, false, canTripCircuitBreaker,
            new BroadcastByNodeTransportRequestHandler());
    }

    private Response newResponse(
@@ -177,15 +177,7 @@ final class Bootstrap {
        // install SM after natives, shutdown hooks, etc.
        Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

        // We do not need to reload system properties here as we have already applied them in building the settings and
        // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
        // placeholder
        Settings nodeSettings = Settings.builder()
            .put(settings)
            .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
            .build();

        node = new Node(nodeSettings) {
        node = new Node(settings) {
            @Override
            protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
                BootstrapCheck.check(settings, boundTransportAddress);

@@ -193,13 +185,13 @@ final class Bootstrap {
        };
    }

    private static Environment initialSettings(boolean foreground, String pidFile) {
    private static Environment initialSettings(boolean foreground, String pidFile, Map<String, String> esSettings) {
        Terminal terminal = foreground ? Terminal.DEFAULT : null;
        Settings.Builder builder = Settings.builder();
        if (Strings.hasLength(pidFile)) {
            builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
        }
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal);
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
    }

    private void start() {

@@ -233,11 +225,13 @@ final class Bootstrap {
        // Set the system property before anything has a chance to trigger its use
        initLoggerPrefix();

        elasticsearchSettings(esSettings);
        // force the class initializer for BootstrapInfo to run before
        // the security manager is installed
        BootstrapInfo.init();

        INSTANCE = new Bootstrap();

        Environment environment = initialSettings(foreground, pidFile);
        Environment environment = initialSettings(foreground, pidFile, esSettings);
        Settings settings = environment.settings();
        LogConfigurator.configure(settings, true);
        checkForCustomConfFile();

@@ -295,13 +289,6 @@ final class Bootstrap {
        }
    }

    @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
    private static void elasticsearchSettings(Map<String, String> esSettings) {
        for (Map.Entry<String, String> esSetting : esSettings.entrySet()) {
            System.setProperty(esSetting.getKey(), esSetting.getValue());
        }
    }

    @SuppressForbidden(reason = "System#out")
    private static void closeSystOut() {
        System.out.close();
@@ -120,4 +120,8 @@ public final class BootstrapInfo {
        }
        return SYSTEM_PROPERTIES;
    }

    public static void init() {
    }

}
@@ -21,28 +21,25 @@ package org.elasticsearch.bootstrap;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.Build;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * This class starts elasticsearch.
 */
class Elasticsearch extends Command {
class Elasticsearch extends SettingCommand {

    private final OptionSpec<Void> versionOption;
    private final OptionSpec<Void> daemonizeOption;
    private final OptionSpec<String> pidfileOption;
    private final OptionSpec<KeyValuePair> propertyOption;

    // visible for testing
    Elasticsearch() {

@@ -56,7 +53,6 @@ class Elasticsearch extends Command {
        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
            "Creates a pid file in the specified path on start")
            .withRequiredArg();
        propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    /**

@@ -75,7 +71,7 @@ class Elasticsearch extends Command {
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
        if (options.nonOptionArguments().isEmpty() == false) {
            throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
        }

@@ -84,26 +80,15 @@ class Elasticsearch extends Command {
            throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");
        }
        terminal.println("Version: " + org.elasticsearch.Version.CURRENT
            + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
            + ", JVM: " + JvmInfo.jvmInfo().version());
                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
                + ", JVM: " + JvmInfo.jvmInfo().version());
        return;
    }

    final boolean daemonize = options.has(daemonizeOption);
    final String pidFile = pidfileOption.value(options);

    final Map<String, String> esSettings = new HashMap<>();
    for (final KeyValuePair kvp : propertyOption.values(options)) {
        if (!kvp.key.startsWith("es.")) {
            throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]");
        }
        if (kvp.value.isEmpty()) {
            throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty");
        }
        esSettings.put(kvp.key, kvp.value);
    }

    init(daemonize, pidFile, esSettings);
    init(daemonize, pidFile, settings);
    }

    void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {
@@ -19,15 +19,15 @@

package org.elasticsearch.cli;

import java.io.IOException;
import java.util.Arrays;

import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.common.SuppressForbidden;

import java.io.IOException;
import java.util.Arrays;

/**
 * An action to execute within a cli.
 */

@@ -112,4 +112,5 @@ public abstract class Command {
     *
     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
    protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;

}
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cli;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public abstract class SettingCommand extends Command {

    private final OptionSpec<KeyValuePair> settingOption;

    public SettingCommand(String description) {
        super(description);
        this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        final Map<String, String> settings = new HashMap<>();
        for (final KeyValuePair kvp : settingOption.values(options)) {
            if (kvp.value.isEmpty()) {
                throw new UserError(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty");
            }
            settings.put(kvp.key, kvp.value);
        }

        putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
        putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
        putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
        putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");

        execute(terminal, options, settings);
    }

    protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
        final String value = System.getProperty(key);
        if (value != null) {
            if (settings.containsKey(setting)) {
                final String message =
                        String.format(
                                Locale.ROOT,
                                "duplicate setting [%s] found via command-line [%s] and system property [%s]",
                                setting,
                                settings.get(setting),
                                value);
                throw new IllegalArgumentException(message);
            } else {
                settings.put(setting, value);
            }
        }
    }

    protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;

}
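CLI entry points that take `-E` settings now extend this class instead of `Command` and receive the parsed settings directly, as `Elasticsearch` does above. A minimal sketch of a hypothetical subclass (illustrative only; `PrintSettingsCommand` is not part of this commit):

```java
class PrintSettingsCommand extends SettingCommand {

    PrintSettingsCommand() {
        super("Prints the -E settings it was given"); // hypothetical description
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
        // `settings` already merges -E key=value pairs with the es.path.* system properties
        for (Map.Entry<String, String> entry : settings.entrySet()) {
            terminal.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}
```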
@@ -49,6 +49,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci
import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;

@@ -79,6 +80,7 @@ public class ClusterModule extends AbstractModule {
        new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
    public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
        Collections.unmodifiableList(Arrays.asList(
            MaxRetryAllocationDecider.class,
            SameShardAllocationDecider.class,
            FilterAllocationDecider.class,
            ReplicaAfterPrimaryActiveAllocationDecider.class,
@@ -88,15 +88,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
    }

    /**
     * Elasticsearch 3.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices
     * that were created before Elasticsearch v2.0.0.beta1 should be upgraded using upgrade API before they can
     * be open by this version of elasticsearch.
     */
     * Elasticsearch 5.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices
     * that were created before Elasticsearch v2.0.0.beta1 should be reindexed in Elasticsearch 2.x
     * before they can be opened by this version of elasticsearch. */
    private void checkSupportedVersion(IndexMetaData indexMetaData) {
        if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) {
            throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded."
                + " This index should be opened using a version before " + Version.CURRENT.minimumCompatibilityVersion()
                + " and upgraded using the upgrade API.");
            throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1."
                + " It should be reindexed in Elasticsearch 2.x before upgrading to " + Version.CURRENT + ".");
        }
    }
@@ -281,8 +281,11 @@ public class MetaDataMappingService extends AbstractComponent {
        // Also the order of the mappings may be backwards.
        if (newMapper.parentFieldMapper().active()) {
            for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
                if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
                    throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
                String parentType = newMapper.parentFieldMapper().type();
                if (parentType.equals(mapping.value.type()) &&
                        indexService.mapperService().getParentTypes().contains(parentType) == false) {
                    throw new IllegalArgumentException("can't add a _parent field that points to an " +
                        "already existing type, that isn't already a parent");
                }
            }
        }
@ -54,10 +54,6 @@ import java.util.function.Predicate;
 */
public class RoutingNodes implements Iterable<RoutingNode> {

    private final MetaData metaData;

    private final ClusterBlocks blocks;

    private final RoutingTable routingTable;

    private final Map<String, RoutingNode> nodesToShards = new HashMap<>();

@ -66,8 +62,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {

    private final Map<ShardId, List<ShardRouting>> assignedShards = new HashMap<>();

    private final ImmutableOpenMap<String, ClusterState.Custom> customs;

    private final boolean readOnly;

    private int inactivePrimaryCount = 0;

@ -85,10 +79,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

    public RoutingNodes(ClusterState clusterState, boolean readOnly) {
        this.readOnly = readOnly;
        this.metaData = clusterState.metaData();
        this.blocks = clusterState.blocks();
        this.routingTable = clusterState.routingTable();
        this.customs = clusterState.customs();

        Map<String, LinkedHashMap<ShardId, ShardRouting>> nodesToShards = new HashMap<>();
        // fill in the nodeToShards with the "live" nodes

@ -232,28 +223,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
        return routingTable();
    }

    public MetaData metaData() {
        return this.metaData;
    }

    public MetaData getMetaData() {
        return metaData();
    }

    public ClusterBlocks blocks() {
        return this.blocks;
    }

    public ClusterBlocks getBlocks() {
        return this.blocks;
    }

    public ImmutableOpenMap<String, ClusterState.Custom> customs() {
        return this.customs;
    }

    public <T extends ClusterState.Custom> T custom(String type) { return (T) customs.get(type); }

    public UnassignedShards unassigned() {
        return this.unassignedShards;
    }

@ -252,6 +252,13 @@ public final class ShardRouting implements Writeable, ToXContent {
        return true;
    }

    /**
     * returns true for initializing shards that recover their data from another shard copy
     */
    public boolean isPeerRecovery() {
        return state == ShardRoutingState.INITIALIZING && (primary() == false || relocatingNodeId != null);
    }

    /**
     * A shard iterator with just this shard in it.
     */

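For orientation, the new predicate separates copies that pull their data over the network from a peer (an initializing replica, or a relocation target) from copies that recover locally. A minimal restatement of the three cases as a hypothetical standalone helper, not part of the diff:

// Hypothetical restatement of ShardRouting.isPeerRecovery() for illustration:
//  - initializing primary, fresh or from store/snapshot -> false (no peer involved)
//  - initializing replica                               -> true  (recovers from its primary)
//  - relocation target (relocatingNodeId set)           -> true  (recovers from the source copy)
static boolean isPeerRecovery(ShardRoutingState state, boolean primary, String relocatingNodeId) {
    return state == ShardRoutingState.INITIALIZING && (primary == false || relocatingNodeId != null);
}
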
@ -48,7 +48,6 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
        Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
            Property.IndexScope);

    /**
     * Reason why the shard is in unassigned state.
     * <p>

@ -103,7 +102,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        /**
         * A better replica location is identified and causes the existing replica allocation to be cancelled.
         */
        REALLOCATED_REPLICA;
        REALLOCATED_REPLICA,
        /**
         * Unassigned as a result of a failed primary while the replica was initializing.
         */
        PRIMARY_FAILED;
    }

    private final Reason reason;

@ -112,6 +115,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
    private final long lastComputedLeftDelayNanos; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
    private final String message;
    private final Throwable failure;
    private final int failedAllocations;

    /**
     * creates an UnassignedInfo object based on **current** time

@ -120,7 +124,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     * @param message more information about cause.
     **/
    public UnassignedInfo(Reason reason, String message) {
        this(reason, message, null, System.nanoTime(), System.currentTimeMillis());
        this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis());
    }

    /**

@ -130,13 +134,16 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
     * @param unassignedTimeMillis the time of unassignment used to display in our reporting.
     */
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) {
    public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, int failedAllocations, long unassignedTimeNanos, long unassignedTimeMillis) {
        this.reason = reason;
        this.unassignedTimeMillis = unassignedTimeMillis;
        this.unassignedTimeNanos = unassignedTimeNanos;
        this.lastComputedLeftDelayNanos = 0L;
        this.message = message;
        this.failure = failure;
        this.failedAllocations = failedAllocations;
        assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED):
            "failedAllocations: " + failedAllocations + " for reason " + reason;
        assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
    }

@ -147,17 +154,19 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        this.lastComputedLeftDelayNanos = newComputedLeftDelayNanos;
        this.message = unassignedInfo.message;
        this.failure = unassignedInfo.failure;
        this.failedAllocations = unassignedInfo.failedAllocations;
    }

    public UnassignedInfo(StreamInput in) throws IOException {
        this.reason = Reason.values()[(int) in.readByte()];
        this.unassignedTimeMillis = in.readLong();
        // As System.nanoTime() cannot be compared across different JVMs, reset it to now.
        // This means that in master failover situations, elapsed delay time is forgotten.
        // This means that in master fail-over situations, elapsed delay time is forgotten.
        this.unassignedTimeNanos = System.nanoTime();
        this.lastComputedLeftDelayNanos = 0L;
        this.message = in.readOptionalString();
        this.failure = in.readThrowable();
        this.failedAllocations = in.readVInt();
    }

    public void writeTo(StreamOutput out) throws IOException {

@ -166,12 +175,18 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs
        out.writeOptionalString(message);
        out.writeThrowable(failure);
        out.writeVInt(failedAllocations);
    }

    public UnassignedInfo readFrom(StreamInput in) throws IOException {
        return new UnassignedInfo(in);
    }

    /**
     * Returns the number of previously failed allocations of this shard.
     */
    public int getNumFailedAllocations() { return failedAllocations; }

    /**
     * The reason why the shard is unassigned.
     */

@ -325,7 +340,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        StringBuilder sb = new StringBuilder();
        sb.append("[reason=").append(reason).append("]");
        sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]");
        if (failedAllocations > 0) {
            sb.append(", failed_attempts[").append(failedAllocations).append("]");
        }
        String details = getDetails();

        if (details != null) {
            sb.append(", details[").append(details).append("]");
        }

@ -342,6 +361,9 @@ public final class UnassignedInfo implements ToXContent, Writeable {
        builder.startObject("unassigned_info");
        builder.field("reason", reason);
        builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis));
        if (failedAllocations > 0) {
            builder.field("failed_attempts", failedAllocations);
        }
        String details = getDetails();
        if (details != null) {
            builder.field("details", details);

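A brief sketch of how the two constructors above treat the new counter; the instances are hypothetical and only illustrate the assert's invariant that a non-zero count goes together with Reason.ALLOCATION_FAILED:

// Reason-only constructor: the count is derived from the reason.
UnassignedInfo created = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "fresh index");
assert created.getNumFailedAllocations() == 0;

UnassignedInfo failedOnce = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "disk error");
assert failedOnce.getNumFailedAllocations() == 1;

// Full constructor: a repeated failure passes the incremented count explicitly,
// and the count survives writeTo/StreamInput round trips via writeVInt/readVInt.
UnassignedInfo failedTwice = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "disk error", null,
    failedOnce.getNumFailedAllocations() + 1, System.nanoTime(), System.currentTimeMillis());
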
@ -90,7 +90,7 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo());
        boolean changed = applyStartedShards(routingNodes, startedShards);
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@ -216,14 +216,16 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo());
        boolean changed = false;
        // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list
        List<FailedRerouteAllocation.FailedShard> orderedFailedShards = new ArrayList<>(failedShards);
        orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary()));
        for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) {
            UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
            final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
            changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
                System.nanoTime(), System.currentTimeMillis()));
                failedAllocations + 1, System.nanoTime(), System.currentTimeMillis()));
        }
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@ -257,16 +259,13 @@ public class AllocationService extends AbstractComponent {
            .collect(Collectors.joining(", "));
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
        return reroute(clusterState, commands, false);
    }

    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {
    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // we don't shuffle the unassigned shards here, to try and get as close as possible to
        // a consistent result of the effect the commands have on the routing
        // this allows systems to dry run the commands, see the resulting cluster state, and act on it
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // we ignore disable allocation, because commands are explicit

@ -305,7 +304,8 @@ public class AllocationService extends AbstractComponent {
        RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
        allocation.debugDecision(debug);
        if (!reroute(allocation)) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@ -437,7 +437,7 @@ public class AllocationService extends AbstractComponent {
        // now, go over all the shards routing on the node, and fail them
        for (ShardRouting shardRouting : node.copyShards()) {
            UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
                allocation.getCurrentNanoTime(), System.currentTimeMillis());
                0, allocation.getCurrentNanoTime(), System.currentTimeMillis());
            applyFailedShard(allocation, shardRouting, false, unassignedInfo);
        }
        // its a dead node, remove it, note, its important to remove it *after* we apply failed shard

@ -457,8 +457,8 @@ public class AllocationService extends AbstractComponent {
        boolean changed = false;
        for (ShardRouting routing : replicas) {
            changed |= applyFailedShard(allocation, routing, false,
                new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
        }
        return changed;
    }

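The new `retryFailed` parameter is what the `retry_failed` flag of the reroute API feeds into; it reaches the deciders through `RoutingAllocation#isRetryFailed()`. A sketch of the two call shapes, assuming `service`, `state`, and `commands` are in scope:

// Ordinary command reroute: the failed-allocation limit still applies.
RoutingAllocation.Result normal = service.reroute(state, commands, false, false);

// Operator-requested retry, i.e. what POST /_cluster/reroute?retry_failed=true triggers:
// deciders observe isRetryFailed() == true and allow one more allocation attempt.
RoutingAllocation.Result retried = service.reroute(state, commands, false, true);
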
@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;

@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {

    private final List<FailedShard> failedShards;

    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);
        this.failedShards = failedShards;
    }

@ -20,12 +20,14 @@
package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;

import java.util.HashMap;

@ -118,8 +120,12 @@ public class RoutingAllocation {

    private final RoutingNodes routingNodes;

    private final MetaData metaData;

    private final DiscoveryNodes nodes;

    private final ImmutableOpenMap<String, ClusterState.Custom> customs;

    private final AllocationExplanation explanation = new AllocationExplanation();

    private final ClusterInfo clusterInfo;

@ -128,6 +134,8 @@ public class RoutingAllocation {

    private boolean ignoreDisable = false;

    private final boolean retryFailed;

    private boolean debugDecision = false;

    private boolean hasPendingAsyncFetch = false;

@ -139,15 +147,18 @@ public class RoutingAllocation {
     * Creates a new {@link RoutingAllocation}
     * @param deciders {@link AllocationDeciders} used to make decisions for routing allocations
     * @param routingNodes Routing nodes in the current cluster
     * @param nodes TODO: Documentation
     * @param clusterState cluster state before rerouting
     * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
     */
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
        this.deciders = deciders;
        this.routingNodes = routingNodes;
        this.nodes = nodes;
        this.metaData = clusterState.metaData();
        this.nodes = clusterState.nodes();
        this.customs = clusterState.customs();
        this.clusterInfo = clusterInfo;
        this.currentNanoTime = currentNanoTime;
        this.retryFailed = retryFailed;
    }

    /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */

@ -184,7 +195,7 @@ public class RoutingAllocation {
     * @return Metadata of routing nodes
     */
    public MetaData metaData() {
        return routingNodes.metaData();
        return metaData;
    }

    /**

@ -199,6 +210,10 @@ public class RoutingAllocation {
        return clusterInfo;
    }

    public <T extends ClusterState.Custom> T custom(String key) {
        return (T)customs.get(key);
    }

    /**
     * Get explanations of current routing
     * @return explanation of routing

@ -285,4 +300,8 @@ public class RoutingAllocation {
    public void setHasPendingAsyncFetch() {
        this.hasPendingAsyncFetch = true;
    }

    public boolean isRetryFailed() {
        return retryFailed;
    }
}

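Since the allocation now captures metadata, nodes, and customs directly from the cluster state, callers no longer reach through `RoutingNodes` for them. A sketch under the assumption that `deciders`, `routingNodes`, `clusterState`, and `clusterInfo` are in scope:

RoutingAllocation allocation = new RoutingAllocation(deciders, routingNodes, clusterState,
    clusterInfo, System.nanoTime(), false);

MetaData metaData = allocation.metaData();                                   // previously routingNodes.metaData()
SnapshotsInProgress snapshots = allocation.custom(SnapshotsInProgress.TYPE); // previously routingNodes.custom(...)
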
@ -20,7 +20,7 @@
package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;

@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {

    private final List<? extends ShardRouting> startedShards;

    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, clusterState, clusterInfo, System.nanoTime(), false);
        this.startedShards = startedShards;
    }

@ -47,4 +47,4 @@ public class StartedRerouteAllocation extends RoutingAllocation {
    public List<? extends ShardRouting> startedShards() {
        return startedShards;
    }
}
}

@ -225,7 +225,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
            this.weight = weight;
            this.threshold = threshold;
            this.routingNodes = allocation.routingNodes();
            metaData = routingNodes.metaData();
            this.metaData = allocation.metaData();
            avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
            buildModelFromAssigned();
        }

@ -125,7 +125,7 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
            // we need to move the unassigned info back to treat it as if it was index creation
            unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
                "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
                shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis());
                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis());
        }

        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);

@ -137,7 +137,7 @@ public class EnableAllocationDecider extends AllocationDecider {
            return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation");
        }

        Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
        Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings();
        final Rebalance enable;
        if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
            enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);

@ -105,7 +105,7 @@ public class FilterAllocationDecider extends AllocationDecider {
        Decision decision = shouldClusterFilter(node, allocation);
        if (decision != null) return decision;

        decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
        decision = shouldIndexFilter(allocation.metaData().getIndexSafe(shardRouting.index()), node, allocation);
        if (decision != null) return decision;

        return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");

@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

/**
 * An allocation decider that prevents shards from being allocated on any node if the shards allocation has been retried N times without
 * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until
 * the setting for <tt>index.allocation.max_retries</tt> is raised. The default value is <tt>5</tt>.
 * Note: This allocation decider also allows allocation of repeatedly failing shards when the <tt>/_cluster/reroute?retry_failed=true</tt>
 * API is manually invoked. This allows single retries without raising the limits.
 *
 * @see RoutingAllocation#isRetryFailed()
 */
public class MaxRetryAllocationDecider extends AllocationDecider {

    public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting("index.allocation.max_retries", 5, 0,
        Setting.Property.Dynamic, Setting.Property.IndexScope);

    public static final String NAME = "max_retry";

    /**
     * Initializes a new {@link MaxRetryAllocationDecider}
     *
     * @param settings {@link Settings} used by this {@link AllocationDecider}
     */
    @Inject
    public MaxRetryAllocationDecider(Settings settings) {
        super(settings);
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
        UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
        if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
            final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
            final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());
            if (allocation.isRetryFailed()) { // manual allocation - retry
                // if we are called via the _reroute API we ignore the failure counter and try to allocate
                // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
                // enough to manually retry.
                return allocation.decision(Decision.YES, NAME, "shard has already failed allocating ["
                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
                    + unassignedInfo.toString() + " - retrying once on manual allocation");
            } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
                return allocation.decision(Decision.NO, NAME, "shard has already failed allocating ["
                    + unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed "
                    + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry");
            }
        }
        return allocation.decision(Decision.YES, NAME, "shard has no previous failures");
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        return canAllocate(shardRouting, allocation);
    }
}

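Tying the decider to the failure counter introduced above, the outcomes for a shard whose UnassignedInfo reports n prior failures under the default limit of 5; a sketch, with the `shardRouting` and `allocation` values assumed to be in scope:

MaxRetryAllocationDecider decider = new MaxRetryAllocationDecider(Settings.EMPTY);

// n == 0             -> YES ("shard has no previous failures")
// 0 < n < 5          -> YES (below the limit, falls through to the same bottom YES)
// n >= 5             -> NO, until an operator calls /_cluster/reroute?retry_failed=true
// n >= 5, retry pass -> YES once; another failure parks the shard again
Decision decision = decider.canAllocate(shardRouting, allocation);
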
@ -86,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
        final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
        // Capture the limit here in case it changes during this method's
        // execution

@ -125,7 +125,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {

    @Override
    public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
        final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
        // Capture the limit here in case it changes during this method's
        // execution

@ -98,7 +98,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
        if (!enableRelocation && shardRouting.primary()) {
            // Only primary shards are snapshotted

            SnapshotsInProgress snapshotsInProgress = allocation.routingNodes().custom(SnapshotsInProgress.TYPE);
            SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE);
            if (snapshotsInProgress == null) {
                // Snapshots are not running
                return allocation.decision(Decision.YES, NAME, "no snapshots are currently running");

@ -19,9 +19,10 @@

package org.elasticsearch.common.logging;

import org.apache.log4j.Java9Hack;
import org.apache.log4j.PropertyConfigurator;
import org.apache.lucene.util.Constants;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;

@ -87,14 +88,17 @@ public class LogConfigurator {
        replacements.put("ttcc", "org.apache.log4j.TTCCLayout");
        replacements.put("xml", "org.apache.log4j.XMLLayout");
        REPLACEMENTS = unmodifiableMap(replacements);

        if (Constants.JRE_IS_MINIMUM_JAVA9) {
            Java9Hack.fixLog4j();
        }
    }

    private static boolean loaded;

    /**
     * Consolidates settings and converts them into actual log4j settings, then initializes loggers and appenders.
     *
     * @param settings custom settings that should be applied
     * @param settings      custom settings that should be applied
     * @param resolveConfig controls whether the logging conf file should be read too or not.
     */
    public static void configure(Settings settings, boolean resolveConfig) {

@ -109,7 +113,7 @@ public class LogConfigurator {
        if (resolveConfig) {
            resolveConfig(environment, settingsBuilder);
        }
        settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties());

        // add custom settings after config was added so that they are not overwritten by config
        settingsBuilder.put(settings);
        settingsBuilder.replacePropertyPlaceholders();

@ -53,6 +53,15 @@ public enum DateTimeUnit {
        return field;
    }

    /**
     * @param unit the {@link DateTimeUnit} to check
     * @return true if the unit is a day or longer
     */
    public static boolean isDayOrLonger(DateTimeUnit unit) {
        return (unit == DateTimeUnit.HOUR_OF_DAY || unit == DateTimeUnit.MINUTES_OF_HOUR
            || unit == DateTimeUnit.SECOND_OF_MINUTE) == false;
    }

    public static DateTimeUnit resolve(byte id) {
        switch (id) {
            case 1: return WEEK_OF_WEEKYEAR;

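Concretely, only the three sub-day constants named in the predicate report false; every longer unit reports true. For example:

assert DateTimeUnit.isDayOrLonger(DateTimeUnit.WEEK_OF_WEEKYEAR);
assert DateTimeUnit.isDayOrLonger(DateTimeUnit.HOUR_OF_DAY) == false;
assert DateTimeUnit.isDayOrLonger(DateTimeUnit.SECOND_OF_MINUTE) == false;
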
@ -46,8 +46,8 @@ public abstract class TimeZoneRounding extends Rounding {

    public static class Builder {

        private DateTimeUnit unit;
        private long interval = -1;
        private final DateTimeUnit unit;
        private final long interval;

        private DateTimeZone timeZone = DateTimeZone.UTC;

@ -142,10 +142,15 @@ public abstract class TimeZoneRounding extends Rounding {

        @Override
        public long nextRoundingValue(long time) {
            long timeLocal = time;
            timeLocal = timeZone.convertUTCToLocal(time);
            long nextInLocalTime = durationField.add(timeLocal, 1);
            return timeZone.convertLocalToUTC(nextInLocalTime, false);
            if (DateTimeUnit.isDayOrLonger(unit)) {
                time = timeZone.convertUTCToLocal(time);
            }
            long next = durationField.add(time, 1);
            if (DateTimeUnit.isDayOrLonger(unit)) {
                return timeZone.convertLocalToUTC(next, false);
            } else {
                return next;
            }
        }

        @Override

@ -161,12 +166,12 @@ public abstract class TimeZoneRounding extends Rounding {
            out.writeByte(unit.id());
            out.writeString(timeZone.getID());
        }


        @Override
        public int hashCode() {
            return Objects.hash(unit, timeZone);
        }


        @Override
        public boolean equals(Object obj) {
            if (obj == null) {

@ -236,12 +241,12 @@ public abstract class TimeZoneRounding extends Rounding {
            out.writeVLong(interval);
            out.writeString(timeZone.getID());
        }


        @Override
        public int hashCode() {
            return Objects.hash(interval, timeZone);
        }


        @Override
        public boolean equals(Object obj) {
            if (obj == null) {

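The point of the conditional conversion: day-or-longer boundaries are defined in local time and must be computed there, while fixed-length units would be corrupted by a DST offset change. A simplified Joda-Time sketch of the two paths; the zone, instant, and fixed 24-hour day are illustrative only, since the real code advances via the unit's durationField:

// A simplified illustration, assuming org.joda.time is on the classpath.
static void dstSketch() {
    DateTimeZone zone = DateTimeZone.forID("Europe/Berlin"); // UTC+1, UTC+2 in summer
    long utc = new DateTime(2016, 3, 27, 0, 30, DateTimeZone.UTC).getMillis(); // spring-forward night

    // Day or longer: round-trip through local time so the next boundary lands on
    // the local midnight, even though that calendar day is only 23 hours long.
    long local = zone.convertUTCToLocal(utc);
    long nextDayUtc = zone.convertLocalToUTC(local + 24L * 60 * 60 * 1000, false);

    // Shorter than a day: advance directly in UTC; an hour is always 60 minutes,
    // no matter what the local clock does at the DST transition.
    long nextHourUtc = utc + 60L * 60 * 1000;
}
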
@ -87,6 +87,7 @@ import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;

@ -374,7 +375,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
        BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
        ClusterName.CLUSTER_NAME_SETTING,
        Client.CLIENT_TYPE_SETTING_S,
        InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
        ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
        EsExecutors.PROCESSORS_SETTING,
        ThreadContext.DEFAULT_HEADERS_SETTING,

@ -397,6 +397,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
        JvmGcMonitorService.ENABLED_SETTING,
        JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
        JvmGcMonitorService.GC_SETTING,
        JvmGcMonitorService.GC_OVERHEAD_WARN_SETTING,
        JvmGcMonitorService.GC_OVERHEAD_INFO_SETTING,
        JvmGcMonitorService.GC_OVERHEAD_DEBUG_SETTING,
        PageCacheRecycler.LIMIT_HEAP_SETTING,
        PageCacheRecycler.WEIGHT_BYTES_SETTING,
        PageCacheRecycler.WEIGHT_INT_SETTING,

@ -417,6 +420,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
        ResourceWatcherService.ENABLED,
        ResourceWatcherService.RELOAD_INTERVAL_HIGH,
        ResourceWatcherService.RELOAD_INTERVAL_MEDIUM,
        ResourceWatcherService.RELOAD_INTERVAL_LOW
        ResourceWatcherService.RELOAD_INTERVAL_LOW,
        SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING
    )));
}

@ -21,6 +21,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.gateway.PrimaryShardAllocator;

@ -35,12 +36,11 @@ import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.indices.IndicesRequestCache;

import java.util.Arrays;

@ -59,6 +59,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
    public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetaData.INDEX_SETTING_PREFIX);

    public static final Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY,
        IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING,
        IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,

@ -126,7 +127,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
        Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
        PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
        PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
        MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
        MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
        MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,

@ -18,19 +18,6 @@
 */
package org.elasticsearch.common.settings;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;

@ -50,6 +37,19 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * A setting. Encapsulates typical stuff like default value, parsing, and scope.
 * Some (SettingsProperty.Dynamic) can be modified at run time using the API.

@ -504,7 +504,7 @@ public class Setting<T> extends ToXContentToBytes {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            if (value > maxValue) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
            }
            return value;
        }

@ -537,6 +537,10 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
    }

    public static Setting<Boolean> boolSetting(String key, Function<Settings, String> defaultValueFn, Property... properties) {
        return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }

@ -572,7 +576,7 @@ public class Setting<T> extends ToXContentToBytes {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            if (value.bytes() > maxValue.bytes()) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
            }
            return value;
        }

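The new `boolSetting` overload accepts a default computed from the settings themselves rather than a fixed value or a fallback `Setting`. A hypothetical usage, with both keys made up for illustration:

// Default "my.feature.enabled" to whatever "my.parent.enabled" resolves to, or "false".
Setting<Boolean> FEATURE_ENABLED = Setting.boolSetting("my.feature.enabled",
    settings -> settings.get("my.parent.enabled", "false"), Property.NodeScope);
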
@ -58,9 +58,11 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue;
import static org.elasticsearch.common.unit.SizeValue.parseSizeValue;

@ -942,89 +944,54 @@ public final class Settings implements ToXContent {
            return this;
        }

        /**
         * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
         *
         * @param prefix The prefix to filter property key by
         * @param properties The properties to put
         * @return The builder
         */
        public Builder putProperties(String prefix, Dictionary<Object, Object> properties) {
            for (Object property : Collections.list(properties.keys())) {
                String key = Objects.toString(property);
                String value = Objects.toString(properties.get(property));
                if (key.startsWith(prefix)) {
                    map.put(key.substring(prefix.length()), value);
        public Builder putProperties(Map<String, String> esSettings, Predicate<String> keyPredicate, Function<String, String> keyFunction) {
            for (final Map.Entry<String, String> esSetting : esSettings.entrySet()) {
                final String key = esSetting.getKey();
                if (keyPredicate.test(key)) {
                    map.put(keyFunction.apply(key), esSetting.getValue());
                }
            }
            return this;
        }

        /**
         * Puts all the properties with keys starting with the provided <tt>prefix</tt>.
         *
         * @param prefix The prefix to filter property key by
         * @param properties The properties to put
         * @return The builder
         */
        public Builder putProperties(String prefix, Dictionary<Object, Object> properties, String ignorePrefix) {
            for (Object property : Collections.list(properties.keys())) {
                String key = Objects.toString(property);
                String value = Objects.toString(properties.get(property));
                if (key.startsWith(prefix)) {
                    if (!key.startsWith(ignorePrefix)) {
                        map.put(key.substring(prefix.length()), value);
                    }
                }
            }
            return this;
        }

        /**
         * Runs across all the settings set on this builder and replaces <tt>${...}</tt> elements in the
         * each setting value according to the following logic:
         * <p>
         * First, tries to resolve it against a System property ({@link System#getProperty(String)}), next,
         * tries and resolve it against an environment variable ({@link System#getenv(String)}), and last, tries
         * and replace it with another setting already set on this builder.
         * Runs across all the settings set on this builder and
         * replaces <tt>${...}</tt> elements in each setting with
         * another setting already set on this builder.
         */
        public Builder replacePropertyPlaceholders() {
            return replacePropertyPlaceholders(System::getenv);
        }

        // visible for testing
        Builder replacePropertyPlaceholders(Function<String, String> getenv) {
            PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
            PropertyPlaceholder.PlaceholderResolver placeholderResolver = new PropertyPlaceholder.PlaceholderResolver() {
                @Override
                public String resolvePlaceholder(String placeholderName) {
                    if (placeholderName.startsWith("env.")) {
                        // explicit env var prefix
                        return System.getenv(placeholderName.substring("env.".length()));
                    }
                    String value = System.getProperty(placeholderName);
                    if (value != null) {
                        return value;
                    }
                    value = System.getenv(placeholderName);
                    if (value != null) {
                        return value;
                    }
                    return map.get(placeholderName);
                @Override
                public String resolvePlaceholder(String placeholderName) {
                    final String value = getenv.apply(placeholderName);
                    if (value != null) {
                        return value;
                    }
                    return map.get(placeholderName);
                }

                @Override
                public boolean shouldIgnoreMissing(String placeholderName) {
                    // if its an explicit env var, we are ok with not having a value for it and treat it as optional
                    if (placeholderName.startsWith("env.") || placeholderName.startsWith("prompt.")) {
                        return true;
                    }
                    return false;
                }

                @Override
                public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
                    if (placeholderName.startsWith("prompt.")) {
                        return false;
                    }
                @Override
                public boolean shouldIgnoreMissing(String placeholderName) {
                    if (placeholderName.startsWith("prompt.")) {
                        return true;
                    }
                };
                    return false;
                }

                @Override
                public boolean shouldRemoveMissingPlaceholder(String placeholderName) {
                    if (placeholderName.startsWith("prompt.")) {
                        return false;
                    }
                    return true;
                }
            };
            for (Map.Entry<String, String> entry : new HashMap<>(map).entrySet()) {
                String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver);
                // if the values exists and has length, we should maintain it in the map

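With the `Dictionary`-based overloads gone, callers now pass the filtering and key-rewriting explicitly. A sketch of how a caller like `LogConfigurator` above might forward `es.`-prefixed properties; the map contents are made up:

Map<String, String> esSettings = new HashMap<>();
esSettings.put("es.logger.level", "DEBUG");
esSettings.put("java.home", "/usr/lib/jvm"); // filtered out by the predicate

Settings settings = Settings.builder()
    .putProperties(esSettings,
        key -> key.startsWith("es."),           // keep only es.* keys
        key -> key.substring("es.".length()))   // strip the prefix before storing
    .build();
// settings now contains logger.level=DEBUG
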
@ -65,7 +65,12 @@ public class SettingsModule extends AbstractModule {
    protected void configure() {
        final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
        Settings indexSettings = settings.filter((s) -> s.startsWith("index.") && clusterSettings.get(s) == null);
        Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") &&
            // special case - we want to get Did you mean indices.query.bool.max_clause_count
            // which means we need to by-pass this check for this setting
            // TODO remove in 6.0!!
            "index.query.bool.max_clause_count".equals(s) == false)
            && clusterSettings.get(s) == null);
        if (indexSettings.isEmpty() == false) {
            try {
                String separator = IntStream.range(0, 85).mapToObj(s -> "*").collect(Collectors.joining("")).trim();

@ -151,66 +151,7 @@ public final class Fuzziness implements ToXContent, Writeable {
                return 1;
            }
        }
        return Math.min(2, asInt());
    }

    public TimeValue asTimeValue() {
        if (this.equals(AUTO)) {
            return TimeValue.timeValueMillis(1);
        } else {
            return TimeValue.parseTimeValue(fuzziness.toString(), null, "fuzziness");
        }
    }

    public long asLong() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Long.parseLong(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (long) Double.parseDouble(fuzziness.toString());
        }
    }

    public int asInt() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Integer.parseInt(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (int) Float.parseFloat(fuzziness.toString());
        }
    }

    public short asShort() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Short.parseShort(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (short) Float.parseFloat(fuzziness.toString());
        }
    }

    public byte asByte() {
        if (this.equals(AUTO)) {
            return 1;
        }
        try {
            return Byte.parseByte(fuzziness.toString());
        } catch (NumberFormatException ex) {
            return (byte) Float.parseFloat(fuzziness.toString());
        }
    }

    public double asDouble() {
        if (this.equals(AUTO)) {
            return 1d;
        }
        return Double.parseDouble(fuzziness.toString());
        return Math.min(2, (int) asFloat());
    }

    public float asFloat() {

@ -1,629 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.common.util;
|
||||
|
||||
import org.apache.lucene.store.DataInput;
|
||||
import org.apache.lucene.store.DataOutput;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.hash.MurmurHash3;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.SizeValue;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
|
||||
/**
|
||||
* A bloom filter. Inspired by Guava bloom filter implementation though with some optimizations.
|
||||
*/
|
||||
public class BloomFilter {
|
||||
|
||||
/**
|
||||
* A factory that can use different fpp based on size.
|
||||
*/
|
||||
public static class Factory {
|
||||
|
||||
public static final Factory DEFAULT = buildDefault();
|
||||
|
||||
private static Factory buildDefault() {
|
||||
// Some numbers:
|
||||
// 10k =0.001: 140.4kb , 10 Hashes
|
||||
// 10k =0.01 : 93.6kb , 6 Hashes
|
||||
// 100k=0.01 : 936.0kb , 6 Hashes
|
||||
// 100k=0.03 : 712.7kb , 5 Hashes
|
||||
// 500k=0.01 : 4.5mb , 6 Hashes
|
||||
// 500k=0.03 : 3.4mb , 5 Hashes
|
||||
// 500k=0.05 : 2.9mb , 4 Hashes
|
||||
// 1m=0.01 : 9.1mb , 6 Hashes
|
||||
// 1m=0.03 : 6.9mb , 5 Hashes
|
||||
// 1m=0.05 : 5.9mb , 4 Hashes
|
||||
// 5m=0.01 : 45.7mb , 6 Hashes
|
||||
// 5m=0.03 : 34.8mb , 5 Hashes
|
||||
// 5m=0.05 : 29.7mb , 4 Hashes
|
||||
// 50m=0.01 : 457.0mb , 6 Hashes
|
||||
// 50m=0.03 : 297.3mb , 4 Hashes
|
||||
// 50m=0.10 : 228.5mb , 3 Hashes
|
||||
return buildFromString("10k=0.01,1m=0.03");
|
||||
}
|
||||
|
||||
/**
|
||||
* Supports just passing fpp, as in "0.01", and also ranges, like "50k=0.01,1m=0.05". If
|
||||
* its null, returns {@link #buildDefault()}.
|
||||
*/
|
||||
public static Factory buildFromString(@Nullable String config) {
|
||||
if (config == null) {
|
||||
return buildDefault();
|
||||
}
|
||||
String[] sEntries = config.split(",");
|
||||
if (sEntries.length == 0) {
|
||||
if (config.length() > 0) {
|
||||
return new Factory(new Entry[]{new Entry(0, Double.parseDouble(config))});
|
||||
}
|
||||
return buildDefault();
|
||||
}
|
||||
Entry[] entries = new Entry[sEntries.length];
|
||||
for (int i = 0; i < sEntries.length; i++) {
|
||||
int index = sEntries[i].indexOf('=');
|
||||
entries[i] = new Entry(
|
||||
(int) SizeValue.parseSizeValue(sEntries[i].substring(0, index).trim()).singles(),
|
||||
Double.parseDouble(sEntries[i].substring(index + 1).trim())
|
||||
);
|
||||
}
|
||||
return new Factory(entries);
|
||||
}
|
||||
|
||||
private final Entry[] entries;
|
||||
|
||||
public Factory(Entry[] entries) {
|
||||
this.entries = entries;
|
||||
// the order is from the upper most expected insertions to the lowest
|
||||
Arrays.sort(this.entries, new Comparator<Entry>() {
|
||||
@Override
|
||||
public int compare(Entry o1, Entry o2) {
|
||||
return o2.expectedInsertions - o1.expectedInsertions;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public BloomFilter createFilter(int expectedInsertions) {
|
||||
for (Entry entry : entries) {
|
||||
if (expectedInsertions > entry.expectedInsertions) {
|
||||
return BloomFilter.create(expectedInsertions, entry.fpp);
|
||||
}
|
||||
}
|
||||
return BloomFilter.create(expectedInsertions, 0.03);
|
||||
}
|
||||
|
||||
public static class Entry {
|
||||
public final int expectedInsertions;
|
||||
public final double fpp;
|
||||
|
||||
Entry(int expectedInsertions, double fpp) {
|
||||
this.expectedInsertions = expectedInsertions;
|
||||
this.fpp = fpp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Creates a bloom filter based on the expected number of insertions and the
 * expected false positive probability.
 *
 * @param expectedInsertions the number of expected insertions to the constructed bloom filter
 * @param fpp the desired false positive probability (must be positive and less than 1.0)
 */
public static BloomFilter create(int expectedInsertions, double fpp) {
    return create(expectedInsertions, fpp, -1);
}

/**
 * Creates a bloom filter based on the expected number of insertions, expected false positive probability,
 * and number of hash functions.
 *
 * @param expectedInsertions the number of expected insertions to the constructed bloom filter
 * @param fpp the desired false positive probability (must be positive and less than 1.0)
 * @param numHashFunctions the number of hash functions to use (must be less than or equal to 255)
 */
public static BloomFilter create(int expectedInsertions, double fpp, int numHashFunctions) {
    if (expectedInsertions == 0) {
        expectedInsertions = 1;
    }
    /*
     * TODO(user): Put a warning in the javadoc about tiny fpp values,
     * since the resulting size is proportional to -log(p), but there is not
     * much of a point after all, e.g. optimalM(1000, 0.0000000000000001) = 76680
     * which is less than 10kb. Who cares!
     */
    long numBits = optimalNumOfBits(expectedInsertions, fpp);

    // calculate the optimal number of hash functions
    if (numHashFunctions == -1) {
        numHashFunctions = optimalNumOfHashFunctions(expectedInsertions, numBits);
    }

    try {
        return new BloomFilter(new BitArray(numBits), numHashFunctions, Hashing.DEFAULT);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e);
    }
}

public static void skipBloom(IndexInput in) throws IOException {
    int version = in.readInt(); // we do nothing with this now..., defaults to 0
    final int numLongs = in.readInt();
    in.seek(in.getFilePointer() + (numLongs * 8) + 4 + 4); // filter + numberOfHashFunctions + hashType
}

public static BloomFilter deserialize(DataInput in) throws IOException {
    int version = in.readInt(); // we do nothing with this now..., defaults to 0
    int numLongs = in.readInt();
    long[] data = new long[numLongs];
    for (int i = 0; i < numLongs; i++) {
        data[i] = in.readLong();
    }
    int numberOfHashFunctions = in.readInt();
    int hashType = in.readInt();
    return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
}

public static void serialize(BloomFilter filter, DataOutput out) throws IOException {
    out.writeInt(0); // version
    BitArray bits = filter.bits;
    out.writeInt(bits.data.length);
    for (long l : bits.data) {
        out.writeLong(l);
    }
    out.writeInt(filter.numHashFunctions);
    out.writeInt(filter.hashing.type()); // hashType
}

public static BloomFilter readFrom(StreamInput in) throws IOException {
    int version = in.readVInt(); // we do nothing with this now..., defaults to 0
    int numLongs = in.readVInt();
    long[] data = new long[numLongs];
    for (int i = 0; i < numLongs; i++) {
        data[i] = in.readLong();
    }
    int numberOfHashFunctions = in.readVInt();
    int hashType = in.readVInt(); // again, nothing to do now...
    return new BloomFilter(new BitArray(data), numberOfHashFunctions, Hashing.fromType(hashType));
}

public static void writeTo(BloomFilter filter, StreamOutput out) throws IOException {
    out.writeVInt(0); // version
    BitArray bits = filter.bits;
    out.writeVInt(bits.data.length);
    for (long l : bits.data) {
        out.writeLong(l);
    }
    out.writeVInt(filter.numHashFunctions);
    out.writeVInt(filter.hashing.type()); // hashType
}

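// Layout shared by the serialization methods above, read directly off the code:
//   version:int | numLongs:int | data:long[numLongs] | numHashFunctions:int | hashType:int
// serialize()/deserialize() write fixed-width ints on DataOutput/DataInput, while
// writeTo()/readFrom() encode the same fields as vInts on StreamOutput/StreamInput, so the
// two encodings are not interchangeable; skipBloom() depends on the fixed-width form to
// seek past the filter without reading it.
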
/**
 * The bit set of the BloomFilter (not necessarily power of 2!)
 */
final BitArray bits;
/**
 * Number of hashes per element
 */
final int numHashFunctions;

final Hashing hashing;

BloomFilter(BitArray bits, int numHashFunctions, Hashing hashing) {
    this.bits = bits;
    this.numHashFunctions = numHashFunctions;
    this.hashing = hashing;
    /*
     * This only exists to forbid BFs that cannot use the compact persistent representation.
     * If it ever throws for a user who was not intending to use that representation, we
     * should reconsider.
     */
    if (numHashFunctions > 255) {
        throw new IllegalArgumentException("Currently we don't allow BloomFilters that would use more than 255 hash functions");
    }
}

public boolean put(BytesRef value) {
    return hashing.put(value, numHashFunctions, bits);
}

public boolean mightContain(BytesRef value) {
    return hashing.mightContain(value, numHashFunctions, bits);
}

public int getNumHashFunctions() {
    return this.numHashFunctions;
}

public long getSizeInBytes() {
    return bits.ramBytesUsed();
}

@Override
public int hashCode() {
    return bits.hashCode() + numHashFunctions;
}

/*
 * Cheat sheet:
 *
 * m: total bits
 * n: expected insertions
 * b: m/n, bits per insertion
 * p: expected false positive probability
 *
 * 1) Optimal k = b * ln2
 * 2) p = (1 - e ^ (-kn/m))^k
 * 3) For optimal k: p = 2 ^ (-k) ~= 0.6185^b
 * 4) For optimal k: m = -nlnp / ((ln2) ^ 2)
 */

/**
 * Computes the optimal k (number of hashes per element inserted in Bloom filter), given the
 * expected insertions and total number of bits in the Bloom filter.
 * <p>
 * See http://en.wikipedia.org/wiki/File:Bloom_filter_fp_probability.svg for the formula.
 *
 * @param n expected insertions (must be positive)
 * @param m total number of bits in Bloom filter (must be positive)
 */
static int optimalNumOfHashFunctions(long n, long m) {
    return Math.max(1, (int) Math.round(m / n * Math.log(2)));
}

/**
 * Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
 * expected insertions, the required false positive probability.
 * <p>
 * See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the formula.
 *
 * @param n expected insertions (must be positive)
 * @param p false positive rate (must be 0 < p < 1)
 */
static long optimalNumOfBits(long n, double p) {
    if (p == 0) {
        p = Double.MIN_VALUE;
    }
    return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
}

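// Worked example (illustration only, not in the original source): for n = 1,000,000 and
// p = 0.03, optimalNumOfBits yields m = -n * ln(0.03) / (ln 2)^2, roughly 7,298,440 bits,
// about 6.9 megabits, which suggests the "mb" sizes in the cheat-sheet comment near the
// top of this class are megabits rather than megabytes. In optimalNumOfHashFunctions,
// m / n is integer long division (7 here), so it returns round(7 * ln 2) = 5, matching the
// "1m=0.03 : 6.9mb , 5 Hashes" row.
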
// Note: We use this instead of java.util.BitSet because we need access to the long[] data field
static final class BitArray {
    final long[] data;
    final long bitSize;
    long bitCount;

    BitArray(long bits) {
        this(new long[size(bits)]);
    }

    private static int size(long bits) {
        long quotient = bits / 64;
        long remainder = bits - quotient * 64;
        return Math.toIntExact(remainder == 0 ? quotient : 1 + quotient);
    }

    // Used by serialization
    BitArray(long[] data) {
        this.data = data;
        long bitCount = 0;
        for (long value : data) {
            bitCount += Long.bitCount(value);
        }
        this.bitCount = bitCount;
        this.bitSize = data.length * Long.SIZE;
    }

    /** Returns true if the bit changed value. */
    boolean set(long index) {
        if (!get(index)) {
            data[(int) (index >>> 6)] |= (1L << index);
            bitCount++;
            return true;
        }
        return false;
    }

    boolean get(long index) {
        return (data[(int) (index >>> 6)] & (1L << index)) != 0;
    }

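    // Note on set()/get() above: Java's long shifts use only the low six bits of the shift
    // count, so (1L << index) behaves as (1L << (index & 63)) and pairs with the word index
    // computed by (index >>> 6) without any explicit masking.
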
    /** Number of bits */
    long bitSize() {
        return bitSize;
    }

    /** Number of set bits (1s) */
    long bitCount() {
        return bitCount;
    }

    BitArray copy() {
        return new BitArray(data.clone());
    }

    /** Combines the two BitArrays using bitwise OR. */
    void putAll(BitArray array) {
        bitCount = 0;
        for (int i = 0; i < data.length; i++) {
            data[i] |= array.data[i];
            bitCount += Long.bitCount(data[i]);
        }
    }

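    // putAll() above implicitly assumes the argument was sized for the same filter: a
    // shorter array.data throws ArrayIndexOutOfBoundsException, and extra words in a
    // longer one are silently ignored.
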
    @Override public boolean equals(Object o) {
        if (o instanceof BitArray) {
            BitArray bitArray = (BitArray) o;
            return Arrays.equals(data, bitArray.data);
        }
        return false;
    }

    @Override public int hashCode() {
        return Arrays.hashCode(data);
    }

    public long ramBytesUsed() {
        return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
    }
}

enum Hashing {

    V0() {
        @Override
        protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
            long bitSize = bits.bitSize();
            long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
            int hash1 = (int) hash64;
            int hash2 = (int) (hash64 >>> 32);
            boolean bitsChanged = false;
            for (int i = 1; i <= numHashFunctions; i++) {
                int nextHash = hash1 + i * hash2;
                if (nextHash < 0) {
                    nextHash = ~nextHash;
                }
                bitsChanged |= bits.set(nextHash % bitSize);
            }
            return bitsChanged;
        }

        @Override
        protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
            long bitSize = bits.bitSize();
            long hash64 = hash3_x64_128(value.bytes, value.offset, value.length, 0);
            int hash1 = (int) hash64;
            int hash2 = (int) (hash64 >>> 32);
            for (int i = 1; i <= numHashFunctions; i++) {
                int nextHash = hash1 + i * hash2;
                if (nextHash < 0) {
                    nextHash = ~nextHash;
                }
                if (!bits.get(nextHash % bitSize)) {
                    return false;
                }
            }
            return true;
        }

        @Override
        protected int type() {
            return 0;
        }
    },
    V1() {
        @Override
        protected boolean put(BytesRef value, int numHashFunctions, BitArray bits) {
            long bitSize = bits.bitSize();
            MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());

            boolean bitsChanged = false;
            long combinedHash = hash128.h1;
            for (int i = 0; i < numHashFunctions; i++) {
                // Make the combined hash positive and indexable
                bitsChanged |= bits.set((combinedHash & Long.MAX_VALUE) % bitSize);
                combinedHash += hash128.h2;
            }
            return bitsChanged;
        }

        @Override
        protected boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits) {
            long bitSize = bits.bitSize();
            MurmurHash3.Hash128 hash128 = MurmurHash3.hash128(value.bytes, value.offset, value.length, 0, new MurmurHash3.Hash128());

            long combinedHash = hash128.h1;
            for (int i = 0; i < numHashFunctions; i++) {
                // Make the combined hash positive and indexable
                if (!bits.get((combinedHash & Long.MAX_VALUE) % bitSize)) {
                    return false;
                }
                combinedHash += hash128.h2;
            }
            return true;
        }

        @Override
        protected int type() {
            return 1;
        }
    };

    protected abstract boolean put(BytesRef value, int numHashFunctions, BitArray bits);

    protected abstract boolean mightContain(BytesRef value, int numHashFunctions, BitArray bits);

    protected abstract int type();

    public static final Hashing DEFAULT = Hashing.V1;

    public static Hashing fromType(int type) {
        if (type == 0) {
            return Hashing.V0;
        } else if (type == 1) {
            return Hashing.V1;
        } else {
            throw new IllegalArgumentException("no hashing type matching " + type);
        }
    }
}

// START : MURMUR 3_128 USED FOR Hashing.V0
// NOTE: don't replace this code with the o.e.common.hashing.MurmurHash3 method which returns a different hash

protected static long getblock(byte[] key, int offset, int index) {
    int i_8 = index << 3;
    int blockOffset = offset + i_8;
    return ((long) key[blockOffset + 0] & 0xff) + (((long) key[blockOffset + 1] & 0xff) << 8) +
            (((long) key[blockOffset + 2] & 0xff) << 16) + (((long) key[blockOffset + 3] & 0xff) << 24) +
            (((long) key[blockOffset + 4] & 0xff) << 32) + (((long) key[blockOffset + 5] & 0xff) << 40) +
            (((long) key[blockOffset + 6] & 0xff) << 48) + (((long) key[blockOffset + 7] & 0xff) << 56);
}

protected static long rotl64(long v, int n) {
    return ((v << n) | (v >>> (64 - n)));
}

protected static long fmix(long k) {
    k ^= k >>> 33;
    k *= 0xff51afd7ed558ccdL;
    k ^= k >>> 33;
    k *= 0xc4ceb9fe1a85ec53L;
    k ^= k >>> 33;

    return k;
}

@SuppressWarnings("fallthrough") // Uses fallthrough to implement a well-known hashing algorithm
public static long hash3_x64_128(byte[] key, int offset, int length, long seed) {
    final int nblocks = length >> 4; // Process as 128-bit blocks.

    long h1 = seed;
    long h2 = seed;

    long c1 = 0x87c37b91114253d5L;
    long c2 = 0x4cf5ad432745937fL;

    //----------
    // body

    for (int i = 0; i < nblocks; i++) {
        long k1 = getblock(key, offset, i * 2 + 0);
        long k2 = getblock(key, offset, i * 2 + 1);

        k1 *= c1;
        k1 = rotl64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = rotl64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = rotl64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = rotl64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    //----------
    // tail

    // Advance offset to the unprocessed tail of the data.
    offset += nblocks * 16;

    long k1 = 0;
    long k2 = 0;

    switch (length & 15) {
        case 15:
            k2 ^= ((long) key[offset + 14]) << 48;
        case 14:
            k2 ^= ((long) key[offset + 13]) << 40;
        case 13:
            k2 ^= ((long) key[offset + 12]) << 32;
        case 12:
            k2 ^= ((long) key[offset + 11]) << 24;
        case 11:
            k2 ^= ((long) key[offset + 10]) << 16;
        case 10:
            k2 ^= ((long) key[offset + 9]) << 8;
        case 9:
            k2 ^= ((long) key[offset + 8]) << 0;
            k2 *= c2;
            k2 = rotl64(k2, 33);
            k2 *= c1;
            h2 ^= k2;

        case 8:
            k1 ^= ((long) key[offset + 7]) << 56;
        case 7:
            k1 ^= ((long) key[offset + 6]) << 48;
        case 6:
            k1 ^= ((long) key[offset + 5]) << 40;
        case 5:
            k1 ^= ((long) key[offset + 4]) << 32;
        case 4:
            k1 ^= ((long) key[offset + 3]) << 24;
        case 3:
            k1 ^= ((long) key[offset + 2]) << 16;
        case 2:
            k1 ^= ((long) key[offset + 1]) << 8;
        case 1:
            k1 ^= (key[offset]);
            k1 *= c1;
            k1 = rotl64(k1, 31);
            k1 *= c2;
            h1 ^= k1;
    }

    //----------
    // finalization

    h1 ^= length;
    h2 ^= length;

    h1 += h2;
    h2 += h1;

    h1 = fmix(h1);
    h2 = fmix(h2);

    h1 += h2;
    h2 += h1;

    //return (new long[]{h1, h2});
    // SAME AS GUAVA, they take the first long out of the 128bit
    return h1;
}

// END: MURMUR 3_128
}
@ -54,6 +54,8 @@ public class Iterables {
        }
    }

    /** Flattens the two level {@code Iterable} into a single {@code Iterable}. Note that this pre-caches the values from the outer {@code
     * Iterable}, but not the values from the inner one. */
    public static <T> Iterable<T> flatten(Iterable<? extends Iterable<T>> inputs) {
        Objects.requireNonNull(inputs);
        return new FlattenedIterables<>(inputs);

@ -32,6 +32,7 @@ import java.nio.file.Path;
import java.nio.file.attribute.FileAttributeView;
import java.nio.file.attribute.FileStoreAttributeView;
import java.util.Arrays;
import java.util.List;

/**
 * Implementation of FileStore that supports

@ -44,6 +45,8 @@ class ESFileStore extends FileStore {
    final FileStore in;
    /** Cached result of Lucene's {@code IOUtils.spins} on path. */
    final Boolean spins;
    int majorDeviceNumber;
    int minorDeviceNumber;

    @SuppressForbidden(reason = "tries to determine if disk is spinning")
    // TODO: move PathUtils to be package-private here instead of

@ -58,6 +61,21 @@ class ESFileStore extends FileStore {
            } catch (Exception e) {
                spins = null;
            }
            try {
                final List<String> lines = Files.readAllLines(PathUtils.get("/proc/self/mountinfo"));
                for (final String line : lines) {
                    final String[] fields = line.trim().split("\\s+");
                    final String mountPoint = fields[4];
                    if (mountPoint.equals(getMountPointLinux(in))) {
                        final String[] deviceNumbers = fields[2].split(":");
                        majorDeviceNumber = Integer.parseInt(deviceNumbers[0]);
                        minorDeviceNumber = Integer.parseInt(deviceNumbers[1]);
                    }
                }
            } catch (Exception e) {
                majorDeviceNumber = -1;
                minorDeviceNumber = -1;
            }
        } else {
            spins = null;
        }
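// Example /proc/self/mountinfo line, per proc(5), to make the field indices above concrete:
//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw
// fields[2] is the major:minor device number ("98:0") and fields[4] is the mount point
// ("/mnt2"), which is what the parsing loop above extracts.
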
@ -229,10 +247,13 @@ class ESFileStore extends FileStore {

    @Override
    public Object getAttribute(String attribute) throws IOException {
        if ("lucene:spins".equals(attribute)) {
            return spins;
        } else {
            return in.getAttribute(attribute);
        switch(attribute) {
            // for the device
            case "lucene:spins": return spins;
            // for the partition
            case "lucene:major_device_number": return majorDeviceNumber;
            case "lucene:minor_device_number": return minorDeviceNumber;
            default: return in.getAttribute(attribute);
        }
    }

@ -64,7 +64,6 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;

@ -88,14 +87,21 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
         * not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */
        public final Boolean spins;

        public final int majorDeviceNumber;
        public final int minorDeviceNumber;

        public NodePath(Path path) throws IOException {
            this.path = path;
            this.indicesPath = path.resolve(INDICES_FOLDER);
            this.fileStore = Environment.getFileStore(path);
            if (fileStore.supportsFileAttributeView("lucene")) {
                this.spins = (Boolean) fileStore.getAttribute("lucene:spins");
                this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
                this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
            } else {
                this.spins = null;
                this.majorDeviceNumber = -1;
                this.minorDeviceNumber = -1;
            }
        }

@ -84,7 +84,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
    public boolean allocateUnassigned(RoutingAllocation allocation) {
        boolean changed = false;
        final RoutingNodes routingNodes = allocation.routingNodes();
        final MetaData metaData = routingNodes.metaData();
        final MetaData metaData = allocation.metaData();

        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
        while (unassignedIterator.hasNext()) {

@ -108,7 +108,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                    currentNode, nodeWithHighestMatch);
                it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
                    "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]",
                    null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
                changed = true;
            }
        }

@ -50,7 +50,6 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexEventListener;

@ -151,11 +150,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        this.indexStore = indexStore;
        indexFieldData.setListener(new FieldDataCacheListener(this));
        this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
        PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext);
        this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool,
            bitsetFilterCache.createListener(threadPool),
            percolatorQueryCache.createListener(threadPool));
        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache);
            bitsetFilterCache.createListener(threadPool));
        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache);
        this.engineFactory = engineFactory;
        // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
        this.searcherWrapper = wrapperFactory.newWrapper(this);

@ -239,8 +236,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                }
            }
        } finally {
            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask,
                cache().getPercolatorQueryCache());
            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
        }
    }
}

@ -336,18 +332,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
            store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
                new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
            if (useShadowEngine(primary, indexSettings)) {
                indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService,
                indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
                    indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer,
                    searchOperationListeners);
                // no indexing listeners - shadow engines don't index
            } else {
                indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService,
                indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
                    indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer,
                    searchOperationListeners, indexingOperationListeners);
            }
            eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
            eventListener.afterIndexShardCreated(indexShard);
            indexShard.updateRoutingEntry(routing, true);
            shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
            success = true;
            return indexShard;

@ -444,7 +439,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return new QueryShardContext(
            indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(),
            similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(),
            nodeServicesProvider.getClient(), indexCache.getPercolatorQueryCache(), indexReader,
            nodeServicesProvider.getClient(), indexReader,
            nodeServicesProvider.getClusterService().state()
        );
    }

@ -321,7 +321,7 @@ public final class AnalysisRegistry implements Closeable {
                if (currentSettings.get("tokenizer") != null) {
                    factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
                } else {
                    throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
                    throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer");
                }
            } else if (typeName.equals("custom")) {
                factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);

@ -335,7 +335,7 @@ public final class AnalysisRegistry implements Closeable {
                factories.put(name, factory);
            } else {
                if (typeName == null) {
                    throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
                    throw new IllegalArgumentException(toBuild + " [" + name + "] must specify either an analyzer type, or a tokenizer");
                }
                AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
                if (type == null) {

@ -33,13 +33,11 @@ import org.apache.lucene.analysis.util.CharArraySet;
public final class FingerprintAnalyzer extends Analyzer {
    private final char separator;
    private final int maxOutputSize;
    private final boolean preserveOriginal;
    private final CharArraySet stopWords;

    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize, boolean preserveOriginal) {
    public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) {
        this.separator = separator;
        this.maxOutputSize = maxOutputSize;
        this.preserveOriginal = preserveOriginal;
        this.stopWords = stopWords;
    }

@ -48,7 +46,7 @@ public final class FingerprintAnalyzer extends Analyzer {
        final Tokenizer tokenizer = new StandardTokenizer();
        TokenStream stream = tokenizer;
        stream = new LowerCaseFilter(stream);
        stream = new ASCIIFoldingFilter(stream, preserveOriginal);
        stream = new ASCIIFoldingFilter(stream, false);
        stream = new StopFilter(stream, stopWords);
        stream = new FingerprintFilter(stream, maxOutputSize, separator);
        return new TokenStreamComponents(tokenizer, stream);

@ -34,10 +34,8 @@ import org.elasticsearch.index.IndexSettings;
public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {

    public static ParseField MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.MAX_OUTPUT_SIZE;
    public static ParseField PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.PRESERVE_ORIGINAL;

    public static int DEFAULT_MAX_OUTPUT_SIZE = FingerprintTokenFilterFactory.DEFAULT_MAX_OUTPUT_SIZE;
    public static boolean DEFAULT_PRESERVE_ORIGINAL = ASCIIFoldingTokenFilterFactory.DEFAULT_PRESERVE_ORIGINAL;
    public static CharArraySet DEFAULT_STOP_WORDS = CharArraySet.EMPTY_SET;

    private final FingerprintAnalyzer analyzer;

@ -47,10 +45,9 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<A

        char separator = FingerprintTokenFilterFactory.parseSeparator(settings);
        int maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(), DEFAULT_MAX_OUTPUT_SIZE);
        boolean preserveOriginal = settings.getAsBoolean(PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
        CharArraySet stopWords = Analysis.parseStopWords(env, settings, DEFAULT_STOP_WORDS);

        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize, preserveOriginal);
        this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize);
    }

    @Override

@ -24,7 +24,6 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.percolator.PercolatorQueryCache;

import java.io.Closeable;
import java.io.IOException;

@ -36,14 +35,11 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {

    private final QueryCache queryCache;
    private final BitsetFilterCache bitsetFilterCache;
    private final PercolatorQueryCache percolatorQueryCache;

    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache,
                      PercolatorQueryCache percolatorQueryCache) {
    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) {
        super(indexSettings);
        this.queryCache = queryCache;
        this.bitsetFilterCache = bitsetFilterCache;
        this.percolatorQueryCache = percolatorQueryCache;
    }

    public QueryCache query() {

@ -57,13 +53,9 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {
        return bitsetFilterCache;
    }

    public PercolatorQueryCache getPercolatorQueryCache() {
        return percolatorQueryCache;
    }

    @Override
    public void close() throws IOException {
        IOUtils.close(queryCache, bitsetFilterCache, percolatorQueryCache);
        IOUtils.close(queryCache, bitsetFilterCache);
    }

    public void clear(String reason) {

@ -23,6 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@ -455,19 +456,23 @@ final class DocumentParser {

    private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException {
        assert currentFieldName != null;
        context.path().add(currentFieldName);

        ObjectMapper update = null;
        Mapper objectMapper = getMapper(mapper, currentFieldName);
        if (objectMapper != null) {
            context.path().add(currentFieldName);
            parseObjectOrField(context, objectMapper);
            context.path().remove();
        } else {
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(mapper, context);

            final String[] paths = currentFieldName.split("\\.");
            currentFieldName = paths[paths.length - 1];
            Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, mapper);
            ObjectMapper parentMapper = parentMapperTuple.v2();
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context);
            if (dynamic == ObjectMapper.Dynamic.STRICT) {
                throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName);
            } else if (dynamic == ObjectMapper.Dynamic.TRUE) {
                // remove the current field name from path, since template search and the object builder add it as well...
                context.path().remove();
                Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
                if (builder == null) {
                    builder = new ObjectMapper.Builder(currentFieldName).enabled(true);

@ -477,13 +482,16 @@ final class DocumentParser {
                context.addDynamicMapper(objectMapper);
                context.path().add(currentFieldName);
                parseObjectOrField(context, objectMapper);
                context.path().remove();
            } else {
                // not dynamic, read everything up to end object
                context.parser().skipChildren();
            }
            for (int i = 0; i < parentMapperTuple.v1(); i++) {
                context.path().remove();
            }
        }

        context.path().remove();
        return update;
    }

@ -501,6 +509,11 @@ final class DocumentParser {
            }
        } else {

            final String[] paths = arrayFieldName.split("\\.");
            arrayFieldName = paths[paths.length - 1];
            lastFieldName = arrayFieldName;
            Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);
            parentMapper = parentMapperTuple.v2();
            ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context);
            if (dynamic == ObjectMapper.Dynamic.STRICT) {
                throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName);

@ -508,23 +521,26 @@ final class DocumentParser {
                Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object");
                if (builder == null) {
                    parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                    return;
                }
                Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                mapper = builder.build(builderContext);
                assert mapper != null;
                if (mapper instanceof ArrayValueMapperParser) {
                    context.addDynamicMapper(mapper);
                    context.path().add(arrayFieldName);
                    parseObjectOrField(context, mapper);
                    context.path().remove();
                } else {
                    parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                    Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                    mapper = builder.build(builderContext);
                    assert mapper != null;
                    if (mapper instanceof ArrayValueMapperParser) {
                        context.addDynamicMapper(mapper);
                        context.path().add(arrayFieldName);
                        parseObjectOrField(context, mapper);
                        context.path().remove();
                    } else {
                        parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
                    }
                }
            } else {
                // TODO: shouldn't this skip, not parse?
                parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName);
            }
            for (int i = 0; i < parentMapperTuple.v1(); i++) {
                context.path().remove();
            }
        }
    }

@ -556,7 +572,15 @@ final class DocumentParser {
        if (mapper != null) {
            parseObjectOrField(context, mapper);
        } else {

            final String[] paths = currentFieldName.split("\\.");
            currentFieldName = paths[paths.length - 1];
            Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper);
            parentMapper = parentMapperTuple.v2();
            parseDynamicValue(context, parentMapper, currentFieldName, token);
            for (int i = 0; i < parentMapperTuple.v1(); i++) {
                context.path().remove();
            }
        }
    }

@ -815,46 +839,61 @@ final class DocumentParser {

            final String[] paths = field.split("\\.");
            final String fieldName = paths[paths.length-1];
            ObjectMapper mapper = context.root();
            ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
            if (paths.length > 1) {
                ObjectMapper parent = context.root();
                for (int i = 0; i < paths.length-1; i++) {
                    mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i]));
                    if (mapper == null) {
                        // One mapping is missing, check if we are allowed to create a dynamic one.
                        ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context);

                        switch (dynamic) {
                            case STRICT:
                                throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
                            case TRUE:
                                Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
                                if (builder == null) {
                                    builder = new ObjectMapper.Builder(paths[i]).enabled(true);
                                }
                                Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                                mapper = (ObjectMapper) builder.build(builderContext);
                                if (mapper.nested() != ObjectMapper.Nested.NO) {
                                    throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
                                }
                                context.addDynamicMapper(mapper);
                                break;
                            case FALSE:
                                // Maybe we should log something to tell the user that the copy_to is ignored in this case.
                                break;

                        }
                    }
                    context.path().add(paths[i]);
                    mappers[i] = mapper;
                    parent = mapper;
                }
            }
            Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, null);
            ObjectMapper mapper = parentMapperTuple.v2();
            parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
            for (int i = 0; i < parentMapperTuple.v1(); i++) {
                context.path().remove();
            }
        }
    }

    private static Tuple<Integer, ObjectMapper> getDynamicParentMapper(ParseContext context, final String[] paths,
            ObjectMapper currentParent) {
        ObjectMapper mapper = currentParent == null ? context.root() : currentParent;
        int pathsAdded = 0;
        ObjectMapper parent = mapper;
        for (int i = 0; i < paths.length-1; i++) {
            String currentPath = context.path().pathAsText(paths[i]);
            FieldMapper existingFieldMapper = context.docMapper().mappers().getMapper(currentPath);
            if (existingFieldMapper != null) {
                throw new MapperParsingException(
                    "Could not dynamically add mapping for field [{}]. Existing mapping for [{}] must be of type object but found [{}].",
                    null, String.join(".", paths), currentPath, existingFieldMapper.fieldType.typeName());
            }
            mapper = context.docMapper().objectMappers().get(currentPath);
            if (mapper == null) {
                // One mapping is missing, check if we are allowed to create a dynamic one.
                ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context);

                switch (dynamic) {
                    case STRICT:
                        throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
                    case TRUE:
                        Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
                        if (builder == null) {
                            builder = new ObjectMapper.Builder(paths[i]).enabled(true);
                        }
                        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                        mapper = (ObjectMapper) builder.build(builderContext);
                        if (mapper.nested() != ObjectMapper.Nested.NO) {
                            throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
                        }
                        context.addDynamicMapper(mapper);
                        break;
                    case FALSE:
                        // Should not dynamically create any more mappers so return the last mapper
                        return new Tuple<Integer, ObjectMapper>(pathsAdded, parent);

                }
            }
            context.path().add(paths[i]);
            pathsAdded++;
            parent = mapper;
        }
        return new Tuple<Integer, ObjectMapper>(pathsAdded, mapper);
    }

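    // Illustration (commentary added for clarity, not new behavior): for a dynamic field
    // "a.b.c", paths = ["a", "b", "c"]; the loop above resolves or dynamically creates the
    // object mappers for "a" and "a.b", pushes each onto context.path(), and returns
    // pathsAdded so callers can pop exactly that many segments after parsing the leaf "c".
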
// find what the dynamic setting is given the current parse context and parent
|
||||
private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper, ParseContext context) {
|
||||
ObjectMapper.Dynamic dynamic = parentMapper.dynamic();
|
||||
|
|
|
@ -25,21 +25,16 @@ import org.apache.lucene.index.IndexReader;
|
|||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.queries.TermsQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.FuzzyQuery;
|
||||
import org.apache.lucene.search.MultiTermQuery;
|
||||
import org.apache.lucene.search.PrefixQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.fieldstats.FieldStats;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.joda.DateMathParser;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.common.unit.Fuzziness;
|
||||
import org.elasticsearch.index.analysis.NamedAnalyzer;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||
|
@ -312,13 +307,6 @@ public abstract class MappedFieldType extends FieldType {
|
|||
return value;
|
||||
}
|
||||
|
||||
/** Returns the indexed value used to construct search "values".
|
||||
* This method is used for the default implementations of most
|
||||
* query factory methods such as {@link #termQuery}. */
|
||||
protected BytesRef indexedValueForSearch(Object value) {
|
||||
return BytesRefs.toBytesRef(value);
|
||||
}
|
||||
|
||||
/** Returns true if the field is searchable.
|
||||
*
|
||||
*/
|
||||
|
@ -342,50 +330,33 @@ public abstract class MappedFieldType extends FieldType {
|
|||
* The default implementation returns a {@link TermQuery} over the value bytes,
|
||||
* boosted by {@link #boost()}.
|
||||
* @throws IllegalArgumentException if {@code value} cannot be converted to the expected data type */
|
||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
TermQuery query = new TermQuery(new Term(name(), indexedValueForSearch(value)));
|
||||
if (boost == 1f ||
|
||||
(context != null && context.indexVersionCreated().before(Version.V_5_0_0_alpha1))) {
|
||||
return query;
|
||||
}
|
||||
return new BoostQuery(query, boost);
|
||||
}
|
||||
public abstract Query termQuery(Object value, @Nullable QueryShardContext context);
|
||||
|
||||
public Query termsQuery(List values, @Nullable QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
BytesRef[] bytesRefs = new BytesRef[values.size()];
|
||||
for (int i = 0; i < bytesRefs.length; i++) {
|
||||
bytesRefs[i] = indexedValueForSearch(values.get(i));
|
||||
/** Build a constant-scoring query that matches all values. The default implementation uses a
|
||||
* {@link ConstantScoreQuery} around a {@link BooleanQuery} whose {@link Occur#SHOULD} clauses
|
||||
* are generated with {@link #termQuery}. */
|
||||
public Query termsQuery(List<?> values, @Nullable QueryShardContext context) {
|
||||
BooleanQuery.Builder builder = new BooleanQuery.Builder();
|
||||
for (Object value : values) {
|
||||
builder.add(termQuery(value, context), Occur.SHOULD);
|
||||
}
|
||||
return new TermsQuery(name(), bytesRefs);
|
||||
return new ConstantScoreQuery(builder.build());
|
||||
}
|
||||
|
||||
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
|
||||
failIfNotIndexed();
|
||||
return new TermRangeQuery(name(),
|
||||
lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
|
||||
upperTerm == null ? null : indexedValueForSearch(upperTerm),
|
||||
includeLower, includeUpper);
|
||||
throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries");
|
||||
}
|
||||
|
||||
public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
|
||||
failIfNotIndexed();
|
||||
return new FuzzyQuery(new Term(name(), indexedValueForSearch(value)),
|
||||
fuzziness.asDistance(BytesRefs.toString(value)), prefixLength, maxExpansions, transpositions);
|
||||
throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
|
||||
}
|
||||
|
||||
public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
PrefixQuery query = new PrefixQuery(new Term(name(), indexedValueForSearch(value)));
|
||||
if (method != null) {
|
||||
query.setRewriteMethod(method);
|
||||
}
|
||||
return query;
|
||||
public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
|
||||
throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
|
||||
}
|
||||
|
||||
public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
|
||||
throw new QueryShardException(context, "Can only use regular expression on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
|
||||
public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
|
||||
throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
|
||||
}
|
||||
|
||||
public Query nullValueQuery() {
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.queries.TermsQuery;
|
||||
import org.apache.lucene.search.FuzzyQuery;
|
||||
import org.apache.lucene.search.MultiTermQuery;
|
||||
import org.apache.lucene.search.PrefixQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.RegexpQuery;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.common.unit.Fuzziness;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
||||
/** Base class for {@link MappedFieldType} implementations that use the same
|
||||
* representation for internal index terms as the external representation so
|
||||
* that partial matching queries such as prefix, wildcard and fuzzy queries
|
||||
* can be implemented. */
|
||||
public abstract class StringFieldType extends TermBasedFieldType {
|
||||
|
||||
public StringFieldType() {}
|
||||
|
||||
protected StringFieldType(MappedFieldType ref) {
|
||||
super(ref);
|
||||
}
|
||||
|
||||
public Query termsQuery(List<?> values, QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
BytesRef[] bytesRefs = new BytesRef[values.size()];
|
||||
for (int i = 0; i < bytesRefs.length; i++) {
|
||||
bytesRefs[i] = indexedValueForSearch(values.get(i));
|
||||
}
|
||||
return new TermsQuery(name(), bytesRefs);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions,
|
||||
boolean transpositions) {
|
||||
failIfNotIndexed();
|
||||
return new FuzzyQuery(new Term(name(), indexedValueForSearch(value)),
|
||||
fuzziness.asDistance(BytesRefs.toString(value)), prefixLength, maxExpansions, transpositions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
PrefixQuery query = new PrefixQuery(new Term(name(), indexedValueForSearch(value)));
|
||||
if (method != null) {
|
||||
query.setRewriteMethod(method);
|
||||
}
|
||||
return query;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Query regexpQuery(String value, int flags, int maxDeterminizedStates,
|
||||
MultiTermQuery.RewriteMethod method, QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
|
||||
if (method != null) {
|
||||
query.setRewriteMethod(method);
|
||||
}
|
||||
return query;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
|
||||
failIfNotIndexed();
|
||||
return new TermRangeQuery(name(),
|
||||
lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
|
||||
upperTerm == null ? null : indexedValueForSearch(upperTerm),
|
||||
includeLower, includeUpper);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.queries.TermsQuery;
|
||||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
||||
/** Base {@link MappedFieldType} implementation for a field that is indexed
|
||||
* with the inverted index. */
|
||||
public abstract class TermBasedFieldType extends MappedFieldType {
|
||||
|
||||
public TermBasedFieldType() {}
|
||||
|
||||
protected TermBasedFieldType(MappedFieldType ref) {
|
||||
super(ref);
|
||||
}
|
||||
|
||||
/** Returns the indexed value used to construct search "values".
|
||||
* This method is used for the default implementations of most
|
||||
* query factory methods such as {@link #termQuery}. */
|
||||
protected BytesRef indexedValueForSearch(Object value) {
|
||||
return BytesRefs.toBytesRef(value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
TermQuery query = new TermQuery(new Term(name(), indexedValueForSearch(value)));
|
||||
if (boost() == 1f ||
|
||||
(context != null && context.indexVersionCreated().before(Version.V_5_0_0_alpha1))) {
|
||||
return query;
|
||||
}
|
||||
return new BoostQuery(query, boost());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termsQuery(List<?> values, QueryShardContext context) {
|
||||
failIfNotIndexed();
|
||||
BytesRef[] bytesRefs = new BytesRef[values.size()];
|
||||
for (int i = 0; i < bytesRefs.length; i++) {
|
||||
bytesRefs[i] = indexedValueForSearch(values.get(i));
|
||||
}
|
||||
return new TermsQuery(name(), bytesRefs);
|
||||
}
|
||||
|
||||
}
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.core;
|
|||
import com.carrotsearch.hppc.ObjectArrayList;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexOptions;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.store.ByteArrayDataOutput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
@ -40,6 +41,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
|
|||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.index.query.QueryShardException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
@ -135,6 +138,11 @@ public class BinaryFieldMapper extends FieldMapper {
|
|||
failIfNoDocValues();
|
||||
return new BytesBinaryDVIndexFieldData.Builder();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query termQuery(Object value, QueryShardContext context) {
|
||||
throw new QueryShardException(context, "Binary fields do not support searching");
|
||||
}
|
||||
}
|
||||
|
||||
protected BinaryFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
|
||||
|
|
|
@@ -22,10 +22,11 @@ package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -38,6 +39,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -48,7 +50,6 @@ import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

/**
 * A field mapper for boolean fields.

@@ -119,7 +120,7 @@ public class BooleanFieldMapper extends FieldMapper {
        }
    }

    public static final class BooleanFieldType extends MappedFieldType {
    public static final class BooleanFieldType extends TermBasedFieldType {

        public BooleanFieldType() {}

@@ -200,6 +201,15 @@ public class BooleanFieldMapper extends FieldMapper {
            }
            return DocValueFormat.BOOLEAN;
        }

        @Override
        public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
            failIfNotIndexed();
            return new TermRangeQuery(name(),
                lowerTerm == null ? null : indexedValueForSearch(lowerTerm),
                upperTerm == null ? null : indexedValueForSearch(upperTerm),
                includeLower, includeUpper);
        }
    }

    protected BooleanFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,

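The new `rangeQuery` above leans on boolean values being indexed as single-character terms (`T`/`F` via `indexedValueForSearch`), so a boolean range reduces to an ordinary term range. A standalone sketch against plain Lucene, assuming that term encoding; the field name `published` is invented:

--------------------------------
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;

public class BooleanRangeSketch {
    public static void main(String[] args) {
        // "F" sorts before "T", so [false, true] becomes the term range [F, T].
        TermRangeQuery query = new TermRangeQuery("published",
            new BytesRef("F"), new BytesRef("T"), true, true);
        System.out.println(query); // prints something like published:[F TO T]
    }
}
--------------------------------
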
@@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.search.suggest.completion.CompletionSuggester;
import org.elasticsearch.search.suggest.completion.context.ContextMapping;

@@ -178,7 +179,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
        }
    }

    public static final class CompletionFieldType extends MappedFieldType {
    public static final class CompletionFieldType extends TermBasedFieldType {

        private static PostingsFormat postingsFormat;

@@ -40,6 +40,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperException;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.search.suggest.completion2x.AnalyzingCompletionLookupProvider;
import org.elasticsearch.search.suggest.completion2x.Completion090PostingsFormat;
import org.elasticsearch.search.suggest.completion2x.CompletionTokenStream;

@@ -231,7 +232,7 @@ public class CompletionFieldMapper2x extends FieldMapper {
        }
    }

    public static final class CompletionFieldType extends MappedFieldType {
    public static final class CompletionFieldType extends TermBasedFieldType {
        private PostingsFormat postingsFormat;
        private AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
        private SortedMap<String, ContextMapping> contextMapping = ContextMapping.EMPTY_MAPPING;

@@ -316,21 +316,6 @@ public class DateFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
            return query;
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            failIfNotIndexed();
            long baseLo = parseToMilliseconds(value, false, null, dateMathParser);
            long baseHi = parseToMilliseconds(value, true, null, dateMathParser);
            long delta;
            try {
                delta = fuzziness.asTimeValue().millis();
            } catch (Exception e) {
                // not a time format
                delta = fuzziness.asLong();
            }
            return LongPoint.newRangeQuery(name(), baseLo - delta, baseHi + delta);
        }

        @Override
        public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
            failIfNotIndexed();

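The deleted `fuzzyQuery` above shows that date "fuzziness" was only ever a widened range: the lower and upper parses of the date, plus or minus a millisecond delta, handed to a point range query. A caller can still reproduce the old behaviour by hand; a sketch with invented field name and values:

--------------------------------
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.Query;

public class DateFuzzinessAsRange {
    public static void main(String[] args) {
        long baseLo = 1_464_739_200_000L; // lower parse of the date, in millis (invented)
        long baseHi = 1_464_825_599_999L; // upper parse of the same date (invented)
        long delta = 60_000L;             // one minute of "fuzziness"
        Query query = LongPoint.newRangeQuery("timestamp", baseLo - delta, baseHi + delta);
        System.out.println(query);
    }
}
--------------------------------
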
@@ -22,13 +22,8 @@ package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -40,8 +35,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
import java.util.Iterator;

@@ -143,7 +138,7 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
        }
    }

    public static final class KeywordFieldType extends MappedFieldType {
    public static final class KeywordFieldType extends StringFieldType {

        public KeywordFieldType() {}

@@ -173,17 +168,6 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
            failIfNoDocValues();
            return new DocValuesIndexFieldData.Builder();
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates,
                                 @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            failIfNotIndexed();
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

@@ -33,7 +33,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -159,16 +158,6 @@ public class LegacyByteFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            byte iValue = parseValue(value);
            byte iSim = fuzziness.asByte();
            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Long stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

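The same `fuzzyQuery` deletion repeats below in the legacy date, double, float, integer, long and short mappers; every body was this one pattern, value ± fuzziness fed into a legacy numeric range. A standalone sketch of that pattern (field name and precision step invented):

--------------------------------
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class LegacyFuzzinessAsRange {
    public static void main(String[] args) {
        int value = 30; // the "fuzzy" target value
        int fuzz = 2;   // what fuzziness.asInt() would have returned
        // Numeric "fuzzy" matching is just a symmetric range around the value.
        Query query = LegacyNumericRangeQuery.newIntRange(
            "age", 8, value - fuzz, value + fuzz, true, true);
        System.out.println(query); // age:[28 TO 32]
    }
}
--------------------------------
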
@@ -358,22 +358,6 @@ public class LegacyDateFieldMapper extends LegacyNumberFieldMapper {
            return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            long iValue = parseValue(value);
            long iSim;
            try {
                iSim = fuzziness.asTimeValue().millis();
            } catch (Exception e) {
                // not a time format
                iSim = fuzziness.asLong();
            }
            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Date stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -36,7 +36,6 @@ import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -170,16 +169,6 @@ public class LegacyDoubleFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            double iValue = parseDoubleValue(value);
            double iSim = fuzziness.asDouble();
            return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Double stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -35,7 +35,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -155,16 +154,6 @@ public class LegacyFloatFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            float iValue = parseValue(value);
            final float iSim = fuzziness.asFloat();
            return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Double stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -34,7 +34,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -135,8 +134,7 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {

        @Override
        public String typeName() {
            // TODO: this should be the same as the mapper type name, except fielddata expects int...
            return "int";
            return "integer";
        }

        @Override

@@ -159,16 +157,6 @@ public class LegacyIntegerFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            int iValue = parseValue(value);
            int iSim = fuzziness.asInt();
            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Long stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -34,7 +34,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -158,16 +157,6 @@ public class LegacyLongFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            long iValue = parseLongValue(value);
            final long iSim = fuzziness.asLong();
            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -27,20 +27,19 @@ import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -121,7 +120,7 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All
        protected abstract int maxPrecisionStep();
    }

    public static abstract class NumberFieldType extends MappedFieldType {
    public static abstract class NumberFieldType extends TermBasedFieldType {

        public NumberFieldType(LegacyNumericType numericType) {
            setTokenized(false);

@@ -146,9 +145,6 @@ public abstract class LegacyNumberFieldMapper extends FieldMapper implements All

        public abstract NumberFieldType clone();

        @Override
        public abstract Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions);

        @Override
        public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) {
            if (timeZone != null) {

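Several field types in this diff swap their base class from `MappedFieldType` to `TermBasedFieldType`. That class's source is not part of this excerpt; the sketch below is an assumption about its essence — one shared `termQuery` built from the indexed form of the value — with invented names:

--------------------------------
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;

// Assumed shape, not the actual Elasticsearch class.
abstract class TermBasedSketch {
    abstract String name();

    // Subclasses specialize the indexed encoding, e.g. booleans render as "T"/"F".
    BytesRef indexedValueForSearch(Object value) {
        return new BytesRef(value.toString());
    }

    // The one implementation all term-based subclasses can now share.
    Query termQuery(Object value) {
        return new TermQuery(new Term(name(), indexedValueForSearch(value)));
    }
}
--------------------------------
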
@@ -163,16 +163,6 @@ public class LegacyShortFieldMapper extends LegacyNumberFieldMapper {
                includeLower, includeUpper);
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
            short iValue = parseValue(value);
            short iSim = fuzziness.asShort();
            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                iValue - iSim,
                iValue + iSim,
                true, true);
        }

        @Override
        public FieldStats.Long stats(IndexReader reader) throws IOException {
            int maxDoc = reader.maxDoc();

@@ -40,7 +40,6 @@ import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

@@ -233,13 +232,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return FloatPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            float base = parse(value);
            float delta = fuzziness.asFloat();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -324,13 +316,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return DoublePoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            double base = parse(value);
            double delta = fuzziness.asFloat();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -407,11 +392,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            return INTEGER.fuzzyQuery(field, value, fuzziness);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -473,11 +453,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            return INTEGER.fuzzyQuery(field, value, fuzziness);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -560,13 +535,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return IntPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            int base = parse(value);
            int delta = fuzziness.asInt();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -663,13 +631,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return LongPoint.newRangeQuery(field, l, u);
        }

        @Override
        Query fuzzyQuery(String field, Object value, Fuzziness fuzziness) {
            long base = parse(value);
            long delta = fuzziness.asLong();
            return rangeQuery(field, base - delta, base + delta, true, true);
        }

        @Override
        public List<Field> createFields(String name, Number value,
                                        boolean indexed, boolean docValued, boolean stored) {

@@ -722,7 +683,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
        abstract Query termsQuery(String field, List<Object> values);
        abstract Query rangeQuery(String field, Object lowerTerm, Object upperTerm,
                                  boolean includeLower, boolean includeUpper);
        abstract Query fuzzyQuery(String field, Object value, Fuzziness fuzziness);
        abstract Number parse(XContentParser parser, boolean coerce) throws IOException;
        abstract Number parse(Object value);
        public abstract List<Field> createFields(String name, Number value, boolean indexed,

@@ -791,13 +751,6 @@ public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            return query;
        }

        @Override
        public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength,
                                int maxExpansions, boolean transpositions) {
            failIfNotIndexed();
            return type.fuzzyQuery(name(), value, fuzziness);
        }

        @Override
        public FieldStats stats(IndexReader reader) throws IOException {
            return type.stats(reader, name(), isSearchable(), isAggregatable());

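`NumberFieldMapper` routes query building through a type enum whose constants each implement the abstract hooks, which is why dropping `fuzzyQuery` costs one abstract declaration plus one override per numeric type (BYTE and SHORT simply delegated to INTEGER). A trimmed-down sketch of that structure with two constants and invented names:

--------------------------------
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.Query;

// Illustrative two-constant version of the per-type enum.
enum NumberTypeSketch {
    INTEGER {
        @Override
        Query rangeQuery(String field, Number lower, Number upper) {
            return IntPoint.newRangeQuery(field, lower.intValue(), upper.intValue());
        }
    },
    LONG {
        @Override
        Query rangeQuery(String field, Number lower, Number upper) {
            return LongPoint.newRangeQuery(field, lower.longValue(), upper.longValue());
        }
    };

    // Removing a hook like fuzzyQuery means deleting one line here
    // and one override in every constant above.
    abstract Query rangeQuery(String field, Number lower, Number upper);
}
--------------------------------
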
@@ -355,7 +355,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
        }
    }

    public static final class StringFieldType extends MappedFieldType {
    public static final class StringFieldType extends org.elasticsearch.index.mapper.StringFieldType {

        private boolean fielddata;
        private double fielddataMinFrequency;

@@ -485,15 +485,6 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                    + "use significant memory.");
            }
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

@@ -21,12 +21,7 @@ package org.elasticsearch.index.mapper.core;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

@@ -39,8 +34,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
import java.util.Iterator;

@@ -172,7 +167,7 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
        }
    }

    public static final class TextFieldType extends MappedFieldType {
    public static final class TextFieldType extends StringFieldType {

        private boolean fielddata;
        private double fielddataMinFrequency;

@@ -300,17 +295,6 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu
            }
            return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates,
                                 @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            failIfNotIndexed();
            RegexpQuery query = new RegexpQuery(new Term(name(), indexedValueForSearch(value)), flags, maxDeterminizedStates);
            if (method != null) {
                query.setRewriteMethod(method);
            }
            return query;
        }
    }

    private Boolean includeInAll;

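The `regexpQuery` bodies deleted from `KeywordFieldMapper`, `StringFieldMapper` and `TextFieldMapper` are all one recipe — build a Lucene `RegexpQuery` on the indexed term and apply an optional rewrite method — now presumably hosted once by the shared `StringFieldType` these classes extend. The recipe as standalone Lucene code (field name, pattern, and limits invented):

--------------------------------
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.RegexpQuery;

public class RegexpRecipeSketch {
    public static void main(String[] args) {
        int flags = 0;                     // RegExp syntax flags
        int maxDeterminizedStates = 10000; // guards against pathological patterns
        RegexpQuery query = new RegexpQuery(new Term("title", "elastic.*"),
            flags, maxDeterminizedStates);
        // Optional: control how the multi-term query is rewritten.
        query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
        System.out.println(query);
    }
}
--------------------------------
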
@@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.elasticsearch.Version;

@@ -47,6 +48,8 @@ import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.DocValueFormat;
import org.joda.time.DateTimeZone;

@@ -366,6 +369,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
            }
            return DocValueFormat.GEOHASH;
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead: [" + name() + "]");
        }
    }

    protected FieldMapper latMapper;

@@ -23,6 +23,7 @@ import org.locationtech.spatial4j.shape.Shape;
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
import org.apache.lucene.spatial.prefix.TermQueryPrefixTreeStrategy;

@@ -46,6 +47,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;

import java.io.IOException;
import java.util.Iterator;

@@ -412,6 +415,10 @@ public class GeoShapeFieldMapper extends FieldMapper {
                throw new IllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]");
        }

        @Override
        public Query termQuery(Object value, QueryShardContext context) {
            throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead");
        }
    }

    protected Explicit<Boolean> coerce;

@@ -23,7 +23,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.all.AllEntries;
import org.elasticsearch.common.lucene.all.AllField;

@@ -36,6 +35,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityService;

@@ -177,7 +177,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class AllFieldType extends MappedFieldType {
    static final class AllFieldType extends StringFieldType {

        public AllFieldType() {
        }

@@ -23,7 +23,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -32,6 +31,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;

@@ -125,7 +125,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
        }
    }

    public static final class FieldNamesFieldType extends MappedFieldType {
    public static final class FieldNamesFieldType extends TermBasedFieldType {

        private boolean enabled = Defaults.ENABLED;

@@ -41,6 +41,7 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TermBasedFieldType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.query.QueryShardContext;

@@ -89,7 +90,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
        }
    }

    static final class IdFieldType extends MappedFieldType {
    static final class IdFieldType extends TermBasedFieldType {

        public IdFieldType() {
        }

@@ -116,62 +117,14 @@ public class IdFieldMapper extends MetadataFieldMapper {

        @Override
        public Query termQuery(Object value, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.termQuery(value, context);
            }
            final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
            return new TermsQuery(UidFieldMapper.NAME, uids);
        }

        @Override
        public Query termsQuery(List values, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.termsQuery(values, context);
            }
            return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
        }

        @Override
        public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.prefixQuery(value, method, context);
            }
            Collection<String> queryTypes = context.queryTypes();
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (String queryType : queryTypes) {
                PrefixQuery prefixQuery = new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))));
                if (method != null) {
                    prefixQuery.setRewriteMethod(method);
                }
                query.add(prefixQuery, BooleanClause.Occur.SHOULD);
            }
            return query.build();
        }

        @Override
        public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, @Nullable QueryShardContext context) {
            if (indexOptions() != IndexOptions.NONE || context == null) {
                return super.regexpQuery(value, flags, maxDeterminizedStates, method, context);
            }
            Collection<String> queryTypes = context.queryTypes();
            if (queryTypes.size() == 1) {
                RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(Iterables.getFirst(queryTypes, null), BytesRefs.toBytesRef(value))),
                    flags, maxDeterminizedStates);
                if (method != null) {
                    regexpQuery.setRewriteMethod(method);
                }
                return regexpQuery;
            }
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (String queryType : queryTypes) {
                RegexpQuery regexpQuery = new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags, maxDeterminizedStates);
                if (method != null) {
                    regexpQuery.setRewriteMethod(method);
                }
                query.add(regexpQuery, BooleanClause.Occur.SHOULD);
            }
            return query.build();
        }
    }

    private IdFieldMapper(Settings indexSettings, MappedFieldType existing) {

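The block removed above gave `_id` prefix and regexp support by rewriting against `_uid`, whose terms take the form `type#id`: one per-type query, OR-ed together with SHOULD clauses. A sketch of the prefix case in plain Lucene (type names and prefix invented):

--------------------------------
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;

public class UidPrefixSketch {
    public static void main(String[] args) {
        String[] queryTypes = {"tweet", "user"}; // the types in the query context
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (String type : queryTypes) {
            // _uid terms look like "type#id", so an _id prefix becomes "type#prefix".
            builder.add(new PrefixQuery(new Term("_uid", type + "#ab")),
                BooleanClause.Occur.SHOULD);
        }
        System.out.println(builder.build()); // _uid:tweet#ab* _uid:user#ab*
    }
}
--------------------------------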
Some files were not shown because too many files have changed in this diff.