Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-24 17:09:48 +00:00)

commit 821a20f582
Merge branch 'master' into feature/reindex
@@ -223,6 +223,10 @@ tasks.idea.dependsOn(buildSrcIdea)
// eclipse configuration
allprojects {
apply plugin: 'eclipse'
// Name all the non-root projects after their path so that paths get grouped together when imported into eclipse.
if (path != ':') {
eclipse.project.name = path
}

plugins.withType(JavaBasePlugin) {
File eclipseBuild = project.file('build-eclipse')
@@ -662,9 +662,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySource.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySourceHandler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryStatus.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryTarget.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]SharedFSRecoverySourceHandler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]StartRecoveryRequest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]TransportNodesListShardStoreMetaData.java" checks="LineLength" />
@@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
- lucene = 5.5.0-snapshot-4de5f1d
+ lucene = 5.5.0-snapshot-850c6c2

# optional dependencies
spatial4j = 0.5
@@ -257,7 +257,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

/**
- * Retruns <code>true</code> iff the given class is a registered for an exception to be read.
+ * Returns <code>true</code> iff the given class is a registered for an exception to be read.
*/
public static boolean isRegistered(Class<? extends Throwable> exception) {
return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);
@@ -372,7 +372,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

/**
- * Returns the root cause of this exception or mupltiple if different shards caused different exceptions
+ * Returns the root cause of this exception or multiple if different shards caused different exceptions
*/
public ElasticsearchException[] guessRootCauses() {
final Throwable cause = getCause();
@@ -383,7 +383,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

/**
- * Returns the root cause of this exception or mupltiple if different shards caused different exceptions.
+ * Returns the root cause of this exception or multiple if different shards caused different exceptions.
* If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array
* is returned.
*/
@@ -40,7 +40,7 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
}

/**
- * Sets to reutrn all the data.
+ * Sets to return all the data.
*/
public NodesInfoRequestBuilder all() {
request.all();
@@ -105,7 +105,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
for (IndexShard indexShard : indexService) {
if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
// only report on fully started shards
- shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
+ shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
}
}
}
@@ -49,17 +49,14 @@ import java.util.List;
public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, TransportBroadcastByNodeAction.EmptyResult> {

private final IndicesService indicesService;
- private final IndicesRequestCache indicesRequestCache;

@Inject
public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
- TransportService transportService, IndicesService indicesService,
- IndicesRequestCache indicesQueryCache, ActionFilters actionFilters,
+ TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
- this.indicesRequestCache = indicesQueryCache;
}

@Override
@@ -101,7 +98,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
}
if (request.requestCache()) {
clearedAtLeastOne = true;
- indicesRequestCache.clear(shard);
+ indicesService.getIndicesRequestCache().clear(shard);
}
if (request.recycler()) {
logger.debug("Clear CacheRecycler on index [{}]", service.index());
@@ -117,7 +114,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
} else {
service.cache().clear("api");
service.fieldData().clear();
- indicesRequestCache.clear(shard);
+ indicesService.getIndicesRequestCache().clear(shard);
}
}
}
@@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
@@ -40,10 +41,13 @@ import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.io.IOException;
@@ -122,7 +126,8 @@ public class CommonStats implements Streamable, ToXContent {
}


- public CommonStats(IndexShard indexShard, CommonStatsFlags flags) {
+ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

CommonStatsFlags.Flag[] setFlags = flags.getFlags();

for (CommonStatsFlags.Flag flag : setFlags) {
@@ -155,7 +160,7 @@ public class CommonStats implements Streamable, ToXContent {
warmer = indexShard.warmerStats();
break;
case QueryCache:
- queryCache = indexShard.queryCacheStats();
+ queryCache = indicesQueryCache.getStats(indexShard.shardId());
break;
case FieldData:
fieldData = indexShard.fieldDataStats(flags.fieldDataFields());
@@ -162,6 +162,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
flags.set(CommonStatsFlags.Flag.Recovery);
}

- return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indexShard, flags), indexShard.commitStats());
+ return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
}
}
@@ -73,7 +73,7 @@ public class IndexConstraint {
}

/**
- * @return On what property of a field the contraint is going to be applied on (min or max value)
+ * @return On what property of a field the constraint is going to be applied on (min or max value)
*/
public Property getProperty() {
return property;
@@ -373,7 +373,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
}

/**
- * The source of the document to index, recopied to a new array if it is unsage.
+ * The source of the document to index, recopied to a new array if it is unsafe.
*/
public BytesReference source() {
return source;
@@ -164,7 +164,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
}

/**
- * The source of the document to index, recopied to a new array if it is unsage.
+ * The source of the document to index, recopied to a new array if it is unsafe.
*/
public BytesReference source() {
return source;
@@ -55,7 +55,7 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
}

/**
- * @return The number of seach contexts that were freed. If this is <code>0</code> the assumption can be made,
+ * @return The number of search contexts that were freed. If this is <code>0</code> the assumption can be made,
* that the scroll id specified in the request did not exist. (never existed, was expired, or completely consumed)
*/
public int getNumFreed() {
@@ -223,7 +223,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
}

/**
- * Sets the boost a specific index will receive when the query is executeed against it.
+ * Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
@@ -486,7 +486,7 @@ public final class TermVectorsFields extends Fields {

// read a vInt. this is used if the integer might be negative. In this case,
// the writer writes a 0 for -1 or value +1 and accordingly we have to
- // substract 1 again
+ // subtract 1 again
// adds one to mock not existing term freq
int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
return stream.readVInt() - 1;
@@ -494,7 +494,7 @@ public final class TermVectorsFields extends Fields {

// read a vLong. this is used if the integer might be negative. In this
// case, the writer writes a 0 for -1 or value +1 and accordingly we have to
- // substract 1 again
+ // subtract 1 again
// adds one to mock not existing term freq
long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
return stream.readVLong() - 1;
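The readers in the two hunks above are only half of the trick; the matching writer side is not shown in this diff. A minimal sketch of what the symmetric writers look like, assuming only StreamOutput#writeVInt/#writeVLong from Elasticsearch core (the class and method names here are illustrative, not the actual ones in the codebase):

```java
import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamOutput;

// Illustrative writer halves for the readers above: vInts/vLongs cannot
// encode negative values, so -1 (the "no term freq" marker) is shifted to 0
// on write, and the readers subtract 1 to undo the shift.
final class PotentiallyNegativeVInts {
    static void writeVInt(StreamOutput out, int value) throws IOException {
        out.writeVInt(value + 1);   // -1 -> 0, 0 -> 1, 1 -> 2, ...
    }

    static void writeVLong(StreamOutput out, long value) throws IOException {
        out.writeVLong(value + 1);
    }
}
```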
@@ -175,7 +175,7 @@ final class Bootstrap {
JarHell.checkJarHell();

// install SM after natives, shutdown hooks, etc.
- setupSecurity(settings, environment);
+ Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));

// We do not need to reload system properties here as we have already applied them in building the settings and
// reloading could cause multiple prompts to the user for values if a system property was specified with a prompt
@@ -188,14 +188,6 @@ final class Bootstrap {
node = new Node(nodeSettings);
}



- private void setupSecurity(Settings settings, Environment environment) throws Exception {
- if (BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING.get(settings)) {
- Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
- }
- }

@SuppressForbidden(reason = "Exception#printStackTrace()")
private static void setupLogging(Settings settings, Environment environment) {
try {
@@ -27,14 +27,6 @@ public final class BootstrapSettings {
private BootstrapSettings() {
}

- // TODO: remove this: http://www.openbsd.org/papers/hackfest2015-pledge/mgp00005.jpg
- /**
- * option to turn off our security manager completely, for example
- * if you want to have your own configuration or just disable
- */
- public static final Setting<Boolean> SECURITY_MANAGER_ENABLED_SETTING =
- Setting.boolSetting("security.manager.enabled", true, false, Scope.CLUSTER);

// TODO: remove this hack when insecure defaults are removed from java
public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
@@ -108,7 +108,7 @@ class JNANatives {
if (value == JNACLibrary.RLIM_INFINITY) {
return "unlimited";
} else {
- // TODO, on java 8 use Long.toUnsignedString, since thats what it is.
+ // TODO, on java 8 use Long.toUnsignedString, since that's what it is.
return Long.toString(value);
}
}
@@ -104,7 +104,7 @@ final class Seccomp {
int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5);
/**
* used to call seccomp(2), its too new...
- * this is the only way, DONT use it on some other architecture unless you know wtf you are doing
+ * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing
*/
NativeLong syscall(NativeLong number, Object... args);
};
@@ -93,7 +93,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
// because they would make obtain/release too costly: we really need constant-time
// operations.
// Ultimately a better solution would be to only store one kind of data and have the
- // ability to intepret it either as a source of bytes, doubles, longs, etc. eg. thanks
+ // ability to interpret it either as a source of bytes, doubles, longs, etc. eg. thanks
// to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
// that would need to be addressed such as garbage collection of native memory or safety
// of Unsafe writes.
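The "better solution" the comment sketches, storing one kind of page and reinterpreting it on demand, is what ByteBuffer views already provide; a small, self-contained illustration using only the JDK:

```java
import java.nio.ByteBuffer;
import java.nio.LongBuffer;

// Illustration only: one recycled byte[] page, reinterpreted as longs.
public final class PageViewDemo {
    public static void main(String[] args) {
        byte[] page = new byte[1 << 14];           // a single 16 KB page
        ByteBuffer bytes = ByteBuffer.wrap(page);  // raw byte view
        LongBuffer longs = bytes.asLongBuffer();   // same memory, long view

        longs.put(0, 42L);                         // write through the long view
        System.out.println(bytes.getLong(0));      // prints 42 via the byte view
    }
}
```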
@@ -107,7 +107,7 @@ public class Requests {
}

/**
- * Creats a new bulk request.
+ * Creates a new bulk request.
*/
public static BulkRequest bulkRequest() {
return new BulkRequest();
@@ -38,9 +38,9 @@ public interface LocalNodeMasterListener {
* The name of the executor that the implementation of the callbacks of this lister should be executed on. The thread
* that is responsible for managing instances of this lister is the same thread handling the cluster state events. If
* the work done is the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME}
- * (indicating that the callbaks will run on the same thread as the cluster state events are fired with). On the other hand,
+ * (indicating that the callbacks will run on the same thread as the cluster state events are fired with). On the other hand,
* if the logic in the callbacks are heavier and take longer to process (or perhaps involve blocking due to IO operations),
- * prefer to execute them on a separte more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
+ * prefer to execute them on a separate more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
* or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}).
*
* @return The name of the executor that will run the callbacks of this listener.
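For orientation, a minimal sketch of a listener picking its executor along the lines the javadoc above describes (the class name is hypothetical; onMaster()/offMaster() are the interface's other callbacks):

```java
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical listener whose callbacks are cheap, so SAME is appropriate;
// a listener that blocks on IO should return GENERIC (or MANAGEMENT) instead.
public class CheapMasterListener implements LocalNodeMasterListener {
    @Override
    public void onMaster() {
        // inexpensive bookkeeping only
    }

    @Override
    public void offMaster() {
        // inexpensive bookkeeping only
    }

    @Override
    public String executorName() {
        return ThreadPool.Names.SAME;
    }
}
```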
@@ -959,7 +959,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr

public MetaData build() {
// TODO: We should move these datastructures to IndexNameExpressionResolver, this will give the following benefits:
- // 1) The datastructures will only be rebuilded when needed. Now during serailizing we rebuild these datastructures
+ // 1) The datastructures will only be rebuilded when needed. Now during serializing we rebuild these datastructures
// while these datastructures aren't even used.
// 2) The aliasAndIndexLookup can be updated instead of rebuilding it all the time.

@@ -96,6 +96,13 @@ public class AllocationId implements ToXContent {
return new AllocationId(Strings.randomBase64UUID(), null);
}

+ /**
+ * Creates a new allocation id for initializing allocation based on an existing id.
+ */
+ public static AllocationId newInitializing(String existingAllocationId) {
+ return new AllocationId(existingAllocationId, null);
+ }
+
/**
* Creates a new allocation id for the target initializing shard that is the result
* of a relocation.
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.Index;
@@ -420,11 +421,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {

/**
* Moves a shard from unassigned to initialize state
+ *
+ * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
- public void initialize(ShardRouting shard, String nodeId, long expectedSize) {
+ public void initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) {
ensureMutable();
assert shard.unassigned() : shard;
- shard.initialize(nodeId, expectedSize);
+ shard.initialize(nodeId, existingAllocationId, expectedSize);
node(nodeId).add(shard);
inactiveShardCount++;
if (shard.primary()) {
@@ -692,10 +695,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {

/**
* Initializes the current unassigned shard and moves it from the unassigned list.
+ *
+ * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
- public void initialize(String nodeId, long expectedShardSize) {
+ public void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
innerRemove();
- nodes.initialize(new ShardRouting(current), nodeId, expectedShardSize);
+ nodes.initialize(new ShardRouting(current), nodeId, existingAllocationId, expectedShardSize);
}

/**
@@ -711,7 +716,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

/**
* Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
- * {@link #initialize(String, long)}.
+ * {@link #initialize(String, String, long)}.
*/
@Override
public void remove() {
@@ -410,14 +410,20 @@ public final class ShardRouting implements Streamable, ToXContent {

/**
* Initializes an unassigned shard on a node.
+ *
+ * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
- void initialize(String nodeId, long expectedShardSize) {
+ void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) {
ensureNotFrozen();
assert state == ShardRoutingState.UNASSIGNED : this;
assert relocatingNodeId == null : this;
state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId;
- allocationId = AllocationId.newInitializing();
+ if (existingAllocationId == null) {
+ allocationId = AllocationId.newInitializing();
+ } else {
+ allocationId = AllocationId.newInitializing(existingAllocationId);
+ }
this.expectedShardSize = expectedShardSize;
}

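The widened signature gives callers two call shapes; a sketch against the public RoutingNodes#initialize shown earlier (identifiers are placeholders, and every caller updated in this commit passes null for the new argument):

```java
// 1) Fresh allocation id, the behavior all pre-existing callers keep:
routingNodes.initialize(shard, nodeId, null, expectedSize);

// 2) Reuse an allocation id that is already known for this shard copy:
routingNodes.initialize(shard, nodeId, existingAllocationId, expectedSize);
```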
@@ -611,7 +611,7 @@ public class AllocationService extends AbstractComponent {
return routingNodes;
}

- /** ovrride this to control time based decisions during allocation */
+ /** override this to control time based decisions during allocation */
protected long currentNanoTime() {
return System.nanoTime();
}
@@ -30,7 +30,7 @@ import java.util.List;

/**
* This {@link RoutingAllocation} keeps a shard which routing
- * allocation has faild
+ * allocation has failed.
*/
public class FailedRerouteAllocation extends RoutingAllocation {

@@ -571,7 +571,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}

/**
- * Allocates all given shards on the minimal eligable node for the shards index
+ * Allocates all given shards on the minimal eligible node for the shards index
* with respect to the weight function. All given shards must be unassigned.
*/
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) {
@@ -611,7 +611,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like:
* [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
* if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
- * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ingoreUnassigned.
+ * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned.
*/
ShardRouting[] primary = unassigned.drain();
ShardRouting[] secondary = new ShardRouting[primary.length];
@@ -702,7 +702,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
- routingNodes.initialize(shard, minNode.getNodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+ routingNodes.initialize(shard, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
changed = true;
continue; // don't add to ignoreUnassigned
} else {
@@ -733,7 +733,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
secondary = tmp;
secondaryLength = 0;
} while (primaryLength > 0);
- // clear everything we have either added it or moved to ingoreUnassigned
+ // clear everything we have either added it or moved to ignoreUnassigned
return changed;
}

@@ -790,7 +790,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));

} else {
- routingNodes.initialize(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+ routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
return true;

@@ -242,7 +242,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
if (shardRoutingChanges != null) {
shardRoutingChanges.accept(unassigned);
}
- it.initialize(routingNode.nodeId(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+ it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
return;
}
assert false : "shard to initialize not found in list of unassigned shards";
@@ -54,7 +54,7 @@ import java.util.Map;
* <p>
* Awareness can also be used to prevent over-allocation in the case of node or
* even "zone" failure. For example in cloud-computing infrastructures like
- * Amazone AWS a cluster might span over multiple "zones". Awareness can be used
+ * Amazon AWS a cluster might span over multiple "zones". Awareness can be used
* to distribute replicas to individual zones by setting:
* <pre>
* cluster.routing.allocation.awareness.attributes: zone
@@ -30,7 +30,7 @@ import org.elasticsearch.common.settings.Settings;
* Similar to the {@link ClusterRebalanceAllocationDecider} this
* {@link AllocationDecider} controls the number of currently in-progress
* re-balance (relocation) operations and restricts node allocations if the
- * configured threashold is reached. The default number of concurrent rebalance
+ * configured threshold is reached. The default number of concurrent rebalance
* operations is set to <tt>2</tt>
* <p>
* Re-balance operations can be controlled in real-time via the cluster update API using
@@ -282,7 +282,7 @@ public class DiskThresholdDecider extends AllocationDecider {

/**
* Returns the size of all shards that are currently being relocated to
- * the node, but may not be finished transfering yet.
+ * the node, but may not be finished transferring yet.
*
* If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
* of all shards
@@ -20,8 +20,11 @@
package org.elasticsearch.common.geo;

import org.apache.lucene.util.BitUtil;
- import org.apache.lucene.util.GeoHashUtils;
- import org.apache.lucene.util.GeoUtils;

+ import static org.apache.lucene.spatial.util.GeoHashUtils.mortonEncode;
+ import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode;
+ import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLat;
+ import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonUnhashLon;

/**
*
@@ -81,14 +84,14 @@ public final class GeoPoint {
}

public GeoPoint resetFromIndexHash(long hash) {
- lon = GeoUtils.mortonUnhashLon(hash);
- lat = GeoUtils.mortonUnhashLat(hash);
+ lon = mortonUnhashLon(hash);
+ lat = mortonUnhashLat(hash);
return this;
}

public GeoPoint resetFromGeoHash(String geohash) {
- final long hash = GeoHashUtils.mortonEncode(geohash);
- return this.reset(GeoUtils.mortonUnhashLat(hash), GeoUtils.mortonUnhashLon(hash));
+ final long hash = mortonEncode(geohash);
+ return this.reset(mortonUnhashLat(hash), mortonUnhashLon(hash));
}

public GeoPoint resetFromGeoHash(long geohashLong) {
@@ -113,11 +116,11 @@ public final class GeoPoint {
}

public final String geohash() {
- return GeoHashUtils.stringEncode(lon, lat);
+ return stringEncode(lon, lat);
}

public final String getGeohash() {
- return GeoHashUtils.stringEncode(lon, lat);
+ return stringEncode(lon, lat);
}

@Override
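A small round trip through the methods touched above, assuming GeoPoint's no-argument constructor (the geohash literal is arbitrary):

```java
// Decode a geohash into lat/lon, then re-encode it: resetFromGeoHash goes
// through mortonEncode + mortonUnhash{Lat,Lon}, geohash() through
// stringEncode(lon, lat), which emits a full-precision hash whose prefix
// matches the input.
GeoPoint point = new GeoPoint();
point.resetFromGeoHash("u33dc0");       // arbitrary example geohash
String roundTripped = point.geohash();
```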
@@ -21,7 +21,6 @@ package org.elasticsearch.common.geo;

import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
- import org.apache.lucene.util.GeoDistanceUtils;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.unit.DistanceUnit;
@@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;

+ import static org.apache.lucene.spatial.util.GeoDistanceUtils.maxRadialDistanceMeters;
+
import java.io.IOException;

/**
@@ -70,7 +71,7 @@ public class GeoUtils {
* maximum distance/radius from the point 'center' before overlapping
**/
public static double maxRadialDistance(GeoPoint center, double initialRadius) {
- final double maxRadius = GeoDistanceUtils.maxRadialDistanceMeters(center.lon(), center.lat());
+ final double maxRadius = maxRadialDistanceMeters(center.lon(), center.lat());
return Math.min(initialRadius, maxRadius);
}

@@ -104,7 +104,7 @@ public class NetworkService extends AbstractComponent {
*/
public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOException {
// first check settings
- if (bindHosts == null) {
+ if (bindHosts == null || bindHosts.length == 0) {
if (GLOBAL_NETWORK_BINDHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
@@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+ import org.elasticsearch.bootstrap.BootstrapSettings;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClientNodesService;
@@ -58,6 +59,7 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
+ import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesService;
@@ -88,7 +90,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.transport.netty.NettyTransport;
import org.elasticsearch.tribe.TribeService;
- import org.elasticsearch.bootstrap.BootstrapSettings;

import java.util.Arrays;
import java.util.Collections;
@@ -141,7 +142,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
String component = key.substring("logger.".length());
if ("_root".equals(component)) {
final String rootLevel = value.get(key);
- ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings).name() : rootLevel);
+ ESLoggerFactory.getRootLogger().setLevel(rootLevel == null ? ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)
+ .name() : rootLevel);
} else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key));
}
@@ -150,240 +152,252 @@ public final class ClusterSettings extends AbstractScopedSettings {
}

public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
- Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
- TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
- TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
- TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
- TransportClientNodesService.CLIENT_TRANSPORT_SNIFF,
- AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
- BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
- BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
- BalancedShardsAllocator.THRESHOLD_SETTING,
- ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
- ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
- EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
- EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
- FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
- FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
- FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
- FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
- FsRepository.REPOSITORIES_COMPRESS_SETTING,
- FsRepository.REPOSITORIES_LOCATION_SETTING,
- IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
- IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
- IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
- IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
- IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
- MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
- MetaData.SETTING_READ_ONLY_SETTING,
- RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
- RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
- RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
- RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
- RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
- RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
- ThreadPool.THREADPOOL_GROUP_SETTING,
- ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
- ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
- ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
- ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
- InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
- InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
- SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
- DestructiveOperations.REQUIRES_NAME_SETTING,
- DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
- DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
- DiscoverySettings.COMMIT_TIMEOUT_SETTING,
- DiscoverySettings.NO_MASTER_BLOCK_SETTING,
- GatewayService.EXPECTED_DATA_NODES_SETTING,
- GatewayService.EXPECTED_MASTER_NODES_SETTING,
- GatewayService.EXPECTED_NODES_SETTING,
- GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
- GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
- GatewayService.RECOVER_AFTER_NODES_SETTING,
- GatewayService.RECOVER_AFTER_TIME_SETTING,
- NetworkModule.HTTP_ENABLED,
- NetworkModule.HTTP_TYPE_SETTING,
- NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
- NetworkModule.TRANSPORT_TYPE_SETTING,
- HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
- HttpTransportSettings.SETTING_CORS_ENABLED,
- HttpTransportSettings.SETTING_CORS_MAX_AGE,
- HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
- HttpTransportSettings.SETTING_PIPELINING,
- HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
- HttpTransportSettings.SETTING_HTTP_PORT,
- HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
- HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
- HttpTransportSettings.SETTING_HTTP_COMPRESSION,
- HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
- HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
- HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
- HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
- HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
- HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
- HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
- HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
- HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
- HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
- HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
- HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
- HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
- HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
- InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
- SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
- ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
- TransportService.TRACE_LOG_EXCLUDE_SETTING,
- TransportService.TRACE_LOG_INCLUDE_SETTING,
- TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
- ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
- InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
- HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
- HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
- Transport.TRANSPORT_TCP_COMPRESS,
- TransportSettings.TRANSPORT_PROFILES_SETTING,
- TransportSettings.HOST,
- TransportSettings.PUBLISH_HOST,
- TransportSettings.BIND_HOST,
- TransportSettings.PUBLISH_PORT,
- TransportSettings.PORT,
- NettyTransport.WORKER_COUNT,
- NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
- NettyTransport.CONNECTIONS_PER_NODE_BULK,
- NettyTransport.CONNECTIONS_PER_NODE_REG,
- NettyTransport.CONNECTIONS_PER_NODE_STATE,
- NettyTransport.CONNECTIONS_PER_NODE_PING,
- NettyTransport.PING_SCHEDULE,
- NettyTransport.TCP_BLOCKING_CLIENT,
- NettyTransport.TCP_CONNECT_TIMEOUT,
- NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
- NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
- NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
- NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
- NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
- NetworkService.NETWORK_SERVER,
- NettyTransport.NETTY_BOSS_COUNT,
- NettyTransport.TCP_NO_DELAY,
- NettyTransport.TCP_KEEP_ALIVE,
- NettyTransport.TCP_REUSE_ADDRESS,
- NettyTransport.TCP_SEND_BUFFER_SIZE,
- NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
- NettyTransport.TCP_BLOCKING_SERVER,
- NetworkService.GLOBAL_NETWORK_HOST_SETTING,
- NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
- NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
- NetworkService.TcpSettings.TCP_NO_DELAY,
- NetworkService.TcpSettings.TCP_KEEP_ALIVE,
- NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
- NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
- NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
- NetworkService.TcpSettings.TCP_BLOCKING,
- NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
- NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
- NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
- IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
- IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
- PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
- ScriptService.SCRIPT_CACHE_SIZE_SETTING,
- ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
- ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
- IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
- IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
- IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
- IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
- IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
- HunspellService.HUNSPELL_LAZY_LOAD,
- HunspellService.HUNSPELL_IGNORE_CASE,
- HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
- IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
- Environment.PATH_CONF_SETTING,
- Environment.PATH_DATA_SETTING,
- Environment.PATH_HOME_SETTING,
- Environment.PATH_LOGS_SETTING,
- Environment.PATH_PLUGINS_SETTING,
- Environment.PATH_REPO_SETTING,
- Environment.PATH_SCRIPTS_SETTING,
- Environment.PATH_SHARED_DATA_SETTING,
- Environment.PIDFILE_SETTING,
- DiscoveryService.DISCOVERY_SEED_SETTING,
- DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
- DiscoveryModule.DISCOVERY_TYPE_SETTING,
- DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
- FaultDetection.PING_RETRIES_SETTING,
- FaultDetection.PING_TIMEOUT_SETTING,
- FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
- FaultDetection.PING_INTERVAL_SETTING,
- FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
- ZenDiscovery.PING_TIMEOUT_SETTING,
- ZenDiscovery.JOIN_TIMEOUT_SETTING,
- ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
- ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
- ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
- ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
- ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
- ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
- ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
- UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
- UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
- SearchService.DEFAULT_KEEPALIVE_SETTING,
- SearchService.KEEPALIVE_INTERVAL_SETTING,
- Node.WRITE_PORTS_FIELD_SETTING,
- Node.NODE_NAME_SETTING,
- Node.NODE_CLIENT_SETTING,
- Node.NODE_DATA_SETTING,
- Node.NODE_MASTER_SETTING,
- Node.NODE_LOCAL_SETTING,
- Node.NODE_MODE_SETTING,
- Node.NODE_INGEST_SETTING,
- Node.NODE_ATTRIBUTES,
- URLRepository.ALLOWED_URLS_SETTING,
- URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
- URLRepository.REPOSITORIES_URL_SETTING,
- URLRepository.SUPPORTED_PROTOCOLS_SETTING,
- TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
- AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
- BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
- ClusterName.CLUSTER_NAME_SETTING,
- Client.CLIENT_TYPE_SETTING_S,
- InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
- ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
- EsExecutors.PROCESSORS_SETTING,
- ThreadContext.DEFAULT_HEADERS_SETTING,
- ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
- ESLoggerFactory.LOG_LEVEL_SETTING,
- TribeService.BLOCKS_METADATA_SETTING,
- TribeService.BLOCKS_WRITE_SETTING,
- TribeService.BLOCKS_WRITE_INDICES_SETTING,
- TribeService.BLOCKS_READ_INDICES_SETTING,
- TribeService.BLOCKS_METADATA_INDICES_SETTING,
- TribeService.ON_CONFLICT_SETTING,
- TribeService.TRIBE_NAME_SETTING,
- NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
- NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
- NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
- OsService.REFRESH_INTERVAL_SETTING,
- ProcessService.REFRESH_INTERVAL_SETTING,
- JvmService.REFRESH_INTERVAL_SETTING,
- FsService.REFRESH_INTERVAL_SETTING,
- JvmGcMonitorService.ENABLED_SETTING,
- JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
- JvmGcMonitorService.GC_SETTING,
- PageCacheRecycler.LIMIT_HEAP_SETTING,
- PageCacheRecycler.WEIGHT_BYTES_SETTING,
- PageCacheRecycler.WEIGHT_INT_SETTING,
- PageCacheRecycler.WEIGHT_LONG_SETTING,
- PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
- PageCacheRecycler.TYPE_SETTING,
- PluginsService.MANDATORY_SETTING,
- BootstrapSettings.SECURITY_MANAGER_ENABLED_SETTING,
- BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
- BootstrapSettings.MLOCKALL_SETTING,
- BootstrapSettings.SECCOMP_SETTING,
- BootstrapSettings.CTRLHANDLER_SETTING
- )));
+ Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
+ TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind
+ // of odd here and should only be valid if we are a transport client
+ TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
+ TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
+ TransportClientNodesService.CLIENT_TRANSPORT_SNIFF,
+ AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
+ BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
+ BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
+ BalancedShardsAllocator.THRESHOLD_SETTING,
+ ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
+ ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
+ EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
+ EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
+ FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
+ FsRepository.REPOSITORIES_COMPRESS_SETTING,
+ FsRepository.REPOSITORIES_LOCATION_SETTING,
+ IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
+ IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
+ IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
+ IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
+ IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
+ MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
+ MetaData.SETTING_READ_ONLY_SETTING,
+ RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
+ RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
+ ThreadPool.THREADPOOL_GROUP_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
+ SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
+ DestructiveOperations.REQUIRES_NAME_SETTING,
+ DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
+ DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
+ DiscoverySettings.COMMIT_TIMEOUT_SETTING,
+ DiscoverySettings.NO_MASTER_BLOCK_SETTING,
+ GatewayService.EXPECTED_DATA_NODES_SETTING,
+ GatewayService.EXPECTED_MASTER_NODES_SETTING,
+ GatewayService.EXPECTED_NODES_SETTING,
+ GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
+ GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
+ GatewayService.RECOVER_AFTER_NODES_SETTING,
+ GatewayService.RECOVER_AFTER_TIME_SETTING,
+ NetworkModule.HTTP_ENABLED,
+ NetworkModule.HTTP_TYPE_SETTING,
+ NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
+ NetworkModule.TRANSPORT_TYPE_SETTING,
+ HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
+ HttpTransportSettings.SETTING_CORS_ENABLED,
+ HttpTransportSettings.SETTING_CORS_MAX_AGE,
+ HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+ HttpTransportSettings.SETTING_PIPELINING,
+ HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
+ HttpTransportSettings.SETTING_HTTP_PORT,
+ HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
+ HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
+ HttpTransportSettings.SETTING_HTTP_COMPRESSION,
+ HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
+ HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
+ HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
+ HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
+ HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
+ HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
+ HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
+ HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
+ HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
+ NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
+ NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
+ NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE,
+ NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN,
+ NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX,
+ NettyHttpServerTransport.SETTING_HTTP_WORKER_COUNT,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_NO_DELAY,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_KEEP_ALIVE,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_BLOCKING_SERVER,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
+ NettyHttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
+ HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
+ SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
+ ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
+ TransportService.TRACE_LOG_EXCLUDE_SETTING,
+ TransportService.TRACE_LOG_INCLUDE_SETTING,
+ TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
+ ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
+ InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
+ Transport.TRANSPORT_TCP_COMPRESS,
+ TransportSettings.TRANSPORT_PROFILES_SETTING,
+ TransportSettings.HOST,
+ TransportSettings.PUBLISH_HOST,
+ TransportSettings.BIND_HOST,
+ TransportSettings.PUBLISH_PORT,
+ TransportSettings.PORT,
+ NettyTransport.WORKER_COUNT,
+ NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
+ NettyTransport.CONNECTIONS_PER_NODE_BULK,
+ NettyTransport.CONNECTIONS_PER_NODE_REG,
+ NettyTransport.CONNECTIONS_PER_NODE_STATE,
+ NettyTransport.CONNECTIONS_PER_NODE_PING,
+ NettyTransport.PING_SCHEDULE,
+ NettyTransport.TCP_BLOCKING_CLIENT,
+ NettyTransport.TCP_CONNECT_TIMEOUT,
+ NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
+ NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
+ NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
+ NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
+ NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
+ NetworkService.NETWORK_SERVER,
+ NettyTransport.NETTY_BOSS_COUNT,
+ NettyTransport.TCP_NO_DELAY,
+ NettyTransport.TCP_KEEP_ALIVE,
+ NettyTransport.TCP_REUSE_ADDRESS,
+ NettyTransport.TCP_SEND_BUFFER_SIZE,
+ NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
+ NettyTransport.TCP_BLOCKING_SERVER,
+ NetworkService.GLOBAL_NETWORK_HOST_SETTING,
+ NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
+ NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
+ NetworkService.TcpSettings.TCP_NO_DELAY,
+ NetworkService.TcpSettings.TCP_KEEP_ALIVE,
+ NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
+ NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
+ NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
+ NetworkService.TcpSettings.TCP_BLOCKING,
+ NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
+ NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
+ NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
+ IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
+ IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
+ PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
+ ScriptService.SCRIPT_CACHE_SIZE_SETTING,
+ ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
+ ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
+ IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
+ IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
+ IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
+ IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
+ IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
+ HunspellService.HUNSPELL_LAZY_LOAD,
+ HunspellService.HUNSPELL_IGNORE_CASE,
+ HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
+ IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
+ Environment.PATH_CONF_SETTING,
+ Environment.PATH_DATA_SETTING,
+ Environment.PATH_HOME_SETTING,
+ Environment.PATH_LOGS_SETTING,
+ Environment.PATH_PLUGINS_SETTING,
+ Environment.PATH_REPO_SETTING,
+ Environment.PATH_SCRIPTS_SETTING,
+ Environment.PATH_SHARED_DATA_SETTING,
+ Environment.PIDFILE_SETTING,
+ DiscoveryService.DISCOVERY_SEED_SETTING,
+ DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
+ DiscoveryModule.DISCOVERY_TYPE_SETTING,
+ DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
+ FaultDetection.PING_RETRIES_SETTING,
+ FaultDetection.PING_TIMEOUT_SETTING,
+ FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
+ FaultDetection.PING_INTERVAL_SETTING,
+ FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
+ ZenDiscovery.PING_TIMEOUT_SETTING,
+ ZenDiscovery.JOIN_TIMEOUT_SETTING,
+ ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
+ ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
+ ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
+ ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
+ ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
+ ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
+ ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
+ UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
+ UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
+ SearchService.DEFAULT_KEEPALIVE_SETTING,
+ SearchService.KEEPALIVE_INTERVAL_SETTING,
+ Node.WRITE_PORTS_FIELD_SETTING,
+ Node.NODE_NAME_SETTING,
+ Node.NODE_CLIENT_SETTING,
+ Node.NODE_DATA_SETTING,
+ Node.NODE_MASTER_SETTING,
+ Node.NODE_LOCAL_SETTING,
+ Node.NODE_MODE_SETTING,
+ Node.NODE_INGEST_SETTING,
+ Node.NODE_ATTRIBUTES,
+ URLRepository.ALLOWED_URLS_SETTING,
+ URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
+ URLRepository.REPOSITORIES_URL_SETTING,
+ URLRepository.SUPPORTED_PROTOCOLS_SETTING,
+ TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
+ AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
+ BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX,
+ ClusterName.CLUSTER_NAME_SETTING,
+ Client.CLIENT_TYPE_SETTING_S,
+ InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
+ ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
+ EsExecutors.PROCESSORS_SETTING,
+ ThreadContext.DEFAULT_HEADERS_SETTING,
+ ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
+ ESLoggerFactory.LOG_LEVEL_SETTING,
+ TribeService.BLOCKS_METADATA_SETTING,
+ TribeService.BLOCKS_WRITE_SETTING,
+ TribeService.BLOCKS_WRITE_INDICES_SETTING,
+ TribeService.BLOCKS_READ_INDICES_SETTING,
+ TribeService.BLOCKS_METADATA_INDICES_SETTING,
+ TribeService.ON_CONFLICT_SETTING,
+ TribeService.TRIBE_NAME_SETTING,
+ NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
+ NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
+ NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH,
+ OsService.REFRESH_INTERVAL_SETTING,
+ ProcessService.REFRESH_INTERVAL_SETTING,
+ JvmService.REFRESH_INTERVAL_SETTING,
+ FsService.REFRESH_INTERVAL_SETTING,
+ JvmGcMonitorService.ENABLED_SETTING,
+ JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
+ JvmGcMonitorService.GC_SETTING,
+ PageCacheRecycler.LIMIT_HEAP_SETTING,
+ PageCacheRecycler.WEIGHT_BYTES_SETTING,
+ PageCacheRecycler.WEIGHT_INT_SETTING,
+ PageCacheRecycler.WEIGHT_LONG_SETTING,
+ PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
+ PageCacheRecycler.TYPE_SETTING,
|
||||
PluginsService.MANDATORY_SETTING,
|
||||
BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
|
||||
BootstrapSettings.MLOCKALL_SETTING,
|
||||
BootstrapSettings.SECCOMP_SETTING,
|
||||
BootstrapSettings.CTRLHANDLER_SETTING
|
||||
)));
|
||||
}
|
||||
|
@ -20,7 +20,6 @@
package org.elasticsearch.common.settings;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.tribe.TribeService;

import java.util.HashMap;
@ -90,7 +89,7 @@ public class SettingsModule extends AbstractModule {

/**
* Registers a settings filter pattern that allows to filter out certain settings that for instance contain sensitive information
* or if a setting is for internal purposes only. The given patter must either be a valid settings key or a simple regesp pattern.
* or if a setting is for internal purposes only. The given pattern must either be a valid settings key or a simple regexp pattern.
*/
public void registerSettingsFilter(String filter) {
if (SettingsFilter.isValidPattern(filter) == false) {
@ -108,6 +107,18 @@ public class SettingsModule extends AbstractModule {
}
}

/**
* Check if a setting has already been registered
*/
public boolean exists(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
return clusterSettings.containsKey(setting.getKey());
case INDEX:
return indexSettings.containsKey(setting.getKey());
}
throw new IllegalArgumentException("setting scope is unknown. This should never happen!");
}

private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
Map<String, Settings> groups = settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);

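A minimal sketch of how the new exists() check can guard against duplicate registration. The setting name and the helper are hypothetical plugin-side code, not part of this change, and registerSetting is assumed to be the module's registration entry point:

    // hypothetical plugin-side guard; MY_SETTING is illustrative only
    public static final Setting<Integer> MY_SETTING =
        Setting.intSetting("my_plugin.max_widgets", 10, false, Setting.Scope.CLUSTER);

    static void registerIfAbsent(SettingsModule module) {
        if (module.exists(MY_SETTING) == false) { // containsKey lookup in the matching scope
            module.registerSetting(MY_SETTING);   // assumed registration API
        }
    }
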
@ -80,14 +80,32 @@ public class CancellableThreads {
* @param interruptable code to run
*/
public void execute(Interruptable interruptable) {
try {
executeIO(interruptable);
} catch (IOException e) {
assert false : "the passed interruptable can not result in an IOException";
throw new RuntimeException("unexpected IO exception", e);
}
}
/**
* run the Interruptable, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread
* causing the call to prematurely return.
*
* @param interruptable code to run
*/
public void executeIO(IOInterruptable interruptable) throws IOException {
boolean wasInterrupted = add();
RuntimeException throwable = null;
RuntimeException runtimeException = null;
IOException ioException = null;

try {
interruptable.run();
} catch (InterruptedException | ThreadInterruptedException e) {
// assume this is us and ignore
} catch (RuntimeException t) {
throwable = t;
runtimeException = t;
} catch (IOException e) {
ioException = e;
} finally {
remove();
}
@ -101,10 +119,14 @@ public class CancellableThreads {
}
synchronized (this) {
if (isCancelled()) {
onCancel(reason, throwable);
} else if (throwable != null) {
onCancel(reason, ioException != null ? ioException : runtimeException);
} else if (ioException != null) {
// if we're not canceling, we throw the original exception
throw throwable;
throw ioException;
}
if (runtimeException != null) {
// if we're not canceling, we throw the original exception
throw runtimeException;
}
}
}
@ -131,10 +153,14 @@ public class CancellableThreads {
}


public interface Interruptable {
public interface Interruptable extends IOInterruptable {
void run() throws InterruptedException;
}

public interface IOInterruptable {
void run() throws IOException, InterruptedException;
}

public static class ExecutionCancelledException extends ElasticsearchException {

public ExecutionCancelledException(String msg) {

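A short usage sketch for the executeIO variant introduced above. The blocking read and the logger are illustrative stand-ins, not code from this change:

    CancellableThreads cancellableThreads = new CancellableThreads();
    try {
        cancellableThreads.executeIO(() -> {
            // may block on IO; a concurrent cancel(reason) interrupts this thread
            indexInput.readBytes(buffer, 0, buffer.length); // hypothetical blocking read
        });
    } catch (IOException e) {
        // when the operation was not cancelled, executeIO rethrows the original IOException
        logger.warn("read failed", e);
    }
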
@ -41,7 +41,7 @@ public class DiscoverySettings extends AbstractComponent {
public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
/**
* sets the timeout for a complete publishing cycle, including both sending and committing. the master
* will continute to process the next cluster state update after this time has elapsed
* will continue to process the next cluster state update after this time has elapsed
**/
public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

@ -328,7 +328,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
ClusterState newNodeSpecificClusterState = null;
synchronized (this) {
// we do the marshaling intentionally, to check it works well...
// check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time
// check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
// both conditions are true - which means we can try sending cluster state as diffs
if (clusterStateDiffBytes == null) {

@ -79,7 +79,7 @@ public class ElectMasterService extends AbstractComponent {
}

/**
* Returns the given nodes sorted by likelyhood of being elected as master, most likely first.
* Returns the given nodes sorted by likelihood of being elected as master, most likely first.
* Non-master nodes are not removed but are rather put in the end
*/
public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {

@ -342,7 +342,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
// sort the nodes by likelihood of being an active master
List<DiscoveryNode> sortedNodesToPing = electMasterService.sortByMasterLikelihood(nodesToPingSet);

// new add the the unicast targets first
// new add the unicast targets first
List<DiscoveryNode> nodesToPing = CollectionUtils.arrayAsArrayList(configuredTargetNodes);
nodesToPing.addAll(sortedNodesToPing);

@ -166,7 +166,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
/**
* Called by the response handler of the async action to fetch data. Verifies that its still working
* on the same cache generation, otherwise the results are discarded. It then goes and fills the relevant data for
* the shard (response + failures), issueing a reroute at the end of it to make sure there will be another round
* the shard (response + failures), issuing a reroute at the end of it to make sure there will be another round
* of allocations taking this new data into account.
*/
protected synchronized void processAsyncFetch(ShardId shardId, T[] responses, FailedNodeException[] failures) {

@ -255,7 +255,7 @@ public abstract class MetaDataStateFormat<T> {
List<PathAndStateId> files = new ArrayList<>();
long maxStateId = -1;
boolean maxStateIdIsLegacy = true;
if (dataLocations != null) { // select all eligable files first
if (dataLocations != null) { // select all eligible files first
for (Path dataLocation : dataLocations) {
final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
// now, iterate over the current versions, and find latest one

@ -32,15 +32,14 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
import org.elasticsearch.index.shard.ShardStateMetaData;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -98,7 +97,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
continue;
}

final AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
final AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
@ -110,7 +109,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final boolean snapshotRestore = shard.restoreSource() != null;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);

final NodesResult nodesResult;
final NodeShardsResult nodeShardsResult;
final boolean enoughAllocationsFound;

if (lastActiveAllocationIds.isEmpty()) {
@ -118,20 +117,20 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodesResult = buildVersionBasedNodes(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodesResult.allocationsFound > 0;
enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesResult);
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodesResult.allocationsFound, shard);
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodeShardsResult.allocationsFound, shard);
} else {
assert lastActiveAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodesResult = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
enoughAllocationsFound = nodesResult.allocationsFound > 0;
logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesResult.allocationsFound, shard, lastActiveAllocationIds);
enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, lastActiveAllocationIds);
}

if (enoughAllocationsFound == false){
@ -144,25 +143,25 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
// we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore();
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesResult.allocationsFound);
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound);
}
continue;
}

final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesResult.nodes);
if (nodesToAllocate.yesNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodeShardsResult.orderedAllocationCandidates);
if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
changed = true;
unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (nodesToAllocate.throttleNodes.isEmpty() == true && nodesToAllocate.noNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.noNodes.get(0);
logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
NodeGatewayStartedShards nodeShardState = nodesToAllocate.noNodeShards.get(0);
logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
changed = true;
unassignedIterator.initialize(node.id(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
unassignedIterator.initialize(nodeShardState.getNode().id(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodes);
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
unassignedIterator.removeAndIgnore();
}
}
@ -174,11 +173,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
* entries with matching allocation id are always at the front of the list.
*/
protected NodesResult buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
LinkedList<DiscoveryNode> matchingNodes = new LinkedList<>();
LinkedList<DiscoveryNode> nonMatchingNodes = new LinkedList<>();
for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
int numberOfAllocationsFound = 0;
for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
DiscoveryNode node = nodeShardState.getNode();
String allocationId = nodeShardState.allocationId();

@ -199,36 +199,37 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}

if (allocationId != null) {
numberOfAllocationsFound++;
if (lastActiveAllocationIds.contains(allocationId)) {
if (nodeShardState.primary()) {
matchingNodes.addFirst(node);
matchingNodeShardStates.addFirst(nodeShardState);
} else {
matchingNodes.addLast(node);
matchingNodeShardStates.addLast(nodeShardState);
}
} else if (matchAnyShard) {
if (nodeShardState.primary()) {
nonMatchingNodes.addFirst(node);
nonMatchingNodeShardStates.addFirst(nodeShardState);
} else {
nonMatchingNodes.addLast(node);
nonMatchingNodeShardStates.addLast(nodeShardState);
}
}
}
}

List<DiscoveryNode> nodes = new ArrayList<>();
nodes.addAll(matchingNodes);
nodes.addAll(nonMatchingNodes);
List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>();
nodeShardStates.addAll(matchingNodeShardStates);
nodeShardStates.addAll(nonMatchingNodeShardStates);

if (logger.isTraceEnabled()) {
logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", ")));
logger.trace("{} candidates for allocation: {}", shard, nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")));
}
return new NodesResult(nodes, nodes.size());
return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound);
}

/**
* used by old version-based allocation
*/
private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesResult nodesAndVersions) {
private boolean isEnoughVersionBasedAllocationsFound(IndexMetaData indexMetaData, NodeShardsResult nodeShardsResult) {
// check if the counts meets the minimum set
int requiredAllocation = 1;
// if we restore from a repository one copy is more then enough
@ -253,45 +254,44 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
requiredAllocation = Integer.parseInt(initialShards);
}

return nodesAndVersions.allocationsFound >= requiredAllocation;
return nodeShardsResult.allocationsFound >= requiredAllocation;
}

/**
* Split the list of nodes to lists of yes/no/throttle nodes based on allocation deciders
* Split the list of node shard states into groups yes/no/throttle based on allocation deciders
*/
private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<DiscoveryNode> nodes) {
List<DiscoveryNode> yesNodes = new ArrayList<>();
List<DiscoveryNode> throttledNodes = new ArrayList<>();
List<DiscoveryNode> noNodes = new ArrayList<>();
for (DiscoveryNode discoNode : nodes) {
RoutingNode node = allocation.routingNodes().node(discoNode.id());
private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<NodeGatewayStartedShards> nodeShardStates) {
List<NodeGatewayStartedShards> yesNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().id());
if (node == null) {
continue;
}

Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
throttledNodes.add(discoNode);
throttledNodeShards.add(nodeShardState);
} else if (decision.type() == Decision.Type.NO) {
noNodes.add(discoNode);
noNodeShards.add(nodeShardState);
} else {
yesNodes.add(discoNode);
yesNodeShards.add(nodeShardState);
}
}
return new NodesToAllocate(Collections.unmodifiableList(yesNodes), Collections.unmodifiableList(throttledNodes), Collections.unmodifiableList(noNodes));
return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards));
}

/**
* Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version
* are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest
* version are always at the front of the list.
* Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to
* the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list.
*/
NodesResult buildVersionBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>();
NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
final List<NodeGatewayStartedShards> allocationCandidates = new ArrayList<>();
int numberOfAllocationsFound = 0;
long highestVersion = ShardStateMetaData.NO_VERSION;
for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
long version = nodeShardState.legacyVersion();
DiscoveryNode node = nodeShardState.getNode();

@ -315,38 +315,29 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
if (version > highestVersion) {
highestVersion = version;
if (matchAnyShard == false) {
nodesWithVersion.clear();
allocationCandidates.clear();
}
nodesWithVersion.put(node, version);
allocationCandidates.add(nodeShardState);
} else if (version == highestVersion) {
// If the candidate is the same, add it to the
// list, but keep the current candidate
nodesWithVersion.put(node, version);
allocationCandidates.add(nodeShardState);
}
}
}
// Now that we have a map of nodes to versions along with the
// number of allocations found (and not ignored), we need to sort
// it so the node with the highest version is at the beginning
List<DiscoveryNode> nodesWithHighestVersion = new ArrayList<>();
nodesWithHighestVersion.addAll(nodesWithVersion.keySet());
CollectionUtil.timSort(nodesWithHighestVersion, new Comparator<DiscoveryNode>() {
@Override
public int compare(DiscoveryNode o1, DiscoveryNode o2) {
return Long.compare(nodesWithVersion.get(o2), nodesWithVersion.get(o1));
}
});
// sort array so the node with the highest version is at the beginning
CollectionUtil.timSort(allocationCandidates, Comparator.comparing(NodeGatewayStartedShards::legacyVersion).reversed());

if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("[");
for (DiscoveryNode n : nodesWithVersion.keySet()) {
sb.append("[").append(n.getName()).append("]").append(" -> ").append(nodesWithVersion.get(n)).append(", ");
for (NodeGatewayStartedShards n : allocationCandidates) {
sb.append("[").append(n.getNode().getName()).append("]").append(" -> ").append(n.legacyVersion()).append(", ");
}
sb.append("]");
logger.trace("{} candidates for allocation: {}", shard, sb.toString());
}

return new NodesResult(Collections.unmodifiableList(nodesWithHighestVersion), numberOfAllocationsFound);
return new NodeShardsResult(Collections.unmodifiableList(allocationCandidates), numberOfAllocationsFound);
}

/**
@ -358,27 +349,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
&& IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings);
}

protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
protected abstract AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

static class NodesResult {
public final List<DiscoveryNode> nodes;
static class NodeShardsResult {
public final List<NodeGatewayStartedShards> orderedAllocationCandidates;
public final int allocationsFound;

public NodesResult(List<DiscoveryNode> nodes, int allocationsFound) {
this.nodes = nodes;
public NodeShardsResult(List<NodeGatewayStartedShards> orderedAllocationCandidates, int allocationsFound) {
this.orderedAllocationCandidates = orderedAllocationCandidates;
this.allocationsFound = allocationsFound;
}
}

static class NodesToAllocate {
final List<DiscoveryNode> yesNodes;
final List<DiscoveryNode> throttleNodes;
final List<DiscoveryNode> noNodes;
final List<NodeGatewayStartedShards> yesNodeShards;
final List<NodeGatewayStartedShards> throttleNodeShards;
final List<NodeGatewayStartedShards> noNodeShards;

public NodesToAllocate(List<DiscoveryNode> yesNodes, List<DiscoveryNode> throttleNodes, List<DiscoveryNode> noNodes) {
this.yesNodes = yesNodes;
this.throttleNodes = throttleNodes;
this.noNodes = noNodes;
public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards, List<NodeGatewayStartedShards> throttleNodeShards,
List<NodeGatewayStartedShards> noNodeShards) {
this.yesNodeShards = yesNodeShards;
this.throttleNodeShards = throttleNodeShards;
this.noNodeShards = noNodeShards;
}
}
}

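The same refactor replaces the hand-written Comparator with Comparator.comparing(...).reversed(). A standalone sketch of the idiom, with a hypothetical holder standing in for NodeGatewayStartedShards:

    class ShardState {
        private final long legacyVersion;
        ShardState(long legacyVersion) { this.legacyVersion = legacyVersion; }
        long legacyVersion() { return legacyVersion; }
    }

    List<ShardState> candidates = new ArrayList<>(Arrays.asList(new ShardState(3), new ShardState(7), new ShardState(5)));
    candidates.sort(Comparator.comparing(ShardState::legacyVersion).reversed());
    // candidates is now ordered 7, 5, 3 -- highest legacy version first

Comparator.comparingLong would avoid the boxing that comparing introduces here; either produces the ordering the allocator needs.
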
@ -173,7 +173,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
changed = true;
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed

@ -335,5 +335,28 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
out.writeBoolean(false);
}
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;

NodeGatewayStartedShards that = (NodeGatewayStartedShards) o;

if (legacyVersion != that.legacyVersion) return false;
if (primary != that.primary) return false;
if (allocationId != null ? !allocationId.equals(that.allocationId) : that.allocationId != null) return false;
return storeException != null ? storeException.equals(that.storeException) : that.storeException == null;

}

@Override
public int hashCode() {
int result = Long.hashCode(legacyVersion);
result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + (storeException != null ? storeException.hashCode() : 0);
return result;
}
}
}

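The hashCode above follows the standard 31-based accumulation: seed with the first field, then fold each field that participates in equals into the running result. A condensed sketch of the same recipe (java.util.Objects assumed imported):

    int result = Long.hashCode(legacyVersion);              // seed with the first field
    result = 31 * result + Objects.hashCode(allocationId);  // null-safe, same as the hand-written check
    result = 31 * result + (primary ? 1 : 0);               // Boolean.hashCode would also work (it maps to 1231/1237)

Keeping equals and hashCode over the same fields preserves the contract that equal instances hash equally.
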
@ -25,6 +25,11 @@ import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.util.List;

import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;

public final class HttpTransportSettings {

public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
@ -37,6 +42,10 @@ public final class HttpTransportSettings {
public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);

public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);

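A hedged sketch of how the new fallback chain resolves. The builder method name is assumed from this era of the codebase and may differ between snapshots:

    Settings settings = Settings.settingsBuilder().put("http.host", "10.0.0.1").build();
    // no explicit http.bind_host or http.publish_host, so both fall back to SETTING_HTTP_HOST
    List<String> bindHosts = HttpTransportSettings.SETTING_HTTP_BIND_HOST.get(settings);       // ["10.0.0.1"]
    List<String> publishHosts = HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST.get(settings); // ["10.0.0.1"]
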
@ -39,7 +39,7 @@ public class ESHttpContentDecompressor extends HttpContentDecompressor {
// compression is enabled so handle the request according to the headers (compressed and uncompressed)
return super.newContentDecoder(contentEncoding);
} else {
// if compression is disabled only allow "indentity" (uncompressed) requests
// if compression is disabled only allow "identity" (uncompressed) requests
if (HttpHeaders.Values.IDENTITY.equals(contentEncoding)) {
// nothing to handle here
return null;
@ -48,4 +48,4 @@ public class ESHttpContentDecompressor extends HttpContentDecompressor {
}
}
}
}
}

@ -20,13 +20,13 @@
package org.elasticsearch.http.netty;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -50,8 +50,8 @@ import org.elasticsearch.http.netty.cors.CorsConfigBuilder;
import org.elasticsearch.http.netty.cors.CorsHandler;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.rest.support.RestUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BindTransportException;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory;
@ -81,19 +81,16 @@ import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;

import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED;
@ -102,6 +99,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONT
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;
@ -117,6 +115,52 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
NettyUtils.setup();
}

public static Setting<ByteSizeValue> SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY =
Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS =
Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER);

public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count",
(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
(s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER);

public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings
.TCP_NO_DELAY, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings
.TCP_KEEP_ALIVE, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService
.TcpSettings.TCP_BLOCKING_SERVER,
false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService
.TcpSettings.TCP_REUSE_ADDRESS,
false, Setting.Scope.CLUSTER);

public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size",
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" +
".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
"transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
// we can guess a better default...
long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / SETTING_HTTP_WORKER_COUNT.get
(settings));
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}
return new ByteSizeValue(defaultReceiverPredictor).toString();
}, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" +
".receive_predictor_min",
SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" +
".receive_predictor_max",
SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);


protected final NetworkService networkService;
protected final BigArrays bigArrays;

@ -175,47 +219,36 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
private final CorsConfig corsConfig;

@Inject
@SuppressForbidden(reason = "sets org.jboss.netty.epollBugWorkaround based on netty.epollBugWorkaround")
// TODO: why be confusing like this? just let the user do it with the netty parameter instead!
public NettyHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool) {
super(settings);
this.networkService = networkService;
this.bigArrays = bigArrays;
this.threadPool = threadPool;

if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
}
ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings);
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.maxCumulationBufferCapacity = SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings);
this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
this.blockingServer = SETTING_HTTP_TCP_BLOCKING_SERVER.get(settings);
this.port = SETTING_HTTP_PORT.get(settings);
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.bindHosts = SETTING_HTTP_BIND_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
this.publishHosts = SETTING_HTTP_PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
this.publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings);
this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings);
this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
this.tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
// we can guess a better default...
long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
}

// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
ByteSizeValue receivePredictorMin = settings.getAsBytesSize("http.netty.receive_predictor_min", settings.getAsBytesSize("http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
ByteSizeValue receivePredictorMax = settings.getAsBytesSize("http.netty.receive_predictor_max", settings.getAsBytesSize("http.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
} else {
@ -479,7 +512,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
(int) transport.maxHeaderSize.bytes(),
(int) transport.maxChunkSize.bytes()
);
if (transport.maxCumulationBufferCapacity != null) {
if (transport.maxCumulationBufferCapacity.bytes() >= 0) {
if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
} else {

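Side by side, the two constructor styles this migration trades between; both lines are taken from the hunk above:

    // before: stringly-typed lookup with the default duplicated at the call site
    this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);

    // after: the Setting owns the key, default and parsing, and can be registered for validation
    this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
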
@ -132,7 +132,7 @@ public final class CorsConfig {
* xhr.withCredentials = true;
* </pre>
* The default value for 'withCredentials' is false in which case no cookies are sent.
* Settning this to true will included cookies in cross origin requests.
* Setting this to true will included cookies in cross origin requests.
*
* @return {@code true} if cookies are supported.
*/
@ -205,7 +205,7 @@ public final class CorsConfig {
* and this setting will check that the Origin is valid and if it is not valid no
* further processing will take place, and a error will be returned to the calling client.
*
* @return {@code true} if a CORS request should short-curcuit upon receiving an invalid Origin header.
* @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
*/
public boolean isShortCircuit() {
return shortCircuit;

@ -133,7 +133,7 @@ public final class CorsConfigBuilder {
/**
* Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
* from the local file system. Calling this method will enable a successful CORS response
* with a wildcard for the the CORS response header 'Access-Control-Allow-Origin'.
* with a wildcard for the CORS response header 'Access-Control-Allow-Origin'.
*
* @return {@link CorsConfigBuilder} to support method chaining.
*/

@ -258,4 +258,16 @@ final class CompositeIndexEventListener implements IndexEventListener {
}
}
}

@Override
public void onStoreClosed(ShardId shardId) {
for (IndexEventListener listener : listeners) {
try {
listener.onStoreClosed(shardId);
} catch (Throwable t) {
logger.warn("failed to invoke on store closed", t);
throw t;
}
}
}
}

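A hypothetical listener using the new onStoreClosed callback; illustrative only, and it assumes IndexEventListener's other methods carry default no-op implementations:

    IndexEventListener releaseOnClose = new IndexEventListener() {
        @Override
        public void onStoreClosed(ShardId shardId) {
            // release per-shard resources held outside the store once it is really closed
            logger.debug("store closed for {}", shardId);
        }
    };
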
@ -143,7 +143,7 @@ public final class IndexModule {
}

/**
* Adds an {@link IndexStore} type to this index module. Typically stores are registered with a refrence to
* Adds an {@link IndexStore} type to this index module. Typically stores are registered with a reference to
* it's constructor:
* <pre>
* indexModule.addIndexStore("my_store_type", MyStore::new);
@ -241,7 +241,7 @@ public final class IndexModule {
IndexSearcherWrapper newWrapper(final IndexService indexService);
}

public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache,
public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, IndicesQueryCache indicesQueryCache, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache,
IndexingOperationListener... listeners) throws IOException {
IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get();
IndexEventListener eventListener = freeze();
@ -263,7 +263,7 @@ public final class IndexModule {
indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate);
final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING);
final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = queryCaches.get(queryCacheType);
final QueryCache queryCache = queryCacheProvider.apply(indexSettings, servicesProvider.getIndicesQueryCache());
final QueryCache queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache);
return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(),
servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners);
}

@ -321,8 +321,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel);
}
};

store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
if (useShadowEngine(primary, indexSettings)) {
indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer); // no indexing listeners - shadow engines don't index
} else {
@ -691,7 +690,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}

boolean mustReschedule() {
// don't re-schedule if its closed or if we dont' have a single shard here..., we are done
// don't re-schedule if its closed or if we don't have a single shard here..., we are done
return indexService.closed.get() == false
&& closed.get() == false && interval.millis() > 0;
}

@ -329,7 +329,7 @@ public final class IndexSettings {
public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; }

/**
* Returns the node settings. The settings retured from {@link #getSettings()} are a merged version of the
* Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
* index settings and the node settings where node settings are overwritten by index settings.
*/
public Settings getNodeSettings() {

@ -23,7 +23,6 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
@ -36,7 +35,6 @@ import org.elasticsearch.threadpool.ThreadPool;
public final class NodeServicesProvider {

private final ThreadPool threadPool;
private final IndicesQueryCache indicesQueryCache;
private final BigArrays bigArrays;
private final Client client;
private final IndicesQueriesRegistry indicesQueriesRegistry;
@ -44,9 +42,8 @@ public final class NodeServicesProvider {
private final CircuitBreakerService circuitBreakerService;

@Inject
public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService) {
public NodeServicesProvider(ThreadPool threadPool, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService) {
this.threadPool = threadPool;
this.indicesQueryCache = indicesQueryCache;
this.bigArrays = bigArrays;
this.client = client;
this.indicesQueriesRegistry = indicesQueriesRegistry;
@ -58,10 +55,6 @@ public final class NodeServicesProvider {
return threadPool;
}

public IndicesQueryCache getIndicesQueryCache() {
return indicesQueryCache;
}

public BigArrays getBigArrays() { return bigArrays; }

public Client getClient() {

@ -57,7 +57,7 @@ public final class AnalysisRegistry implements Closeable {
private final Map<String, Analyzer> cachedAnalyzer = new ConcurrentHashMap<>();
private final PrebuiltAnalysis prebuiltAnalysis;
private final HunspellService hunspellService;
private final Environment environemnt;
private final Environment environment;

public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
@ -70,7 +70,7 @@ public final class AnalysisRegistry implements Closeable {
Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
prebuiltAnalysis = new PrebuiltAnalysis();
this.hunspellService = hunspellService;
this.environemnt = environment;
this.environment = environment;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterBuilder = new HashMap<>(charFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterBuilder = new HashMap<>(tokenFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerBuilder = new HashMap<>(tokenizers);
@ -115,13 +115,13 @@ public final class AnalysisRegistry implements Closeable {
AnalysisModule.AnalysisProvider<AnalyzerProvider> provider = analyzers.get(analyzer);
return provider == null ? null : cachedAnalyzer.computeIfAbsent(analyzer, (key) -> {
try {
return provider.get(environemnt, key).get();
return provider.get(environment, key).get();
} catch (IOException ex) {
throw new ElasticsearchException("failed to load analyzer for name " + key, ex);
}}
);
}
return analyzerProvider.get(environemnt, analyzer).get();
return analyzerProvider.get(environment, analyzer).get();
}

@Override
@ -324,7 +324,7 @@ public final class AnalysisRegistry implements Closeable {
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
factory = type.get(settings, environemnt, name, currentSettings);
factory = type.get(settings, environment, name, currentSettings);
}
factories.put(name, factory);
} else {
@ -335,7 +335,7 @@ public final class AnalysisRegistry implements Closeable {
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
final T factory = type.get(settings, environemnt, name, currentSettings);
final T factory = type.get(settings, environment, name, currentSettings);
factories.put(name, factory);
}

@ -355,9 +355,9 @@ public final class AnalysisRegistry implements Closeable {
AnalysisModule.AnalysisProvider<T> defaultProvider = defaultInstance.get(name);
final T instance;
if (defaultProvider == null) {
instance = provider.get(settings, environemnt, name, defaultSettings);
instance = provider.get(settings, environment, name, defaultSettings);
} else {
instance = defaultProvider.get(settings, environemnt, name, defaultSettings);
instance = defaultProvider.get(settings, environment, name, defaultSettings);
}
factories.put(name, instance);
String camelCase = Strings.toCamelCase(name);
@ -371,7 +371,7 @@ public final class AnalysisRegistry implements Closeable {
final AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
final String camelCase = Strings.toCamelCase(name);
if (factories.containsKey(name) == false || (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) {
final T instance = provider.get(settings, environemnt, name, defaultSettings);
final T instance = provider.get(settings, environment, name, defaultSettings);
if (factories.containsKey(name) == false) {
factories.put(name, instance);
}
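Besides the environemnt-to-environment rename, the AnalysisRegistry hunks above show the analyzer cache pattern: build once via ConcurrentHashMap.computeIfAbsent, and wrap the provider's checked IOException so it can cross the lambda boundary. A minimal self-contained sketch of that idiom (hypothetical names; the real code throws ElasticsearchException):

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class AnalyzerCacheSketch {
    interface Provider { Object build(String name) throws IOException; }

    private final Map<String, Object> cached = new ConcurrentHashMap<>();

    Object get(String name, Provider provider) {
        // computeIfAbsent runs the provider at most once per absent key,
        // even with concurrent callers, and caches the result forever.
        return cached.computeIfAbsent(name, key -> {
            try {
                return provider.build(key);
            } catch (IOException ex) {
                // checked exceptions cannot escape the lambda; rethrow unchecked
                throw new RuntimeException("failed to load analyzer for name " + key, ex);
            }
        });
    }
}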
@ -22,7 +22,7 @@ package org.elasticsearch.index.engine;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.index.translog.Translog;

/** Holds a deleted version, which just adds a timestmap to {@link VersionValue} so we know when we can expire the deletion. */
/** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */

class DeleteVersionValue extends VersionValue {
private final long time;
@ -29,7 +29,7 @@ import java.io.IOException;
* An engine is already closed.
* <p>
* Note, the relationship between shard and engine indicates that engine closed is shard closed, and
* we might get something slipping through the the shard and into the engine while the shard is closing.
* we might get something slipping through the shard and into the engine while the shard is closing.
*
*
*/
@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.shard.ShardId;
@ -94,6 +95,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
static {
Map<String, IndexFieldData.Builder> buildersByTypeBuilder = new HashMap<>();
buildersByTypeBuilder.put("string", new PagedBytesIndexFieldData.Builder());
buildersByTypeBuilder.put(KeywordFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("float", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("double", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("byte", MISSING_DOC_VALUES_BUILDER);
@ -110,6 +112,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo

docValuesBuildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
.put("string", new DocValuesIndexFieldData.Builder())
.put(KeywordFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder())
.put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
.put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
@ -126,6 +129,9 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
.put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)

.put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER)
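The three hunks above register the new keyword type in parallel lookup tables: one keyed by type name, one for doc values, and one keyed by (type, format) pairs. A small sketch of that layered-registry lookup, with hypothetical simplified types in place of IndexFieldData.Builder and Tuple:

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class FieldDataRegistrySketch {
    private final Map<String, Supplier<Object>> byType = new HashMap<>();
    private final Map<Map.Entry<String, String>, Supplier<Object>> byTypeAndFormat = new HashMap<>();

    void register(String type, String format, Supplier<Object> builder) {
        byType.putIfAbsent(type, builder); // per-type default
        byTypeAndFormat.put(new SimpleImmutableEntry<>(type, format), builder);
    }

    Supplier<Object> lookup(String type, String format) {
        // an exact (type, format) registration wins over the per-type default
        Supplier<Object> exact = byTypeAndFormat.get(new SimpleImmutableEntry<>(type, format));
        return exact != null ? exact : byType.get(type);
    }
}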
@ -321,7 +321,7 @@ public final class OrdinalsBuilder implements Closeable {
}

/**
* Retruns the current ordinal or <tt>0</tt> if this build has not been advanced via
* Returns the current ordinal or <tt>0</tt> if this build has not been advanced via
* {@link #nextOrdinal()}.
*/
public long currentOrdinal() {
@ -457,7 +457,7 @@
* This method iterates all terms in the given {@link TermsEnum} and
* associates each terms ordinal with the terms documents. The caller must
* exhaust the returned {@link BytesRefIterator} which returns all values
* where the first returned value is associted with the ordinal <tt>1</tt>
* where the first returned value is associated with the ordinal <tt>1</tt>
* etc.
* <p>
* If the {@link TermsEnum} contains prefix coded numerical values the terms
@ -19,10 +19,12 @@

package org.elasticsearch.index.fielddata.plain;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.IndexSettings;
@ -45,7 +47,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
}

protected static class GeoPointTermsEnum extends BaseGeoPointTermsEnum {
protected GeoPointTermsEnum(BytesRefIterator termsEnum) {
protected GeoPointTermsEnum(BytesRefIterator termsEnum, GeoPointField.TermEncoding termEncoding) {
super(termsEnum);
}

@ -23,6 +23,7 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.Terms;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -48,25 +49,20 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
*/
public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
private final CircuitBreakerService breakerService;
private final boolean indexCreatedBefore22;

public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache,
breakerService, fieldType.fieldDataType().getSettings()
.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_2_2_0) ||
indexSettings.getIndexVersionCreated().before(Version.V_2_2_0));
breakerService);
}
}

public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName,
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService,
final boolean indexCreatedBefore22) {
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
super(indexSettings, fieldName, fieldDataType, cache);
this.breakerService = breakerService;
this.indexCreatedBefore22 = indexCreatedBefore22;
}

@Override
@ -82,7 +78,8 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
estimator.afterLoad(null, data.ramBytesUsed());
return data;
}
return (indexCreatedBefore22 == true) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
return (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) == true) ?
loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}

/**
@ -95,7 +92,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())));
final GeoPointField.TermEncoding termEncoding = indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_3_0) ?
GeoPointField.TermEncoding.PREFIX : GeoPointField.TermEncoding.NUMERIC;
final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())), termEncoding);
Long hashedPoint;
long numTerms = 0;
while ((hashedPoint = iter.next()) != null) {
@ -181,4 +180,4 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
}
}
}
}
}
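The hunks above delete the cached indexCreatedBefore22 flag and instead consult the index-created version at load time, which also decides the Lucene term encoding (PREFIX from 2.3.0 on, NUMERIC before). A sketch of that version-gated dispatch, with hypothetical integer version ids standing in for org.elasticsearch.Version:

class GeoLoadDispatchSketch {
    enum TermEncoding { PREFIX, NUMERIC }

    static final int V_2_2_0 = 2_02_00; // hypothetical encodings of the version ids
    static final int V_2_3_0 = 2_03_00;

    static String load(int indexCreatedVersion) {
        if (indexCreatedVersion < V_2_2_0) {
            return loadLegacy(); // pre-2.2 indexes keep the old on-disk layout
        }
        // 2.2+ indexes additionally pick the term encoding from the created version
        TermEncoding enc = indexCreatedVersion >= V_2_3_0 ? TermEncoding.PREFIX : TermEncoding.NUMERIC;
        return loadModern(enc);
    }

    static String loadLegacy() { return "legacy field data"; }
    static String loadModern(TermEncoding enc) { return "field data (" + enc + ")"; }
}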
@ -43,7 +43,6 @@ import java.util.Map;
import java.util.function.Supplier;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.index.mapper.MapperBuilders.doc;

public class DocumentMapperParser {

@ -111,7 +110,7 @@ public class DocumentMapperParser {

Mapper.TypeParser.ParserContext parserContext = parserContext(type);
// parse RootObjectMapper
DocumentMapper.Builder docBuilder = doc((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
DocumentMapper.Builder docBuilder = new DocumentMapper.Builder((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
Iterator<Map.Entry<String, Object>> iterator = mapping.entrySet().iterator();
// parse DocumentMapper
while(iterator.hasNext()) {
@ -28,7 +28,15 @@ import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
@ -323,7 +331,7 @@ class DocumentParser implements Closeable {
context.path().remove();
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
if (builder == null) {
builder = MapperBuilders.object(currentFieldName).enabled(true);
builder = new ObjectMapper.Builder(currentFieldName).enabled(true);
// if this is a non root object, then explicitly set the dynamic behavior if set
if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(mapper.dynamic());
@ -442,37 +450,37 @@ class DocumentParser implements Closeable {
if (fieldType instanceof StringFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = MapperBuilders.stringField(currentFieldName);
builder = new StringFieldMapper.Builder(currentFieldName);
}
} else if (fieldType instanceof DateFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
if (builder == null) {
builder = MapperBuilders.dateField(currentFieldName);
builder = new DateFieldMapper.Builder(currentFieldName);
}
} else if (fieldType.numericType() != null) {
switch (fieldType.numericType()) {
case LONG:
builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
builder = new LongFieldMapper.Builder(currentFieldName);
}
break;
case DOUBLE:
builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
builder = new DoubleFieldMapper.Builder(currentFieldName);
}
break;
case INT:
builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
if (builder == null) {
builder = MapperBuilders.integerField(currentFieldName);
builder = new IntegerFieldMapper.Builder(currentFieldName);
}
break;
case FLOAT:
builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
if (builder == null) {
builder = MapperBuilders.floatField(currentFieldName);
builder = new FloatFieldMapper.Builder(currentFieldName);
}
break;
default:
@ -503,7 +511,7 @@ class DocumentParser implements Closeable {
dateTimeFormatter.parser().parseMillis(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
if (builder == null) {
builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter);
builder = new DateFieldMapper.Builder(currentFieldName).dateTimeFormatter(dateTimeFormatter);
}
return builder;
} catch (Exception e) {
@ -518,7 +526,7 @@ class DocumentParser implements Closeable {
Long.parseLong(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
builder = new LongFieldMapper.Builder(currentFieldName);
}
return builder;
} catch (NumberFormatException e) {
@ -528,7 +536,7 @@ class DocumentParser implements Closeable {
Double.parseDouble(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
builder = new DoubleFieldMapper.Builder(currentFieldName);
}
return builder;
} catch (NumberFormatException e) {
@ -537,7 +545,7 @@ class DocumentParser implements Closeable {
}
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = MapperBuilders.stringField(currentFieldName);
builder = new StringFieldMapper.Builder(currentFieldName);
}
return builder;
} else if (token == XContentParser.Token.VALUE_NUMBER) {
@ -545,7 +553,7 @@ class DocumentParser implements Closeable {
if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
builder = new LongFieldMapper.Builder(currentFieldName);
}
return builder;
} else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) {
@ -554,20 +562,20 @@ class DocumentParser implements Closeable {
// no templates are defined, we use float by default instead of double
// since this is much more space-efficient and should be enough most of
// the time
builder = MapperBuilders.floatField(currentFieldName);
builder = new FloatFieldMapper.Builder(currentFieldName);
}
return builder;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean");
if (builder == null) {
builder = MapperBuilders.booleanField(currentFieldName);
builder = new BooleanFieldMapper.Builder(currentFieldName);
}
return builder;
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary");
if (builder == null) {
builder = MapperBuilders.binaryField(currentFieldName);
builder = new BinaryFieldMapper.Builder(currentFieldName);
}
return builder;
} else {
@ -677,7 +685,7 @@ class DocumentParser implements Closeable {
if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
}
builder = MapperBuilders.object(paths[i]).enabled(true);
builder = new ObjectMapper.Builder(paths[i]).enabled(true);
}
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
mapper = (ObjectMapper) builder.build(builderContext);
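The @ -503 through @ -537 hunks above are dynamic mapping's type sniffing for string values: try the configured date format, then a long, then a double, and fall back to a plain string mapping only when every parse fails. The same ladder, sketched standalone with java.time as a hypothetical stand-in for the Joda-based date parsing in this tree:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

class DynamicTypeSnifferSketch {
    static String guessType(String text) {
        try {
            LocalDate.parse(text, DateTimeFormatter.ISO_LOCAL_DATE);
            return "date";
        } catch (Exception e) { /* not a date, keep trying */ }
        try {
            Long.parseLong(text);
            return "long";
        } catch (NumberFormatException e) { /* not a long */ }
        try {
            Double.parseDouble(text);
            return "double";
        } catch (NumberFormatException e) { /* not a double */ }
        return "string"; // last resort, always succeeds
    }
}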
@ -1,110 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.mapper;

import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
import org.elasticsearch.index.mapper.core.ByteFieldMapper;
import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.FloatFieldMapper;
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;

public final class MapperBuilders {

private MapperBuilders() {}

public static DocumentMapper.Builder doc(RootObjectMapper.Builder objectBuilder, MapperService mapperService) {
return new DocumentMapper.Builder(objectBuilder, mapperService);
}

public static RootObjectMapper.Builder rootObject(String name) {
return new RootObjectMapper.Builder(name);
}

public static ObjectMapper.Builder object(String name) {
return new ObjectMapper.Builder(name);
}

public static BooleanFieldMapper.Builder booleanField(String name) {
return new BooleanFieldMapper.Builder(name);
}

public static StringFieldMapper.Builder stringField(String name) {
return new StringFieldMapper.Builder(name);
}

public static BinaryFieldMapper.Builder binaryField(String name) {
return new BinaryFieldMapper.Builder(name);
}

public static DateFieldMapper.Builder dateField(String name) {
return new DateFieldMapper.Builder(name);
}

public static IpFieldMapper.Builder ipField(String name) {
return new IpFieldMapper.Builder(name);
}

public static ShortFieldMapper.Builder shortField(String name) {
return new ShortFieldMapper.Builder(name);
}

public static ByteFieldMapper.Builder byteField(String name) {
return new ByteFieldMapper.Builder(name);
}

public static IntegerFieldMapper.Builder integerField(String name) {
return new IntegerFieldMapper.Builder(name);
}

public static TokenCountFieldMapper.Builder tokenCountField(String name) {
return new TokenCountFieldMapper.Builder(name);
}

public static LongFieldMapper.Builder longField(String name) {
return new LongFieldMapper.Builder(name);
}

public static FloatFieldMapper.Builder floatField(String name) {
return new FloatFieldMapper.Builder(name);
}

public static DoubleFieldMapper.Builder doubleField(String name) {
return new DoubleFieldMapper.Builder(name);
}

public static GeoShapeFieldMapper.Builder geoShapeField(String name) {
return new GeoShapeFieldMapper.Builder(name);
}

public static CompletionFieldMapper.Builder completionField(String name) {
return new CompletionFieldMapper.Builder(name);
}
}
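Every method in the deleted MapperBuilders above was a one-line static factory forwarding to a builder constructor, and the earlier hunks inline each call site, so the class and its static imports can go. The shape of the refactor, sketched with a hypothetical mapper type:

class WidgetMapper {
    static class Builder {
        final String name;
        Builder(String name) { this.name = name; }
    }
}

final class WidgetBuilders { // the MapperBuilders-style indirection being removed
    private WidgetBuilders() {}
    static WidgetMapper.Builder widget(String name) {
        return new WidgetMapper.Builder(name); // only forwards to the constructor
    }
}

class CallSite {
    WidgetMapper.Builder before = WidgetBuilders.widget("f");    // extra hop plus a static import
    WidgetMapper.Builder after = new WidgetMapper.Builder("f");  // what the diff inlines everywhere
}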
@ -42,7 +42,6 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.MapperBuilders.binaryField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;

/**
@ -79,7 +78,7 @@ public class BinaryFieldMapper extends FieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
BinaryFieldMapper.Builder builder = binaryField(name);
BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(name);
parseField(builder, name, node, parserContext);
return builder;
}
@ -41,7 +41,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.booleanField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

@ -96,7 +95,7 @@ public class BooleanFieldMapper extends FieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
BooleanFieldMapper.Builder builder = booleanField(name);
BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name);
parseField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -49,7 +49,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeByteValue;
import static org.elasticsearch.index.mapper.MapperBuilders.byteField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -97,7 +96,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
ByteFieldMapper.Builder builder = byteField(name);
ByteFieldMapper.Builder builder = new ByteFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -58,7 +58,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;

import static org.elasticsearch.index.mapper.MapperBuilders.completionField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

/**
@ -119,7 +118,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp

@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
CompletionFieldMapper.Builder builder = completionField(name);
CompletionFieldMapper.Builder builder = new CompletionFieldMapper.Builder(name);
NamedAnalyzer indexAnalyzer = null;
NamedAnalyzer searchAnalyzer = null;
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
@ -63,7 +63,6 @@ import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.index.mapper.MapperBuilders.dateField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseDateTimeFormatter;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

@ -153,7 +152,7 @@ public class DateFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
DateFieldMapper.Builder builder = dateField(name);
DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
boolean configuredFormat = false;
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
@ -51,7 +51,6 @@ import java.util.Map;

import static org.apache.lucene.util.NumericUtils.doubleToSortableLong;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue;
import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -98,7 +97,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
DoubleFieldMapper.Builder builder = doubleField(name);
DoubleFieldMapper.Builder builder = new DoubleFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -52,7 +52,6 @@ import java.util.Map;

import static org.apache.lucene.util.NumericUtils.floatToSortableInt;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
import static org.elasticsearch.index.mapper.MapperBuilders.floatField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -99,7 +98,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
FloatFieldMapper.Builder builder = floatField(name);
FloatFieldMapper.Builder builder = new FloatFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -51,7 +51,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
import static org.elasticsearch.index.mapper.MapperBuilders.integerField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -104,7 +103,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
IntegerFieldMapper.Builder builder = integerField(name);
IntegerFieldMapper.Builder builder = new IntegerFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -0,0 +1,274 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.mapper.core;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

/**
* A field mapper for keywords. This mapper accepts strings and indexes them as-is.
*/
public final class KeywordFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {

public static final String CONTENT_TYPE = "keyword";

public static class Defaults {
public static final MappedFieldType FIELD_TYPE = new KeywordFieldType();

static {
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
FIELD_TYPE.freeze();
}

public static final String NULL_VALUE = null;
public static final int IGNORE_ABOVE = Integer.MAX_VALUE;
}

public static class Builder extends FieldMapper.Builder<Builder, KeywordFieldMapper> {

protected String nullValue = Defaults.NULL_VALUE;
protected int ignoreAbove = Defaults.IGNORE_ABOVE;

public Builder(String name) {
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
builder = this;
}

public Builder ignoreAbove(int ignoreAbove) {
if (ignoreAbove < 0) {
throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove);
}
this.ignoreAbove = ignoreAbove;
return this;
}

@Override
public Builder indexOptions(IndexOptions indexOptions) {
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]="
+ indexOptionToString(fieldType.indexOptions()));
}
return super.indexOptions(indexOptions);
}

@Override
public KeywordFieldMapper build(BuilderContext context) {
setupFieldType(context);
KeywordFieldMapper fieldMapper = new KeywordFieldMapper(
name, fieldType, defaultFieldType, ignoreAbove,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
return fieldMapper.includeInAll(includeInAll);
}
}

public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name);
parseField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String propName = Strings.toUnderscoreCase(entry.getKey());
Object propNode = entry.getValue();
if (propName.equals("null_value")) {
if (propNode == null) {
throw new MapperParsingException("Property [null_value] cannot be null.");
}
builder.nullValue(propNode.toString());
iterator.remove();
} else if (propName.equals("ignore_above")) {
builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
iterator.remove();
} else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
iterator.remove();
}
}
return builder;
}
}

public static final class KeywordFieldType extends MappedFieldType {

public KeywordFieldType() {}

protected KeywordFieldType(KeywordFieldType ref) {
super(ref);
}

public KeywordFieldType clone() {
return new KeywordFieldType(this);
}

@Override
public String typeName() {
return CONTENT_TYPE;
}

@Override
public String value(Object value) {
if (value == null) {
return null;
}
return value.toString();
}

@Override
public Query nullValueQuery() {
if (nullValue() == null) {
return null;
}
return termQuery(nullValue(), null);
}
}

private Boolean includeInAll;
private int ignoreAbove;

protected KeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
int ignoreAbove, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0;
this.ignoreAbove = ignoreAbove;
}

@Override
protected KeywordFieldMapper clone() {
return (KeywordFieldMapper) super.clone();
}

@Override
public KeywordFieldMapper includeInAll(Boolean includeInAll) {
if (includeInAll != null) {
KeywordFieldMapper clone = clone();
clone.includeInAll = includeInAll;
return clone;
} else {
return this;
}
}

@Override
public KeywordFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll != null && this.includeInAll == null) {
KeywordFieldMapper clone = clone();
clone.includeInAll = includeInAll;
return clone;
} else {
return this;
}
}

@Override
public KeywordFieldMapper unsetIncludeInAll() {
if (includeInAll != null) {
KeywordFieldMapper clone = clone();
clone.includeInAll = null;
return clone;
} else {
return this;
}
}

@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
final String value;
if (context.externalValueSet()) {
value = context.externalValue().toString();
} else {
XContentParser parser = context.parser();
if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
value = fieldType().nullValueAsString();
} else {
value = parser.textOrNull();
}
}

if (value == null || value.length() > ignoreAbove) {
return;
}

if (context.includeInAll(includeInAll, this)) {
context.allEntries().addText(fieldType().name(), value, fieldType().boost());
}

if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
Field field = new Field(fieldType().name(), value, fieldType());
fields.add(field);
}
if (fieldType().hasDocValues()) {
fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
}
}

@Override
protected String contentType() {
return CONTENT_TYPE;
}

@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
this.includeInAll = ((KeywordFieldMapper) mergeWith).includeInAll;
this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove;
}

@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);

if (includeDefaults || fieldType().nullValue() != null) {
builder.field("null_value", fieldType().nullValue());
}

if (includeInAll != null) {
builder.field("include_in_all", includeInAll);
} else if (includeDefaults) {
builder.field("include_in_all", true);
}

if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
builder.field("ignore_above", ignoreAbove);
}
}
}
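The new KeywordFieldMapper above never mutates a shared instance: includeInAll(...) and friends clone the mapper, change the copy, and return it, so mappers stay safe to share across threads. A minimal sketch of that copy-on-write idiom (hypothetical simplified class):

class CopyOnWriteMapperSketch implements Cloneable {
    private Boolean includeInAll;

    CopyOnWriteMapperSketch includeInAll(Boolean value) {
        if (value == null) {
            return this; // nothing to change, keep sharing this instance
        }
        CopyOnWriteMapperSketch copy = clone(); // never mutate in place...
        copy.includeInAll = value;              // ...only the private copy
        return copy;
    }

    @Override
    protected CopyOnWriteMapperSketch clone() {
        try {
            return (CopyOnWriteMapperSketch) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // cannot happen: we implement Cloneable
        }
    }
}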
@ -51,7 +51,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeLongValue;
import static org.elasticsearch.index.mapper.MapperBuilders.longField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -103,7 +102,7 @@ public class LongFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
LongFieldMapper.Builder builder = longField(name);
LongFieldMapper.Builder builder = new LongFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -51,7 +51,6 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeShortValue;
import static org.elasticsearch.index.mapper.MapperBuilders.shortField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -101,7 +100,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
ShortFieldMapper.Builder builder = shortField(name);
ShortFieldMapper.Builder builder = new ShortFieldMapper.Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -45,7 +45,6 @@ import java.util.List;
import java.util.Map;

import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField;

@ -146,7 +145,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
StringFieldMapper.Builder builder = stringField(fieldName);
StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName);
// hack for the fact that string can't just accept true/false for
// the index property and still accepts no/not_analyzed/analyzed
final Object index = node.remove("index");
@ -43,7 +43,6 @@ import java.util.Map;

import static org.apache.lucene.index.IndexOptions.NONE;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
import static org.elasticsearch.index.mapper.MapperBuilders.tokenCountField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -98,7 +97,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
@Override
@SuppressWarnings("unchecked")
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
TokenCountFieldMapper.Builder builder = tokenCountField(name);
TokenCountFieldMapper.Builder builder = new TokenCountFieldMapper.Builder(name);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String propName = Strings.toUnderscoreCase(entry.getKey());
@ -21,7 +21,8 @@ package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.GeoHashUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
@ -29,7 +30,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;
@ -50,8 +51,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;

@ -159,8 +158,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

context.path().add(name);
if (enableLatLon) {
NumberFieldMapper.Builder<?, ?> latMapperBuilder = doubleField(Names.LAT).includeInAll(false);
NumberFieldMapper.Builder<?, ?> lonMapperBuilder = doubleField(Names.LON).includeInAll(false);
NumberFieldMapper.Builder<?, ?> latMapperBuilder = new DoubleFieldMapper.Builder(Names.LAT).includeInAll(false);
NumberFieldMapper.Builder<?, ?> lonMapperBuilder = new DoubleFieldMapper.Builder(Names.LON).includeInAll(false);
if (precisionStep != null) {
latMapperBuilder.precisionStep(precisionStep);
lonMapperBuilder.precisionStep(precisionStep);
@ -172,7 +171,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
StringFieldMapper geoHashMapper = null;
if (enableGeoHash || enableGeoHashPrefix) {
// TODO: possible also implicitly enable geohash if geohash precision is set
geoHashMapper = stringField(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH).index(true).tokenized(false).includeInAll(false).store(fieldType.stored())
.omitNorms(true).indexOptions(IndexOptions.DOCS).build(context);
geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
}
@ -20,9 +20,10 @@
package org.elasticsearch.index.mapper.geo;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.GeoPointField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
@ -59,8 +60,6 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setNumericType(FieldType.NumericType.LONG);
FIELD_TYPE.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
FIELD_TYPE.setDocValuesType(DocValuesType.SORTED_NUMERIC);
FIELD_TYPE.setHasDocValues(true);
FIELD_TYPE.freeze();
@ -83,6 +82,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
fieldType.setNumericType(FieldType.NumericType.LONG);
}
setupFieldType(context);
return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
geoHashMapper, multiFields, ignoreMalformed, copyTo);
@ -90,6 +93,10 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {

@Override
public GeoPointFieldMapper build(BuilderContext context) {
if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
fieldType.setNumericType(FieldType.NumericType.LONG);
}
return super.build(context);
}
}
@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;

import java.io.IOException;
import java.util.Iterator;
@ -53,7 +54,6 @@ import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.geoShapeField;


/**
@ -160,7 +160,7 @@ public class GeoShapeFieldMapper extends FieldMapper {

@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = geoShapeField(name);
Builder builder = new Builder(name);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String fieldName = Strings.toUnderscoreCase(entry.getKey());
@ -38,12 +38,11 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
@ -132,15 +131,15 @@ public class ParentFieldMapper extends MetadataFieldMapper {

@Override
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
StringFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
MappedFieldType childJoinFieldType = Defaults.FIELD_TYPE.clone();
childJoinFieldType.setName(joinField(null));
return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings);
}
}

static StringFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) {
StringFieldMapper.Builder parentJoinField = MapperBuilders.stringField(joinField(docType));
static KeywordFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) {
KeywordFieldMapper.Builder parentJoinField = new KeywordFieldMapper.Builder(joinField(docType));
parentJoinField.indexOptions(IndexOptions.NONE);
parentJoinField.docValues(true);
parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED);
@ -206,9 +205,9 @@ public class ParentFieldMapper extends MetadataFieldMapper {
private final String parentType;
// has no impact on field data settings, is just here for creating a join field,
// the parent field mapper in the child type pointing to this type determines the field data settings for this join field
private final StringFieldMapper parentJoinField;
private final KeywordFieldMapper parentJoinField;

private ParentFieldMapper(StringFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) {
private ParentFieldMapper(KeywordFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) {
super(NAME, childJoinFieldType, Defaults.FIELD_TYPE, indexSettings);
this.parentType = parentType;
this.parentJoinField = parentJoinField;
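The join field created above is deliberately invisible to the inverted index: index options are set to NONE while sorted doc values are enabled, so parent/child lookups run entirely against doc values. A minimal plain-Lucene sketch of a field with that shape (the field name and value here are illustrative, not the exact join-field format):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.util.BytesRef;

    final class DocValuesOnlyFieldSketch {
        public static void main(String[] args) {
            // Nothing is added to the postings; readers resolve the value via doc values.
            Document doc = new Document();
            doc.add(new SortedDocValuesField("_parent#my_parent_type", new BytesRef("parent-id-1")));
            System.out.println(doc.getFields());
        }
    }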
@ -57,7 +57,6 @@ import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import static org.elasticsearch.index.mapper.MapperBuilders.ipField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

/**
@ -139,7 +138,7 @@ public class IpFieldMapper extends NumberFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
IpFieldMapper.Builder builder = ipField(name);
IpFieldMapper.Builder builder = new Builder(name);
parseNumberField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
@ -50,7 +50,6 @@ import java.util.Locale;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.object;

/**
 *
@ -300,7 +299,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}

protected Builder createBuilder(String name) {
return object(name);
return new Builder(name);
}
}
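A pattern worth noting across this hunk and the GeoShapeFieldMapper and IpFieldMapper hunks above: the MapperBuilders static factories (object(name), geoShapeField(name), ipField(name)) are replaced by direct construction of the corresponding Builder, removing one layer of indirection. A schematic before/after, with a simplified stand-in Builder type:

    final class BuilderMigrationSketch {
        static final class Builder {                 // stand-in for e.g. ObjectMapper.Builder
            final String name;
            Builder(String name) { this.name = name; }
        }

        public static void main(String[] args) {
            // Before: Builder builder = MapperBuilders.object(name);
            // After:
            Builder builder = new Builder("my_object_field"); // hypothetical field name
            System.out.println(builder.name);
        }
    }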
@ -85,7 +85,7 @@ public final class ExtractQueryTermsService {
/**
 * Extracts all query terms from the provided query and adds it to specified list.
 *
 * From boolean query with no should clauses or phrase queries only the the longest term are selected,
 * From boolean query with no should clauses or phrase queries only the longest term are selected,
 * since those terms are likely to be the rarest. Boolean query's must_not clauses are always ignored.
 *
 * If no query terms can be extracted from part of the query, then term extraction is stopped and
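The javadoc above captures the extraction heuristic: from a phrase query (or a boolean query without should clauses) only the longest term is kept, on the assumption that longer terms tend to be rarer and therefore more selective. A standalone sketch of just that selection step, not the actual ExtractQueryTermsService code:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    final class LongestTermSketch {
        // The longest term is used as a cheap proxy for the rarest term.
        static String selectLongest(List<String> phraseTerms) {
            return phraseTerms.stream().max(Comparator.comparingInt(String::length)).orElse("");
        }

        public static void main(String[] args) {
            System.out.println(selectLongest(Arrays.asList("the", "quick", "elasticsearch"))); // elasticsearch
        }
    }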
@ -41,7 +41,7 @@ public class PercolateStats implements Streamable, ToXContent {
private long numQueries;

/**
 * Noop constructor for serialazation purposes.
 * Noop constructor for serialization purposes.
 */
public PercolateStats() {
}
@ -26,10 +26,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
@ -61,17 +60,16 @@ public class PercolatorFieldMapper extends FieldMapper {
@Override
public PercolatorFieldMapper build(BuilderContext context) {
context.path().add(name);
StringFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
StringFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
KeywordFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
KeywordFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
context.path().remove();
return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField);
}

static StringFieldMapper.Builder createStringFieldBuilder(String name) {
StringFieldMapper.Builder queryMetaDataFieldBuilder = MapperBuilders.stringField(name);
static KeywordFieldMapper.Builder createStringFieldBuilder(String name) {
KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name);
queryMetaDataFieldBuilder.docValues(false);
queryMetaDataFieldBuilder.store(false);
queryMetaDataFieldBuilder.tokenized(false);
queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
return queryMetaDataFieldBuilder;
}
@ -111,10 +109,10 @@ public class PercolatorFieldMapper extends FieldMapper {

private final boolean mapUnmappedFieldAsString;
private final QueryShardContext queryShardContext;
private final StringFieldMapper queryTermsField;
private final StringFieldMapper unknownQueryField;
private final KeywordFieldMapper queryTermsField;
private final KeywordFieldMapper unknownQueryField;

public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, StringFieldMapper queryTermsField, StringFieldMapper unknownQueryField) {
public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
this.queryShardContext = queryShardContext;
this.queryTermsField = queryTermsField;
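createStringFieldBuilder above configures the percolator's metadata fields as untokenized, unstored, DOCS-only postings without doc values; in plain Lucene terms that is roughly what StringField produces. A small illustrative sketch (the field name and term value are made up):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;

    final class ExtractedTermFieldSketch {
        public static void main(String[] args) {
            // StringField: a single untokenized term, indexed with DOCS, norms omitted, no doc values.
            Document doc = new Document();
            doc.add(new StringField("extracted_terms", "some_term", Field.Store.NO));
            System.out.println(doc);
        }
    }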
@ -165,7 +165,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue

/**
 * Sets the minimum number of high frequent query terms that need to match in order to
 * produce a hit when there are no low frequen terms.
 * produce a hit when there are no low frequent terms.
 */
public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) {
this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch;
@ -154,7 +154,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
}

private FuzzyQueryBuilder() {
// for protoype
// for prototype
this.fieldName = null;
this.value = null;
}
@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointInBBoxQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointInBBoxQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Numbers;
@ -105,7 +106,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding

// we do not check longitudes as the query generation code can deal with flipped left/right values
}

topLeft.reset(top, left);
bottomRight.reset(bottom, right);
return this;
@ -133,7 +134,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
public GeoPoint topLeft() {
return topLeft;
}

/** Returns the bottom right corner of the bounding box. */
public GeoPoint bottomRight() {
return bottomRight;
@ -168,7 +169,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
this.validationMethod = method;
return this;
}

/**
 * Returns geo coordinate validation method to use.
 * */
@ -251,7 +252,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
if (GeoValidationMethod.isCoerce(validationMethod)) {
// Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
// the complete longitude range so need to set longitude to the complete longditude range
// the complete longitude range so need to set longitude to the complete longitude range
double right = luceneBottomRight.getLon();
double left = luceneTopLeft.getLon();

@ -264,8 +265,13 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
}
}

if (context.indexVersionCreated().onOrAfter(Version.V_2_2_0)) {
return new GeoPointInBBoxQuery(fieldType.name(), luceneTopLeft.lon(), luceneBottomRight.lat(),
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
return new GeoPointInBBoxQuery(fieldType.name(), encoding, luceneTopLeft.lon(), luceneBottomRight.lat(),
luceneBottomRight.lon(), luceneTopLeft.lat());
}
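This hunk is the first of several that thread a GeoPointField.TermEncoding through the Lucene geo queries: indices created on 2.2 keep the soon-to-be-legacy NUMERIC postings encoding, while 2.3+ indices use the PREFIX encoding. A condensed sketch of just the selection logic, with integer version pairs standing in for the Version class:

    final class TermEncodingSketch {
        enum TermEncoding { NUMERIC, PREFIX }

        // Mirrors: indexVersionCreated.before(V_2_3_0) ? NUMERIC : PREFIX
        static TermEncoding pick(int major, int minor) {
            boolean beforeTwoThree = major < 2 || (major == 2 && minor < 3);
            return beforeTwoThree ? TermEncoding.NUMERIC : TermEncoding.PREFIX;
        }

        public static void main(String[] args) {
            System.out.println(pick(2, 2)); // NUMERIC
            System.out.println(pick(2, 3)); // PREFIX
        }
    }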
@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointDistanceQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
@ -197,7 +198,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
return this.optimizeBbox;
}

/** Set validaton method for geo coordinates. */
/** Set validation method for geo coordinates. */
public void setValidationMethod(GeoValidationMethod method) {
this.validationMethod = method;
}
@ -229,14 +230,19 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue

double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);

if (shardContext.indexVersionCreated().before(Version.V_2_2_0)) {
final Version indexVersionCreated = shardContext.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);
return new GeoDistanceRangeQuery(center, null, normDistance, true, false, geoDistance, geoFieldType, indexFieldData, optimizeBbox);
}

// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
normDistance = GeoUtils.maxRadialDistance(center, normDistance);
return new GeoPointDistanceQuery(fieldType.name(), center.lon(), center.lat(), normDistance);
return new GeoPointDistanceQuery(fieldType.name(), encoding, center.lon(), center.lat(), normDistance);
}

@Override
@ -19,9 +19,10 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointDistanceRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.GeoDistanceUtils;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery;
import org.apache.lucene.spatial.util.GeoDistanceUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoDistance;
@ -41,7 +42,7 @@ import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

import static org.apache.lucene.util.GeoUtils.TOLERANCE;
import static org.apache.lucene.spatial.util.GeoEncodingUtils.TOLERANCE;

public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistanceRangeQueryBuilder> {

@ -267,16 +268,22 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
toValue = GeoDistanceUtils.maxRadialDistanceMeters(point.lon(), point.lat());
}

if (indexCreatedBeforeV2_2 == true) {
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
GeoPointFieldMapperLegacy.GeoPointFieldType geoFieldType = ((GeoPointFieldMapperLegacy.GeoPointFieldType) fieldType);
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
return new GeoDistanceRangeQuery(point, fromValue, toValue, includeLower, includeUpper, geoDistance, geoFieldType,
indexFieldData, optimizeBbox);
indexFieldData, optimizeBbox);
}

return new GeoPointDistanceRangeQuery(fieldType.name(), point.lon(), point.lat(),
(includeLower) ? fromValue : fromValue + TOLERANCE,
(includeUpper) ? toValue : toValue - TOLERANCE);
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;

return new GeoPointDistanceRangeQuery(fieldType.name(), encoding, point.lon(), point.lat(),
(includeLower) ? fromValue : fromValue + TOLERANCE,
(includeUpper) ? toValue : toValue - TOLERANCE);
}

@Override
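The final query above folds exclusive bounds into the inclusive-only GeoPointDistanceRangeQuery by nudging each bound by TOLERANCE: an exclusive lower bound moves up, an exclusive upper bound moves down. A worked sketch of the arithmetic (the TOLERANCE value here is illustrative, not the Lucene constant):

    final class RangeBoundSketch {
        static final double TOLERANCE = 1e-7; // illustrative only

        static double lowerBound(double from, boolean includeLower) {
            return includeLower ? from : from + TOLERANCE;
        }

        static double upperBound(double to, boolean includeUpper) {
            return includeUpper ? to : to - TOLERANCE;
        }

        public static void main(String[] args) {
            System.out.println(lowerBound(100.0, false)); // just above 100.0
            System.out.println(upperBound(500.0, true));  // exactly 500.0
        }
    }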
@ -19,7 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.GeoPointInPolygonQuery;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.search.GeoPointInPolygonQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
@ -136,7 +137,8 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
}
}

if (context.indexVersionCreated().before(Version.V_2_2_0)) {
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
return new GeoPolygonQuery(indexFieldData, shell.toArray(new GeoPoint[shellSize]));
}
@ -149,7 +151,11 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
lats[i] = p.lat();
lons[i] = p.lon();
}
return new GeoPointInPolygonQuery(fieldType.name(), lons, lats);
// if index created V_2_2 use (soon to be legacy) numeric encoding postings format
// if index created V_2_3 > use prefix encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
GeoPointField.TermEncoding.NUMERIC : GeoPointField.TermEncoding.PREFIX;
return new GeoPointInPolygonQuery(fieldType.name(), encoding, lons, lats);
}

@Override
@ -20,7 +20,7 @@
package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.GeoHashUtils;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
@ -634,7 +634,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
}

/**
 * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the fied.
 * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
 */
public MoreLikeThisQueryBuilder analyzer(String analyzer) {
this.analyzer = analyzer;
@ -703,7 +703,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
* Converts an array of String ids to an Item[].
* @param ids the ids to convert
* @return the new items array
* @deprecated construct the items array externaly and use it in the constructor / setter
* @deprecated construct the items array externally and use it in the constructor / setter
*/
@Deprecated
public static Item[] ids(String... ids) {
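Since ids(String...) is deprecated in favor of building the items array externally, a usage sketch of the replacement style (the index, type, and id values are placeholders, and the three-argument Item constructor is assumed from this codebase):

    // import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
    MoreLikeThisQueryBuilder.Item[] items = new MoreLikeThisQueryBuilder.Item[] {
        new MoreLikeThisQueryBuilder.Item("my_index", "my_type", "1"),
        new MoreLikeThisQueryBuilder.Item("my_index", "my_type", "2"),
    };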
@ -29,7 +29,7 @@ import java.util.ArrayList;
import java.util.List;

/**
 * Parser for the The More Like This Query (MLT Query) which finds documents that are "like" a given set of documents.
 * Parser for the More Like This Query (MLT Query) which finds documents that are "like" a given set of documents.
 *
 * The documents are provided as a set of strings and/or a list of {@link Item}.
 */
@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
@ -278,7 +277,7 @@ public class QueryShardContext {
if (fieldMapping != null || allowUnmappedFields) {
return fieldMapping;
} else if (mapUnmappedFieldAsString) {
StringFieldMapper.Builder builder = MapperBuilders.stringField(name);
StringFieldMapper.Builder builder = new StringFieldMapper.Builder(name);
return builder.build(new Mapper.BuilderContext(indexSettings.getSettings(), new ContentPath(1))).fieldType();
} else {
throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name);
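The branch above implements a three-way policy for unmapped field names: return the (possibly null) mapping when unmapped fields are allowed, synthesize a default string mapping when mapUnmappedFieldAsString is set, and fail otherwise. A standalone sketch of that decision, with a plain String standing in for the real MappedFieldType:

    final class UnmappedFieldPolicySketch {
        // Mirrors the three branches above; a String stands in for MappedFieldType.
        static String resolve(String mapping, boolean allowUnmapped, boolean mapUnmappedAsString, String name) {
            if (mapping != null || allowUnmapped) {
                return mapping;
            } else if (mapUnmappedAsString) {
                return "string"; // stand-in for building a default StringFieldMapper field type
            }
            throw new IllegalArgumentException("No field mapping can be found for the field with name [" + name + "]");
        }

        public static void main(String[] args) {
            System.out.println(resolve(null, false, true, "unknown_field")); // string
        }
    }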