commit 4557d1b560
Merge branch 'master' into feature/search-request-refactoring

# Conflicts:
#	plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java
@@ -334,11 +334,14 @@
         <!-- Guice -->
         <exclude>src/main/java/org/elasticsearch/common/inject/**</exclude>
         <exclude>src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java</exclude>
+        <exclude>src/main/java/org/elasticsearch/common/network/InetAddresses.java</exclude>
         <exclude>src/main/java/org/apache/lucene/**/X*.java</exclude>
         <!-- t-digest -->
         <exclude>src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestState.java</exclude>
         <!-- netty pipelining -->
         <exclude>src/main/java/org/elasticsearch/http/netty/pipelining/**</exclude>
+        <exclude>src/test/java/org/elasticsearch/common/network/InetAddressesTests.java</exclude>
+        <exclude>src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java</exclude>
       </excludes>
     </configuration>
   </plugin>
@@ -482,7 +482,7 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
         RESOURCE_NOT_FOUND_EXCEPTION(org.elasticsearch.ResourceNotFoundException.class, org.elasticsearch.ResourceNotFoundException::new, 19),
         ACTION_TRANSPORT_EXCEPTION(org.elasticsearch.transport.ActionTransportException.class, org.elasticsearch.transport.ActionTransportException::new, 20),
         ELASTICSEARCH_GENERATION_EXCEPTION(org.elasticsearch.ElasticsearchGenerationException.class, org.elasticsearch.ElasticsearchGenerationException::new, 21),
-        CREATE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.CreateFailedEngineException.class, org.elasticsearch.index.engine.CreateFailedEngineException::new, 22),
+        // 22 was CreateFailedEngineException
         INDEX_SHARD_STARTED_EXCEPTION(org.elasticsearch.index.shard.IndexShardStartedException.class, org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
         SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24),
         SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),

@@ -514,7 +514,7 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
         INDEX_SHARD_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.IndexShardAlreadyExistsException.class, org.elasticsearch.index.IndexShardAlreadyExistsException::new, 51),
         VERSION_CONFLICT_ENGINE_EXCEPTION(org.elasticsearch.index.engine.VersionConflictEngineException.class, org.elasticsearch.index.engine.VersionConflictEngineException::new, 52),
         ENGINE_EXCEPTION(org.elasticsearch.index.engine.EngineException.class, org.elasticsearch.index.engine.EngineException::new, 53),
-        DOCUMENT_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.index.engine.DocumentAlreadyExistsException.class, org.elasticsearch.index.engine.DocumentAlreadyExistsException::new, 54),
+        // 54 was DocumentAlreadyExistsException, which is superseded by VersionConflictEngineException
         NO_SUCH_NODE_EXCEPTION(org.elasticsearch.action.NoSuchNodeException.class, org.elasticsearch.action.NoSuchNodeException::new, 55),
         SETTINGS_EXCEPTION(org.elasticsearch.common.settings.SettingsException.class, org.elasticsearch.common.settings.SettingsException::new, 56),
         INDEX_TEMPLATE_MISSING_EXCEPTION(org.elasticsearch.indices.IndexTemplateMissingException.class, org.elasticsearch.indices.IndexTemplateMissingException::new, 57),
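Both hunks above retire an exception class (CreateFailedEngineException, DocumentAlreadyExistsException) but keep its numeric slot as a comment instead of renumbering the later entries. These IDs are serialized over the wire, so reusing a freed slot would let nodes on different versions deserialize the wrong exception type. A minimal sketch of that invariant, with hypothetical names (this is not the actual registry code):

import java.util.HashMap;
import java.util.Map;

public class WireIdRegistrySketch {
    private final Map<Integer, Class<? extends Exception>> byId = new HashMap<>();

    void register(int id, Class<? extends Exception> type) {
        // Retired IDs such as 22 and 54 must stay unassigned forever;
        // two classes sharing one slot would corrupt cross-version transport.
        if (byId.putIfAbsent(id, type) != null) {
            throw new IllegalStateException("wire id [" + id + "] is already taken");
        }
    }
}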
@@ -259,6 +259,8 @@ public class Version {
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_0_beta2_ID = 2000002;
     public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_0_rc1_ID = 2000051;
     public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final int V_2_0_0_ID = 2000099;
+    public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;

@@ -287,6 +289,8 @@ public class Version {
                 return V_2_1_0;
+            case V_2_0_0_ID:
+                return V_2_0_0;
             case V_2_0_0_rc1_ID:
                 return V_2_0_0_rc1;
             case V_2_0_0_beta2_ID:
                 return V_2_0_0_beta2;
             case V_2_0_0_beta1_ID:
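The new constants follow the ID scheme already visible in this hunk: two digits each for major/minor/revision plus a two-digit build suffix, where beta builds count from 01, release candidates from 51, and 99 marks a GA release. A quick arithmetic check, assuming that scheme holds:

public class VersionIdDemo {
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1000000 + minor * 10000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(versionId(2, 0, 0, 2));  // 2000002 == V_2_0_0_beta2_ID
        System.out.println(versionId(2, 0, 0, 51)); // 2000051 == V_2_0_0_rc1_ID
        System.out.println(versionId(2, 0, 0, 99)); // 2000099 == V_2_0_0_ID (GA)
        System.out.println(versionId(2, 1, 0, 99)); // 2010099 == V_2_1_0_ID
    }
}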
@@ -21,6 +21,7 @@ package org.elasticsearch.action;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;

@@ -32,8 +33,8 @@ import java.io.IOException;
  */
 public class UnavailableShardsException extends ElasticsearchException {

-    public UnavailableShardsException(@Nullable ShardId shardId, String message) {
-        super(buildMessage(shardId, message));
+    public UnavailableShardsException(@Nullable ShardId shardId, String message, Object... args) {
+        super(buildMessage(shardId, message), args);
     }

     private static String buildMessage(ShardId shardId, String message) {
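The constructor now accepts varargs that ElasticsearchException substitutes into "{}" placeholders; the retry and timeout hunks later in this diff rely on exactly that. A runnable sketch of that substitution style (a simplified stand-in, not the real formatter):

public class PlaceholderDemo {
    static String format(String template, Object... args) {
        StringBuilder out = new StringBuilder();
        int argIndex = 0, from = 0, at;
        while ((at = template.indexOf("{}", from)) != -1 && argIndex < args.length) {
            out.append(template, from, at).append(args[argIndex++]);
            from = at + 2;
        }
        return out.append(template.substring(from)).toString();
    }

    public static void main(String[] args) {
        // What a caller gets from the new varargs constructor, conceptually:
        System.out.println(format("Timeout waiting for [{}], request: {}", "5s", "indices:data/write/update"));
    }
}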
@@ -19,9 +19,9 @@

 package org.elasticsearch.action.admin.indices.segments;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.index.shard.ShardId;

+import java.util.Arrays;
 import java.util.Iterator;

 public class IndexShardSegments implements Iterable<ShardSegments> {

@@ -49,6 +49,6 @@ public class IndexShardSegments implements Iterable<ShardSegments> {

     @Override
     public Iterator<ShardSegments> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }
 }
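This is the first of many identical substitutions in this commit: Guava's Iterators.forArray is replaced by a JDK 8 stream over the array. Both produce a read-only, in-order iterator, so behavior is unchanged while the Guava dependency goes away:

import java.util.Arrays;
import java.util.Iterator;

public class ForArrayDemo {
    public static void main(String[] args) {
        String[] shards = {"a", "b", "c"};
        // Previously: Iterator<String> it = Iterators.forArray(shards);  (Guava)
        Iterator<String> it = Arrays.stream(shards).iterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // a, b, c -- same order, no Guava
        }
    }
}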
@@ -19,13 +19,13 @@

 package org.elasticsearch.action.admin.indices.stats;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;

 /**

@@ -57,7 +57,7 @@ public class IndexShardStats implements Iterable<ShardStats>, Streamable {

     @Override
     public Iterator<ShardStats> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }

     private CommonStats total = null;
@@ -19,9 +19,9 @@

 package org.elasticsearch.action.admin.indices.upgrade.get;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.index.shard.ShardId;

+import java.util.Arrays;
 import java.util.Iterator;

 public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {

@@ -49,7 +49,7 @@ public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {

     @Override
     public Iterator<ShardUpgradeStatus> iterator() {
-        return Iterators.forArray(shards);
+        return Arrays.stream(shards).iterator();
     }

     public long getTotalBytes() {
@@ -19,13 +19,13 @@

 package org.elasticsearch.action.bulk;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;

 /**

@@ -95,7 +95,7 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {

     @Override
     public Iterator<BulkItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }

     @Override
@@ -47,7 +47,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.mapper.Mapping;

@@ -97,6 +96,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
     protected TransportRequestOptions transportOptions() {
         return BulkAction.INSTANCE.transportOptions(settings);
     }

     @Override
     protected BulkShardResponse newResponseInstance() {
         return new BulkShardResponse();

@@ -416,7 +416,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
         } catch (Throwable t) {
             t = ExceptionsHelper.unwrapCause(t);
             boolean retry = false;
-            if (t instanceof VersionConflictEngineException || (t instanceof DocumentAlreadyExistsException && translate.operation() == UpdateHelper.Operation.UPSERT)) {
+            if (t instanceof VersionConflictEngineException) {
                 retry = true;
             }
             return new UpdateResult(translate, indexRequest, retry, t, null);

@@ -460,20 +460,12 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).index(shardId.getIndex()).type(indexRequest.type()).id(indexRequest.id())
                 .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());

-        final Engine.IndexingOperation operation;
-        if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
-            operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA);
-        } else {
-            assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType();
-            operation = indexShard.prepareCreate(sourceToParse,
-                    indexRequest.version(), indexRequest.versionType(),
-                    Engine.Operation.Origin.REPLICA);
-        }
+        final Engine.Index operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         if (update != null) {
             throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
         }
-        operation.execute(indexShard);
+        indexShard.index(operation);
         location = locationToSync(location, operation.getTranslogLocation());
     } catch (Throwable e) {
         // if its not an ignore replica failure, we need to make sure to bubble up the failure

@@ -500,7 +492,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
             }
         }

-            processAfter(request.refresh(), indexShard, location);
+        processAfter(request.refresh(), indexShard, location);
     }

     private void applyVersion(BulkItemRequest item, long version, VersionType versionType) {
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.get;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.*;
 import org.elasticsearch.action.support.IndicesOptions;

@@ -27,6 +26,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;

@@ -37,10 +37,7 @@ import org.elasticsearch.index.VersionType;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
+import java.util.*;

 public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {

@@ -498,7 +495,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {

     @Override
     public Iterator<Item> iterator() {
-        return Iterators.unmodifiableIterator(items.iterator());
+        return Collections.unmodifiableCollection(items).iterator();
     }

     @Override
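A second recurring pattern starts here: Guava's Iterators.unmodifiableIterator(list.iterator()) becomes the JDK's Collections.unmodifiableCollection(list).iterator(). Either way, remove() on the returned iterator throws, so callers cannot mutate the backing list:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class UnmodifiableIteratorDemo {
    public static void main(String[] args) {
        List<String> items = new ArrayList<>(Arrays.asList("x", "y"));
        Iterator<String> it = Collections.unmodifiableCollection(items).iterator();
        System.out.println(it.next()); // x
        try {
            it.remove();
        } catch (UnsupportedOperationException e) {
            // Same contract Guava's unmodifiableIterator provided.
            System.out.println("removal is blocked");
        }
    }
}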
@@ -19,10 +19,8 @@

 package org.elasticsearch.action.get;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;

@@ -31,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;

 import java.io.IOException;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.Iterator;

 public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContent {

@@ -126,7 +124,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContent {

     @Override
     public Iterator<MultiGetItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }

     @Override
@@ -49,14 +49,14 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 /**
  * Index request to index a typed JSON document into a specific index and make it searchable. Best
  * created using {@link org.elasticsearch.client.Requests#indexRequest(String)}.
- * <p>
+ *
  * The index requires the {@link #index()}, {@link #type(String)}, {@link #id(String)} and
  * {@link #source(byte[])} to be set.
- * <p>
+ *
  * The source (content to index) can be set in its bytes form using ({@link #source(byte[])}),
  * its string form ({@link #source(String)}) or using a {@link org.elasticsearch.common.xcontent.XContentBuilder}
  * ({@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}).
- * <p>
+ *
  * If the {@link #id(String)} is not set, it will be automatically generated.
  *
  * @see IndexResponse

@@ -114,7 +114,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest> {

     public static OpType fromString(String sOpType) {
         String lowersOpType = sOpType.toLowerCase(Locale.ROOT);
-        switch(lowersOpType){
+        switch (lowersOpType) {
             case "create":
                 return OpType.CREATE;
             case "index":

@@ -216,6 +216,14 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest> {
         if (source == null) {
             validationException = addValidationError("source is missing", validationException);
         }
+
+        if (opType() == OpType.CREATE) {
+            if (versionType != VersionType.INTERNAL || version != Versions.MATCH_DELETED) {
+                validationException = addValidationError("create operations do not support versioning. use index instead", validationException);
+                return validationException;
+            }
+        }
+
         if (!versionType.validateVersionForWrites(version)) {
             validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
         }

@@ -370,7 +378,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest> {

     /**
      * Sets the document source to index.
-     * <p>
+     *
      * Note, its preferable to either set it using {@link #source(org.elasticsearch.common.xcontent.XContentBuilder)}
      * or using the {@link #source(byte[])}.
      */

@@ -480,6 +488,10 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements DocumentRequest<IndexRequest> {
      */
     public IndexRequest opType(OpType opType) {
         this.opType = opType;
+        if (opType == OpType.CREATE) {
+            version(Versions.MATCH_DELETED);
+            versionType(VersionType.INTERNAL);
+        }
         return this;
     }
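Taken together, these hunks change what "create" means: instead of a separate engine operation, OpType.CREATE is expressed as an ordinary index operation whose expected version is the MATCH_DELETED sentinel under internal versioning, and validate() rejects explicit versions on creates. A simplified model of that mapping (stand-in constants, not the real classes):

public class OpTypeDemo {
    static final long MATCH_ANY = -3L;
    static final long MATCH_DELETED = -4L;

    enum OpType { INDEX, CREATE }

    static long versionForOpType(OpType opType) {
        // Mirrors the new IndexRequest.opType(OpType): CREATE now means
        // "write only if the document is currently absent or deleted".
        return opType == OpType.CREATE ? MATCH_DELETED : MATCH_ANY;
    }

    public static void main(String[] args) {
        System.out.println(versionForOpType(OpType.CREATE)); // -4
        System.out.println(versionForOpType(OpType.INDEX));  // -3
    }
}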
@@ -54,7 +54,7 @@ import org.elasticsearch.transport.TransportService;

 /**
  * Performs the index operation.
- * <p>
+ *
  * Allows for the following settings:
  * <ul>
  * <li><b>autoCreateIndex</b>: When set to <tt>true</tt>, will automatically create an index if one does not exists.

@@ -167,6 +167,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
         IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());

         final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);

         final IndexResponse response = result.response;
         final Translog.Location location = result.location;
         processAfter(request.refresh(), indexShard, location);

@@ -180,18 +181,12 @@ public class TransportIndexAction extends TransportReplicationAction<IndexRequest, IndexRequest, IndexResponse> {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

-        final Engine.IndexingOperation operation;
-        if (request.opType() == IndexRequest.OpType.INDEX) {
-            operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
-        } else {
-            assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
-            operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
-        }
+        final Engine.Index operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         if (update != null) {
             throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
         }
-        operation.execute(indexShard);
+        indexShard.index(operation);
         processAfter(request.refresh(), indexShard, operation.getTranslogLocation());
     }
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.action.percolate;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;

@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;

 /**

@@ -52,7 +52,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterable<MultiPercolateResponse.Item>, ToXContent {

     @Override
     public Iterator<Item> iterator() {
-        return Iterators.forArray(items);
+        return Arrays.stream(items).iterator();
     }

     /**
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.search;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;

@@ -32,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.common.xcontent.XContentFactory;

 import java.io.IOException;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.Iterator;

 /**

@@ -122,7 +121,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContent {

     @Override
     public Iterator<Item> iterator() {
-        return Iterators.forArray(items);
+        return Arrays.stream(items).iterator();
     }

     /**
@@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;

@@ -95,17 +96,22 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest

         private final NodesRequest request;
         private final String[] nodesIds;
+        private final DiscoveryNode[] nodes;
         private final ActionListener<NodesResponse> listener;
-        private final ClusterState clusterState;
         private final AtomicReferenceArray<Object> responses;
         private final AtomicInteger counter = new AtomicInteger();

         private AsyncAction(NodesRequest request, ActionListener<NodesResponse> listener) {
             this.request = request;
             this.listener = listener;
-            clusterState = clusterService.state();
+            ClusterState clusterState = clusterService.state();
             String[] nodesIds = resolveNodes(request, clusterState);
             this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
+            ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
+            this.nodes = new DiscoveryNode[nodesIds.length];
+            for (int i = 0; i < nodesIds.length; i++) {
+                this.nodes[i] = nodes.get(nodesIds[i]);
+            }
             this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
         }

@@ -128,7 +134,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
             for (int i = 0; i < nodesIds.length; i++) {
                 final String nodeId = nodesIds[i];
                 final int idx = i;
-                final DiscoveryNode node = clusterState.nodes().nodes().get(nodeId);
+                final DiscoveryNode node = nodes[i];
                 try {
                     if (node == null) {
                         onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
@@ -56,7 +56,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.mapper.Mapping;

@@ -188,9 +187,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest
         if (cause instanceof VersionConflictEngineException) {
             return true;
         }
-        if (cause instanceof DocumentAlreadyExistsException) {
-            return true;
-        }
         return false;
     }

@@ -1036,22 +1032,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest

     /** Utility method to create either an index or a create operation depending
      * on the {@link OpType} of the request. */
-    private final Engine.IndexingOperation prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
+    private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
-        if (request.opType() == IndexRequest.OpType.INDEX) {
-            return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
-        } else {
-            assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
-            return indexShard.prepareCreate(sourceToParse,
-                    request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
-        }
+        return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
     }

     /** Execute the given {@link IndexRequest} on a primary shard, throwing a
      * {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
     protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
-        Engine.IndexingOperation operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
+        Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
         Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
         final ShardId shardId = indexShard.shardId();
         if (update != null) {

@@ -1064,7 +1055,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest
                     "Dynamics mappings are not available on the node that holds the primary yet");
             }
         }
-        final boolean created = operation.execute(indexShard);
+        final boolean created = indexShard.index(operation);

         // update the version on request so it will happen on the replicas
         final long version = operation.version();
@@ -19,6 +19,7 @@

 package org.elasticsearch.action.support.single.instance;

 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.UnavailableShardsException;

@@ -35,6 +36,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.support.LoggerMessageFormat;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.shard.ShardId;

@@ -42,6 +44,7 @@ import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;

 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Supplier;

@@ -111,9 +114,8 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest
         private volatile ClusterStateObserver observer;
         private ShardIterator shardIt;
         private DiscoveryNodes nodes;
         private final AtomicBoolean operationStarted = new AtomicBoolean();

-        private AsyncSingleAction(Request request, ActionListener<Response> listener) {
+        AsyncSingleAction(Request request, ActionListener<Response> listener) {
             this.request = request;
             this.listener = listener;
         }

@@ -123,14 +125,14 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest
             doStart();
         }

-        protected boolean doStart() {
+        protected void doStart() {
             nodes = observer.observedState().nodes();
             try {
                 ClusterBlockException blockException = checkGlobalBlock(observer.observedState());
                 if (blockException != null) {
                     if (blockException.retryable()) {
                         retry(blockException);
-                        return false;
+                        return;
                     } else {
                         throw blockException;
                     }

@@ -138,13 +140,14 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest
                 request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
                 // check if we need to execute, and if not, return
                 if (!resolveRequest(observer.observedState(), request, listener)) {
-                    return true;
+                    listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("{} request {} could not be resolved", new ShardId(request.index, request.shardId), actionName)));
+                    return;
                 }
                 blockException = checkRequestBlock(observer.observedState(), request);
                 if (blockException != null) {
                     if (blockException.retryable()) {
                         retry(blockException);
-                        return false;
+                        return;
                     } else {
                         throw blockException;
                     }

@@ -152,13 +155,13 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest
                 shardIt = shards(observer.observedState(), request);
             } catch (Throwable e) {
                 listener.onFailure(e);
-                return true;
+                return;
             }

             // no shardIt, might be in the case between index gateway recovery and shardIt initialization
             if (shardIt.size() == 0) {
                 retry(null);
-                return false;
+                return;
             }

             // this transport only make sense with an iterator that returns a single shard routing (like primary)

@@ -169,11 +172,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest

             if (!shard.active()) {
                 retry(null);
-                return false;
             }

             if (!operationStarted.compareAndSet(false, true)) {
-                return true;
+                return;
             }

             request.shardId = shardIt.shardId().id();

@@ -197,24 +196,30 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest

                 @Override
                 public void handleException(TransportException exp) {
+                    Throwable cause = exp.unwrapCause();
                     // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
-                    if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
+                    if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                             retryOnFailure(exp)) {
                         operationStarted.set(false);
                         // we already marked it as started when we executed it (removed the listener) so pass false
                         // to re-add to the cluster listener
-                        retry(null);
+                        retry(cause);
                     } else {
                         listener.onFailure(exp);
                     }
                 }
             });
-            return true;
         }

         void retry(final @Nullable Throwable failure) {
             if (observer.isTimedOut()) {
                 // we running as a last attempt after a timeout has happened. don't retry
+                Throwable listenFailure = failure;
+                if (listenFailure == null) {
+                    if (shardIt == null) {
+                        listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1), "Timeout waiting for [{}], request: {}", request.timeout(), actionName);
+                    } else {
+                        listenFailure = new UnavailableShardsException(shardIt.shardId(), "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}", shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName);
+                    }
+                }
+                listener.onFailure(listenFailure);
+                return;
             }

@@ -232,17 +237,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest
                 @Override
                 public void onTimeout(TimeValue timeout) {
                     // just to be on the safe side, see if we can start it now?
-                    if (!doStart()) {
-                        Throwable listenFailure = failure;
-                        if (listenFailure == null) {
-                            if (shardIt == null) {
-                                listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1), "Timeout waiting for [" + timeout + "], request: " + request.toString());
-                            } else {
-                                listenFailure = new UnavailableShardsException(shardIt.shardId(), "[" + shardIt.size() + "] shardIt, [" + shardIt.sizeActive() + "] active : Timeout waiting for [" + timeout + "], request: " + request.toString());
-                            }
-                        }
-                        listener.onFailure(listenFailure);
-                    }
+                    doStart();
                 }
             }, request.timeout());
         }
@@ -19,11 +19,11 @@

 package org.elasticsearch.action.termvectors;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.*;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentFactory;

@@ -74,7 +74,7 @@ public class MultiTermVectorsRequest extends ActionRequest<MultiTermVectorsRequest>

     @Override
     public Iterator<TermVectorsRequest> iterator() {
-        return Iterators.unmodifiableIterator(requests.iterator());
+        return Collections.unmodifiableCollection(requests).iterator();
     }

     public boolean isEmpty() {
@@ -19,7 +19,6 @@

 package org.elasticsearch.action.termvectors;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;

@@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Iterator;

 public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContent {

@@ -120,7 +120,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContent {

     @Override
     public Iterator<MultiTermVectorsItemResponse> iterator() {
-        return Iterators.forArray(responses);
+        return Arrays.stream(responses).iterator();
     }

     @Override
@@ -48,9 +48,8 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
-import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;

@@ -170,7 +169,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
-                IndexRequest upsertRequest = new IndexRequest((IndexRequest)result.action(), request);
+                IndexRequest upsertRequest = new IndexRequest(result.action(), request);
                 // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
                 final BytesReference upsertSourceBytes = upsertRequest.source();
                 indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {

@@ -189,7 +188,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
                     @Override
                     public void onFailure(Throwable e) {
                         e = ExceptionsHelper.unwrapCause(e);
-                        if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) {
+                        if (e instanceof VersionConflictEngineException) {
                             if (retryCount < request.retryOnConflict()) {
                                 threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                     @Override

@@ -205,7 +204,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
                 });
                 break;
             case INDEX:
-                IndexRequest indexRequest = new IndexRequest((IndexRequest)result.action(), request);
+                IndexRequest indexRequest = new IndexRequest(result.action(), request);
                 // we fetch it from the index request so we don't generate the bytes twice, its already done in the index request
                 final BytesReference indexSourceBytes = indexRequest.source();
                 indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {

@@ -235,7 +234,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
                 });
                 break;
             case DELETE:
-                DeleteRequest deleteRequest = new DeleteRequest((DeleteRequest)result.action(), request);
+                DeleteRequest deleteRequest = new DeleteRequest(result.action(), request);
                 deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
                     @Override
                     public void onResponse(DeleteResponse response) {
@@ -26,7 +26,6 @@ import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.Terminal;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.CreationException;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.logging.ESLogger;

@@ -249,13 +248,13 @@ final class Bootstrap {

         Environment environment = initialSettings(foreground);
         Settings settings = environment.settings();
+        setupLogging(settings, environment);
+        checkForCustomConfFile();

         if (environment.pidFile() != null) {
             PidFile.create(environment.pidFile(), true);
         }

-        setupLogging(settings, environment);

         if (System.getProperty("es.max-open-files", "false").equals("true")) {
             ESLogger logger = Loggers.getLogger(Bootstrap.class);
             logger.info("max_open_files [{}]", ProcessProbe.getInstance().getMaxFileDescriptorCount());

@@ -330,4 +329,21 @@ final class Bootstrap {
             System.err.flush();
         }
     }
+
+    private static void checkForCustomConfFile() {
+        String confFileSetting = System.getProperty("es.default.config");
+        checkUnsetAndMaybeExit(confFileSetting, "es.default.config");
+        confFileSetting = System.getProperty("es.config");
+        checkUnsetAndMaybeExit(confFileSetting, "es.config");
+        confFileSetting = System.getProperty("elasticsearch.config");
+        checkUnsetAndMaybeExit(confFileSetting, "elasticsearch.config");
+    }
+
+    private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
+        if (confFileSetting != null && confFileSetting.isEmpty() == false) {
+            ESLogger logger = Loggers.getLogger(Bootstrap.class);
+            logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
+            System.exit(1);
+        }
+    }
 }
@@ -165,7 +165,7 @@ final class Security {
         Map<String,String> m = new HashMap<>();
         m.put("repository-s3", "org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin");
         m.put("discovery-ec2", "org.elasticsearch.plugin.discovery.ec2.Ec2DiscoveryPlugin");
-        m.put("cloud-gce", "org.elasticsearch.plugin.cloud.gce.CloudGcePlugin");
+        m.put("discovery-gce", "org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin");
         m.put("lang-expression", "org.elasticsearch.script.expression.ExpressionPlugin");
         m.put("lang-groovy", "org.elasticsearch.script.groovy.GroovyPlugin");
         m.put("lang-javascript", "org.elasticsearch.plugin.javascript.JavaScriptPlugin");
@@ -73,7 +73,7 @@ public class MappingUpdatedAction extends AbstractComponent {
             throw new IllegalArgumentException("_default_ mapping should not be updated");
         }
         return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString())
-            .setMasterNodeTimeout(timeout).setTimeout(timeout);
+                .setMasterNodeTimeout(timeout).setTimeout(timeout);
     }

     public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) {
@@ -34,7 +34,7 @@ import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.IndexStoreModule;
 import org.elasticsearch.script.ScriptService;

@@ -322,11 +322,11 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
         Index index = new Index(indexMetaData.getIndex());
         Settings settings = indexMetaData.settings();
         try {
-            SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
+            SimilarityService similarityService = new SimilarityService(index, settings);
             // We cannot instantiate real analysis server at this point because the node might not have
             // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
             try (AnalysisService analysisService = new FakeAnalysisService(index, settings)) {
-                try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, scriptService)) {
+                try (MapperService mapperService = new MapperService(index, settings, analysisService, similarityService, scriptService)) {
                     for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                         MappingMetaData mappingMetaData = cursor.value;
                         mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
@@ -19,13 +19,10 @@

 package org.elasticsearch.cluster.routing;

-import com.google.common.collect.Iterators;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.collect.Iterators;

-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
+import java.util.*;

 /**
  * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards

@@ -51,7 +48,7 @@ public class RoutingNode implements Iterable<ShardRouting> {

     @Override
     public Iterator<ShardRouting> iterator() {
-        return Iterators.unmodifiableIterator(shards.iterator());
+        return Collections.unmodifiableCollection(shards).iterator();
     }

     Iterator<ShardRouting> mutableIterator() {
@@ -21,13 +21,13 @@ package org.elasticsearch.cluster.routing;

 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.Iterators;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.index.shard.ShardId;

 import java.util.*;

@@ -144,7 +144,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

     @Override
     public Iterator<RoutingNode> iterator() {
-        return Iterators.unmodifiableIterator(nodesToShards.values().iterator());
+        return Collections.unmodifiableCollection(nodesToShards.values()).iterator();
     }

     public RoutingTable routingTable() {
@@ -0,0 +1,176 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Iterator;
import java.util.Queue;

/**
 * An {@code EvictingQueue} is a non-blocking queue which is limited to a maximum size; when new elements are added to a
 * full queue, elements are evicted from the head of the queue to accommodate the new elements.
 *
 * @param <T> The type of elements in the queue.
 */
public class EvictingQueue<T> implements Queue<T> {
    private final int maximumSize;
    private final ArrayDeque<T> queue;

    /**
     * Construct a new {@code EvictingQueue} that holds {@code maximumSize} elements.
     *
     * @param maximumSize The maximum number of elements that the queue can hold
     * @throws IllegalArgumentException if {@code maximumSize} is less than zero
     */
    public EvictingQueue(int maximumSize) {
        if (maximumSize < 0) {
            throw new IllegalArgumentException("maximumSize < 0");
        }
        this.maximumSize = maximumSize;
        this.queue = new ArrayDeque<>(maximumSize);
    }

    /**
     * @return the number of additional elements that the queue can accommodate before evictions occur
     */
    public int remainingCapacity() {
        return this.maximumSize - this.size();
    }

    /**
     * Add the given element to the queue, possibly forcing an eviction from the head if {@link #remainingCapacity()} is
     * zero.
     *
     * @param t the element to add
     * @return true if the element was added (always the case for {@code EvictingQueue})
     */
    @Override
    public boolean add(T t) {
        if (maximumSize == 0) {
            return true;
        }
        if (queue.size() == maximumSize) {
            queue.remove();
        }
        queue.add(t);
        return true;
    }

    /**
     * @see #add(Object)
     */
    @Override
    public boolean offer(T t) {
        return add(t);
    }

    @Override
    public T remove() {
        return queue.remove();
    }

    @Override
    public T poll() {
        return queue.poll();
    }

    @Override
    public T element() {
        return queue.element();
    }

    @Override
    public T peek() {
        return queue.peek();
    }

    @Override
    public int size() {
        return queue.size();
    }

    @Override
    public boolean isEmpty() {
        return queue.isEmpty();
    }

    @Override
    public boolean contains(Object o) {
        return queue.contains(o);
    }

    @Override
    public Iterator<T> iterator() {
        return queue.iterator();
    }

    @Override
    public Object[] toArray() {
        return queue.toArray();
    }

    @Override
    public <T1> T1[] toArray(T1[] a) {
        return queue.toArray(a);
    }

    @Override
    public boolean remove(Object o) {
        return queue.remove(o);
    }

    @Override
    public boolean containsAll(Collection<?> c) {
        return queue.containsAll(c);
    }

    /**
     * Add the given elements to the queue, possibly forcing evictions from the head if {@link #remainingCapacity()} is
     * zero or becomes zero during the execution of this method.
     *
     * @param c the collection of elements to add
     * @return true if any elements were added to the queue
     */
    @Override
    public boolean addAll(Collection<? extends T> c) {
        boolean modified = false;
        for (T e : c)
            if (add(e))
                modified = true;
        return modified;
    }

    @Override
    public boolean removeAll(Collection<?> c) {
        return queue.removeAll(c);
    }

    @Override
    public boolean retainAll(Collection<?> c) {
        return queue.retainAll(c);
    }

    @Override
    public void clear() {
        queue.clear();
    }
}
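This new class is a drop-in replacement for Guava's EvictingQueue, implemented as a thin ArrayDeque wrapper. A short usage sketch (note that with a maximumSize of zero, add() returns true but silently drops the element):

import org.elasticsearch.common.collect.EvictingQueue;

public class EvictingQueueDemo {
    public static void main(String[] args) {
        EvictingQueue<Integer> last3 = new EvictingQueue<>(3);
        for (int i = 1; i <= 5; i++) {
            last3.add(i); // always returns true; once full, the head is evicted first
        }
        for (int value : last3) {
            System.out.println(value); // 3, 4, 5 -- 1 and 2 were evicted
        }
    }
}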
@@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import java.util.Iterator;
import java.util.NoSuchElementException;

public class Iterators {
    public static <T> Iterator<T> concat(Iterator<? extends T>... iterators) {
        if (iterators == null) {
            throw new NullPointerException("iterators");
        }

        return new ConcatenatedIterator<>(iterators);
    }

    static class ConcatenatedIterator<T> implements Iterator<T> {
        private final Iterator<? extends T>[] iterators;
        private int index = 0;

        public ConcatenatedIterator(Iterator<? extends T>... iterators) {
            if (iterators == null) {
                throw new NullPointerException("iterators");
            }
            for (int i = 0; i < iterators.length; i++) {
                if (iterators[i] == null) {
                    throw new NullPointerException("iterators[" + i + "]");
                }
            }
            this.iterators = iterators;
        }

        @Override
        public boolean hasNext() {
            boolean hasNext = false;
            while (index < iterators.length && !(hasNext = iterators[index].hasNext())) {
                index++;
            }

            return hasNext;
        }

        @Override
        public T next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            return iterators[index].next();
        }
    }
}
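The new Iterators.concat lazily chains the given iterators: hasNext() advances the index past exhausted iterators, and next() delegates to whichever iterator is current. Usage sketch:

import org.elasticsearch.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class ConcatDemo {
    public static void main(String[] args) {
        Iterator<String> first = Arrays.asList("a", "b").iterator();
        Iterator<String> second = Arrays.asList("c").iterator();
        Iterator<String> all = Iterators.concat(first, second);
        while (all.hasNext()) {
            System.out.print(all.next()); // abc
        }
    }
}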
@@ -19,9 +19,8 @@

 package org.elasticsearch.common.io;

-import com.google.common.collect.Iterators;
-
 import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.logging.ESLogger;

 import java.io.BufferedReader;

@@ -35,6 +34,7 @@ import java.nio.file.*;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.StreamSupport;

 import static java.nio.file.FileVisitResult.CONTINUE;
 import static java.nio.file.FileVisitResult.SKIP_SUBTREE;

@@ -328,7 +328,7 @@
      */
     public static Path[] files(Path from, DirectoryStream.Filter<Path> filter) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(from, filter)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }

@@ -337,7 +337,7 @@
      */
     public static Path[] files(Path directory) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }

@@ -346,8 +346,12 @@
      */
     public static Path[] files(Path directory, String glob) throws IOException {
         try (DirectoryStream<Path> stream = Files.newDirectoryStream(directory, glob)) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return toArray(stream);
         }
     }
+
+    private static Path[] toArray(DirectoryStream<Path> stream) {
+        return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]);
+    }

 }
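The new private toArray helper materializes a DirectoryStream with the JDK's StreamSupport instead of Guava's Iterators.toArray. The same conversion in isolation, runnable against any directory:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.StreamSupport;

public class ToArrayDemo {
    public static void main(String[] args) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(Paths.get("."), "*.java")) {
            // Same conversion the new helper performs: spliterator -> sequential stream -> array.
            Path[] files = StreamSupport.stream(stream.spliterator(), false)
                    .toArray(Path[]::new);
            System.out.println(files.length + " matches");
        }
    }
}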
@@ -33,10 +33,24 @@ import java.util.concurrent.ConcurrentMap;

 /** Utility class to resolve the Lucene doc ID and version for a given uid. */
 public class Versions {

-    public static final long MATCH_ANY = -3L; // Version was not specified by the user
+    /** used to indicate the write operation should succeed regardless of current version **/
+    public static final long MATCH_ANY = -3L;
+
+    /** indicates that the current document was not found in lucene and in the version map */
     public static final long NOT_FOUND = -1L;
+
+    /**
+     * used when the document is old and doesn't contain any version information in the index
+     * see {@link PerThreadIDAndVersionLookup#lookup(org.apache.lucene.util.BytesRef)}
+     */
     public static final long NOT_SET = -2L;
+
+    /**
+     * used to indicate that the write operation should be executed if the document is currently deleted
+     * i.e., not found in the index and/or found as deleted (with version) in the version map
+     */
+    public static final long MATCH_DELETED = -4L;

     // TODO: is there somewhere else we can store these?
     private static final ConcurrentMap<IndexReader, CloseableThreadLocal<PerThreadIDAndVersionLookup>> lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
|
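The new MATCH_DELETED sentinel is what lets a "create" be expressed as a plain index request that must fail if a live copy exists. A hedged toy sketch of that rule (this helper class is illustrative, not part of the commit; the constant values are the ones above):

// Hypothetical helper illustrating the sentinel semantics; not part of the commit.
public class VersionSentinels {
    public static final long MATCH_ANY = -3L;      // write succeeds regardless of current version
    public static final long NOT_FOUND = -1L;      // doc absent from Lucene and the version map
    public static final long NOT_SET = -2L;        // legacy doc with no version info in the index
    public static final long MATCH_DELETED = -4L;  // write only if the doc is currently deleted

    // A create-style write conflicts exactly when a live copy of the document exists.
    static boolean createWouldConflict(long currentVersion, boolean deleted) {
        return currentVersion != NOT_FOUND && deleted == false;
    }

    public static void main(String[] args) {
        System.out.println(createWouldConflict(NOT_FOUND, true));  // false: safe to create
        System.out.println(createWouldConflict(5L, false));        // true: doc already exists
    }
}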
@ -0,0 +1,357 @@
/*
 * Copyright (C) 2008 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.elasticsearch.common.network;

import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Locale;

public class InetAddresses {
    private static int IPV4_PART_COUNT = 4;
    private static int IPV6_PART_COUNT = 8;

    public static boolean isInetAddress(String ipString) {
        return ipStringToBytes(ipString) != null;
    }

    private static byte[] ipStringToBytes(String ipString) {
        // Make a first pass to categorize the characters in this string.
        boolean hasColon = false;
        boolean hasDot = false;
        for (int i = 0; i < ipString.length(); i++) {
            char c = ipString.charAt(i);
            if (c == '.') {
                hasDot = true;
            } else if (c == ':') {
                if (hasDot) {
                    return null; // Colons must not appear after dots.
                }
                hasColon = true;
            } else if (Character.digit(c, 16) == -1) {
                return null; // Everything else must be a decimal or hex digit.
            }
        }

        // Now decide which address family to parse.
        if (hasColon) {
            if (hasDot) {
                ipString = convertDottedQuadToHex(ipString);
                if (ipString == null) {
                    return null;
                }
            }
            return textToNumericFormatV6(ipString);
        } else if (hasDot) {
            return textToNumericFormatV4(ipString);
        }
        return null;
    }

    private static String convertDottedQuadToHex(String ipString) {
        int lastColon = ipString.lastIndexOf(':');
        String initialPart = ipString.substring(0, lastColon + 1);
        String dottedQuad = ipString.substring(lastColon + 1);
        byte[] quad = textToNumericFormatV4(dottedQuad);
        if (quad == null) {
            return null;
        }
        String penultimate = Integer.toHexString(((quad[0] & 0xff) << 8) | (quad[1] & 0xff));
        String ultimate = Integer.toHexString(((quad[2] & 0xff) << 8) | (quad[3] & 0xff));
        return initialPart + penultimate + ":" + ultimate;
    }

    private static byte[] textToNumericFormatV4(String ipString) {
        String[] address = ipString.split("\\.", IPV4_PART_COUNT + 1);
        if (address.length != IPV4_PART_COUNT) {
            return null;
        }

        byte[] bytes = new byte[IPV4_PART_COUNT];
        try {
            for (int i = 0; i < bytes.length; i++) {
                bytes[i] = parseOctet(address[i]);
            }
        } catch (NumberFormatException ex) {
            return null;
        }

        return bytes;
    }

    private static byte parseOctet(String ipPart) {
        // Note: we already verified that this string contains only hex digits.
        int octet = Integer.parseInt(ipPart);
        // Disallow leading zeroes, because no clear standard exists on
        // whether these should be interpreted as decimal or octal.
        if (octet > 255 || (ipPart.startsWith("0") && ipPart.length() > 1)) {
            throw new NumberFormatException();
        }
        return (byte) octet;
    }

    private static byte[] textToNumericFormatV6(String ipString) {
        // An address can have [2..8] colons, and N colons make N+1 parts.
        String[] parts = ipString.split(":", IPV6_PART_COUNT + 2);
        if (parts.length < 3 || parts.length > IPV6_PART_COUNT + 1) {
            return null;
        }

        // Disregarding the endpoints, find "::" with nothing in between.
        // This indicates that a run of zeroes has been skipped.
        int skipIndex = -1;
        for (int i = 1; i < parts.length - 1; i++) {
            if (parts[i].length() == 0) {
                if (skipIndex >= 0) {
                    return null; // Can't have more than one ::
                }
                skipIndex = i;
            }
        }

        int partsHi; // Number of parts to copy from above/before the "::"
        int partsLo; // Number of parts to copy from below/after the "::"
        if (skipIndex >= 0) {
            // If we found a "::", then check if it also covers the endpoints.
            partsHi = skipIndex;
            partsLo = parts.length - skipIndex - 1;
            if (parts[0].length() == 0 && --partsHi != 0) {
                return null; // ^: requires ^::
            }
            if (parts[parts.length - 1].length() == 0 && --partsLo != 0) {
                return null; // :$ requires ::$
            }
        } else {
            // Otherwise, allocate the entire address to partsHi. The endpoints
            // could still be empty, but parseHextet() will check for that.
            partsHi = parts.length;
            partsLo = 0;
        }

        // If we found a ::, then we must have skipped at least one part.
        // Otherwise, we must have exactly the right number of parts.
        int partsSkipped = IPV6_PART_COUNT - (partsHi + partsLo);
        if (!(skipIndex >= 0 ? partsSkipped >= 1 : partsSkipped == 0)) {
            return null;
        }

        // Now parse the hextets into a byte array.
        ByteBuffer rawBytes = ByteBuffer.allocate(2 * IPV6_PART_COUNT);
        try {
            for (int i = 0; i < partsHi; i++) {
                rawBytes.putShort(parseHextet(parts[i]));
            }
            for (int i = 0; i < partsSkipped; i++) {
                rawBytes.putShort((short) 0);
            }
            for (int i = partsLo; i > 0; i--) {
                rawBytes.putShort(parseHextet(parts[parts.length - i]));
            }
        } catch (NumberFormatException ex) {
            return null;
        }
        return rawBytes.array();
    }

    private static short parseHextet(String ipPart) {
        // Note: we already verified that this string contains only hex digits.
        int hextet = Integer.parseInt(ipPart, 16);
        if (hextet > 0xffff) {
            throw new NumberFormatException();
        }
        return (short) hextet;
    }

    /**
     * Returns the string representation of an {@link InetAddress} suitable
     * for inclusion in a URI.
     *
     * <p>For IPv4 addresses, this is identical to
     * {@link InetAddress#getHostAddress()}, but for IPv6 addresses it
     * compresses zeroes and surrounds the text with square brackets; for example
     * {@code "[2001:db8::1]"}.
     *
     * <p>Per section 3.2.2 of
     * <a target="_parent"
     *    href="http://tools.ietf.org/html/rfc3986#section-3.2.2"
     *    >http://tools.ietf.org/html/rfc3986</a>,
     * a URI containing an IPv6 string literal is of the form
     * {@code "http://[2001:db8::1]:8888/index.html"}.
     *
     * <p>Use of either {@link InetAddresses#toAddrString},
     * {@link InetAddress#getHostAddress()}, or this method is recommended over
     * {@link InetAddress#toString()} when an IP address string literal is
     * desired. This is because {@link InetAddress#toString()} prints the
     * hostname and the IP address string joined by a "/".
     *
     * @param ip {@link InetAddress} to be converted to URI string literal
     * @return {@code String} containing URI-safe string literal
     */
    public static String toUriString(InetAddress ip) {
        if (ip instanceof Inet6Address) {
            return "[" + toAddrString(ip) + "]";
        }
        return toAddrString(ip);
    }

    /**
     * Returns the string representation of an {@link InetAddress}.
     *
     * <p>For IPv4 addresses, this is identical to
     * {@link InetAddress#getHostAddress()}, but for IPv6 addresses, the output
     * follows <a href="http://tools.ietf.org/html/rfc5952">RFC 5952</a>
     * section 4. The main difference is that this method uses "::" for zero
     * compression, while Java's version uses the uncompressed form.
     *
     * <p>This method uses hexadecimal for all IPv6 addresses, including
     * IPv4-mapped IPv6 addresses such as "::c000:201". The output does not
     * include a Scope ID.
     *
     * @param ip {@link InetAddress} to be converted to an address string
     * @return {@code String} containing the text-formatted IP address
     * @since 10.0
     */
    public static String toAddrString(InetAddress ip) {
        if (ip == null) {
            throw new NullPointerException("ip");
        }
        if (ip instanceof Inet4Address) {
            // For IPv4, Java's formatting is good enough.
            byte[] bytes = ip.getAddress();
            return (bytes[0] & 0xff) + "." + (bytes[1] & 0xff) + "." + (bytes[2] & 0xff) + "." + (bytes[3] & 0xff);
        }
        if (!(ip instanceof Inet6Address)) {
            throw new IllegalArgumentException("ip");
        }
        byte[] bytes = ip.getAddress();
        int[] hextets = new int[IPV6_PART_COUNT];
        for (int i = 0; i < hextets.length; i++) {
            hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
        }
        compressLongestRunOfZeroes(hextets);
        return hextetsToIPv6String(hextets);
    }

    /**
     * Identify and mark the longest run of zeroes in an IPv6 address.
     *
     * <p>Only runs of two or more hextets are considered. In case of a tie, the
     * leftmost run wins. If a qualifying run is found, its hextets are replaced
     * by the sentinel value -1.
     *
     * @param hextets {@code int[]} mutable array of eight 16-bit hextets
     */
    private static void compressLongestRunOfZeroes(int[] hextets) {
        int bestRunStart = -1;
        int bestRunLength = -1;
        int runStart = -1;
        for (int i = 0; i < hextets.length + 1; i++) {
            if (i < hextets.length && hextets[i] == 0) {
                if (runStart < 0) {
                    runStart = i;
                }
            } else if (runStart >= 0) {
                int runLength = i - runStart;
                if (runLength > bestRunLength) {
                    bestRunStart = runStart;
                    bestRunLength = runLength;
                }
                runStart = -1;
            }
        }
        if (bestRunLength >= 2) {
            Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
        }
    }

    /**
     * Convert a list of hextets into a human-readable IPv6 address.
     *
     * <p>In order for "::" compression to work, the input should contain negative
     * sentinel values in place of the elided zeroes.
     *
     * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
     */
    private static String hextetsToIPv6String(int[] hextets) {
        /*
         * While scanning the array, handle these state transitions:
         * start->num => "num" start->gap => "::"
         * num->num => ":num" num->gap => "::"
         * gap->num => "num" gap->gap => ""
         */
        StringBuilder buf = new StringBuilder(39);
        boolean lastWasNumber = false;
        for (int i = 0; i < hextets.length; i++) {
            boolean thisIsNumber = hextets[i] >= 0;
            if (thisIsNumber) {
                if (lastWasNumber) {
                    buf.append(':');
                }
                buf.append(Integer.toHexString(hextets[i]));
            } else {
                if (i == 0 || lastWasNumber) {
                    buf.append("::");
                }
            }
            lastWasNumber = thisIsNumber;
        }
        return buf.toString();
    }

    /**
     * Returns the {@link InetAddress} having the given string representation.
     *
     * <p>This deliberately avoids all nameservice lookups (e.g. no DNS).
     *
     * @param ipString {@code String} containing an IPv4 or IPv6 string literal, e.g.
     *                 {@code "192.168.0.1"} or {@code "2001:db8::1"}
     * @return {@link InetAddress} representing the argument
     * @throws IllegalArgumentException if the argument is not a valid IP string literal
     */
    public static InetAddress forString(String ipString) {
        byte[] addr = ipStringToBytes(ipString);

        // The argument was malformed, i.e. not an IP string literal.
        if (addr == null) {
            throw new IllegalArgumentException(String.format(Locale.ROOT, "'%s' is not an IP string literal.", ipString));
        }

        return bytesToInetAddress(addr);
    }

    /**
     * Convert a byte array into an InetAddress.
     *
     * {@link InetAddress#getByAddress} is documented as throwing a checked
     * exception "if IP address is of illegal length." We replace it with
     * an unchecked exception, for use by callers who already know that addr
     * is an array of length 4 or 16.
     *
     * @param addr the raw 4-byte or 16-byte IP address in big-endian order
     * @return an InetAddress object created from the raw IP address
     */
    private static InetAddress bytesToInetAddress(byte[] addr) {
        try {
            return InetAddress.getByAddress(addr);
        } catch (UnknownHostException e) {
            throw new AssertionError(e);
        }
    }
}
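A short usage sketch of the vendored class above (assumes the new org.elasticsearch.common.network.InetAddresses is on the classpath; the demo class itself is illustrative):

import java.net.InetAddress;

import org.elasticsearch.common.network.InetAddresses;

public class InetAddressesDemo {
    public static void main(String[] args) {
        System.out.println(InetAddresses.isInetAddress("2001:db8::1"));    // true
        InetAddress ip = InetAddresses.forString("2001:0db8:0:0:0:0:0:1"); // no DNS lookup
        System.out.println(InetAddresses.toAddrString(ip));                // 2001:db8::1 (RFC 5952 zero compression)
        System.out.println(InetAddresses.toUriString(ip));                 // [2001:db8::1]
    }
}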
@ -19,8 +19,6 @@

package org.elasticsearch.common.network;

import com.google.common.net.InetAddresses;

import org.elasticsearch.common.SuppressForbidden;

import java.net.Inet6Address;
@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.TimeValue;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;

@ -73,7 +72,7 @@ public class NetworkService extends AbstractComponent {
        /**
         * Resolves a custom value handling, return <tt>null</tt> if can't handle it.
         */
        InetAddress[] resolveIfPossible(String value);
        InetAddress[] resolveIfPossible(String value) throws IOException;
    }

    private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<>();

@ -162,7 +161,7 @@ public class NetworkService extends AbstractComponent {
        return address;
    }

    private InetAddress[] resolveInetAddress(String host) throws UnknownHostException, IOException {
    private InetAddress[] resolveInetAddress(String host) throws IOException {
        if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
            host = host.substring(1, host.length() - 1);
            // allow custom resolvers to have special names
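A minimal sketch of a resolver written against the revised signature (the resolver class and the special name are hypothetical; the interface it would implement is the CustomNameResolver shown above):

import java.io.IOException;
import java.net.InetAddress;

// Hypothetical resolver matching the changed contract: implementations may now
// let I/O failures propagate instead of swallowing them, and they return null
// when the value is not a name they handle.
class EnvironmentNameResolver /* implements NetworkService.CustomNameResolver */ {
    public InetAddress[] resolveIfPossible(String value) throws IOException {
        if ("cloud:private-ip".equals(value) == false) {
            return null; // not our special name; let other resolvers try
        }
        // A real implementation would call out to a metadata endpoint here and
        // let any IOException reach the caller rather than masking it.
        return new InetAddress[] { InetAddress.getLoopbackAddress() };
    }
}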
@ -23,15 +23,12 @@ import com.carrotsearch.hppc.DoubleArrayList;
import com.carrotsearch.hppc.FloatArrayList;
import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.ObjectArrayList;
import com.google.common.collect.Iterators;
import org.apache.lucene.util.*;

import java.util.*;

/** Collections-related utility methods. */
public enum CollectionUtils {
    CollectionUtils;

public class CollectionUtils {
    public static void sort(LongArrayList list) {
        sort(list.buffer, list.size());
    }

@ -366,13 +363,6 @@ public enum CollectionUtils {

    }

    /**
     * Combines multiple iterators into a single iterator.
     */
    public static <T> Iterator<T> concat(Iterator<? extends T>... iterators) {
        return Iterators.<T>concat(iterators);
    }

    public static <E> ArrayList<E> iterableAsArrayList(Iterable<? extends E> elements) {
        if (elements == null) {
            throw new NullPointerException("elements");
@ -16,6 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index;

import org.elasticsearch.common.Nullable;

@ -34,6 +35,7 @@ import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

/**
@ -58,9 +60,10 @@ public final class IndexServicesProvider {
    private final EngineFactory factory;
    private final BigArrays bigArrays;
    private final IndexSearcherWrapper indexSearcherWrapper;
    private final IndexingMemoryController indexingMemoryController;

    @Inject
    public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper) {
    public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingMemoryController indexingMemoryController) {
        this.indicesLifecycle = indicesLifecycle;
        this.threadPool = threadPool;
        this.mapperService = mapperService;

@ -76,6 +79,7 @@ public final class IndexServicesProvider {
        this.factory = factory;
        this.bigArrays = bigArrays;
        this.indexSearcherWrapper = indexSearcherWrapper;
        this.indexingMemoryController = indexingMemoryController;
    }

    public IndicesLifecycle getIndicesLifecycle() {

@ -134,5 +138,11 @@ public final class IndexServicesProvider {
        return bigArrays;
    }

    public IndexSearcherWrapper getIndexSearcherWrapper() { return indexSearcherWrapper; }
    public IndexSearcherWrapper getIndexSearcherWrapper() {
        return indexSearcherWrapper;
    }

    public IndexingMemoryController getIndexingMemoryController() {
        return indexingMemoryController;
    }
}
@ -1,40 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index;

import org.elasticsearch.common.inject.BindingAnnotation;

import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;

import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

/**
 *
 */
@BindingAnnotation
@Target({FIELD, PARAMETER})
@Retention(RUNTIME)
@Documented
public @interface LocalNodeId {
}
@ -1,39 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index;

import org.elasticsearch.common.inject.AbstractModule;

/**
 *
 */
public class LocalNodeIdModule extends AbstractModule {

    private final String localNodeId;

    public LocalNodeIdModule(String localNodeId) {
        this.localNodeId = localNodeId;
    }

    @Override
    protected void configure() {
        bind(String.class).annotatedWith(LocalNodeId.class).toInstance(localNodeId);
    }
}
@ -31,24 +31,37 @@ import java.io.IOException;
public enum VersionType implements Writeable<VersionType> {
    INTERNAL((byte) 0) {
        @Override
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
            return isVersionConflict(currentVersion, expectedVersion);
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            return isVersionConflict(currentVersion, expectedVersion, deleted);
        }

        @Override
        public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            if (expectedVersion == Versions.MATCH_DELETED) {
                return "document already exists (current version [" + currentVersion + "])";
            }
            return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
        }

        @Override
        public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
            return isVersionConflict(currentVersion, expectedVersion);
            return isVersionConflict(currentVersion, expectedVersion, false);
        }

        private boolean isVersionConflict(long currentVersion, long expectedVersion) {
        @Override
        public String explainConflictForReads(long currentVersion, long expectedVersion) {
            return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
        }

        private boolean isVersionConflict(long currentVersion, long expectedVersion, boolean deleted) {
            if (currentVersion == Versions.NOT_SET) {
                return false;
            }
            if (expectedVersion == Versions.MATCH_ANY) {
                return false;
            }
            if (currentVersion == Versions.NOT_FOUND) {
                return true;
            if (expectedVersion == Versions.MATCH_DELETED) {
                return deleted == false;
            }
            if (currentVersion != expectedVersion) {
                return true;
@ -63,8 +76,7 @@ public enum VersionType implements Writeable<VersionType> {

        @Override
        public boolean validateVersionForWrites(long version) {
            // not allowing Versions.NOT_FOUND as it is not a valid input value.
            return version > 0L || version == Versions.MATCH_ANY;
            return version > 0L || version == Versions.MATCH_ANY || version == Versions.MATCH_DELETED;
        }

        @Override
|
|||
},
|
||||
EXTERNAL((byte) 1) {
|
||||
@Override
|
||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
|
||||
public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||
if (currentVersion == Versions.NOT_SET) {
|
||||
return false;
|
||||
}
|
||||
|
@ -98,6 +110,11 @@ public enum VersionType implements Writeable<VersionType> {
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
|
||||
return "current version [" + currentVersion + "] is higher or equal to the one provided [" + expectedVersion + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
|
||||
if (currentVersion == Versions.NOT_SET) {
|
||||
|
@ -115,6 +132,11 @@ public enum VersionType implements Writeable<VersionType> {
            return false;
        }

        @Override
        public String explainConflictForReads(long currentVersion, long expectedVersion) {
            return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
        }

        @Override
        public long updateVersion(long currentVersion, long expectedVersion) {
            return expectedVersion;
@ -133,7 +155,7 @@ public enum VersionType implements Writeable<VersionType> {
    },
    EXTERNAL_GTE((byte) 2) {
        @Override
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            if (currentVersion == Versions.NOT_SET) {
                return false;
            }

@ -149,6 +171,11 @@ public enum VersionType implements Writeable<VersionType> {
            return false;
        }

        @Override
        public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            return "current version [" + currentVersion + "] is higher than the one provided [" + expectedVersion + "]";
        }

        @Override
        public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
            if (currentVersion == Versions.NOT_SET) {
@ -166,6 +193,11 @@ public enum VersionType implements Writeable<VersionType> {
            return false;
        }

        @Override
        public String explainConflictForReads(long currentVersion, long expectedVersion) {
            return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]";
        }

        @Override
        public long updateVersion(long currentVersion, long expectedVersion) {
            return expectedVersion;
@ -187,7 +219,7 @@ public enum VersionType implements Writeable<VersionType> {
     */
    FORCE((byte) 3) {
        @Override
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion) {
        public boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            if (currentVersion == Versions.NOT_SET) {
                return false;
            }
@ -195,16 +227,26 @@ public enum VersionType implements Writeable<VersionType> {
                return false;
            }
            if (expectedVersion == Versions.MATCH_ANY) {
                return true;
                throw new IllegalStateException("you must specify a version when use VersionType.FORCE");
            }
            return false;
        }

        @Override
        public String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
            throw new AssertionError("VersionType.FORCE should never result in a write conflict");
        }

        @Override
        public boolean isVersionConflictForReads(long currentVersion, long expectedVersion) {
            return false;
        }

        @Override
        public String explainConflictForReads(long currentVersion, long expectedVersion) {
            throw new AssertionError("VersionType.FORCE should never result in a read conflict");
        }

        @Override
        public long updateVersion(long currentVersion, long expectedVersion) {
            return expectedVersion;
@ -237,17 +279,46 @@ public enum VersionType implements Writeable<VersionType> {
    /**
     * Checks whether the current version conflicts with the expected version, based on the current version type.
     *
     * @param currentVersion the current version for the document
     * @param expectedVersion the version specified for the write operation
     * @param deleted true if the document is currently deleted (note that #currentVersion will typically be
     *                {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted
     * @return true if versions conflict false o.w.
     */
    public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion);
    public abstract boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted);


    /**
     * Returns a human readable explanation for a version conflict on write.
     *
     * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true;
     *
     * @param currentVersion the current version for the document
     * @param expectedVersion the version specified for the write operation
     * @param deleted true if the document is currently deleted (note that #currentVersion will typically be
     *                {@link Versions#NOT_FOUND}, but may be something else if the document was recently deleted
     */
    public abstract String explainConflictForWrites(long currentVersion, long expectedVersion, boolean deleted);

    /**
     * Checks whether the current version conflicts with the expected version, based on the current version type.
     *
     * @param currentVersion the current version for the document
     * @param expectedVersion the version specified for the read operation
     * @return true if versions conflict false o.w.
     */
    public abstract boolean isVersionConflictForReads(long currentVersion, long expectedVersion);

    /**
     * Returns a human readable explanation for a version conflict on read.
     *
     * Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true;
     *
     * @param currentVersion the current version for the document
     * @param expectedVersion the version specified for the read operation
     */
    public abstract String explainConflictForReads(long currentVersion, long expectedVersion);

    /**
     * Returns the new version for a document, based on its current one and the specified in the request
     *
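Taken together, the contract is: callers first ask isVersionConflictForWrites (now with the deleted flag) or isVersionConflictForReads, and only on a conflict ask for the explanation string. A hedged sketch of a caller, assuming the post-refactoring VersionType from this commit is on the classpath (the demo class itself is illustrative):

import org.elasticsearch.index.VersionType;

public class VersionCheckDemo {
    static long checkWrite(VersionType versionType, long currentVersion, long expectedVersion, boolean deleted) {
        if (versionType.isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
            // explainConflictForWrites is only meaningful once a conflict was detected
            throw new IllegalStateException(
                    versionType.explainConflictForWrites(currentVersion, expectedVersion, deleted));
        }
        return versionType.updateVersion(currentVersion, expectedVersion);
    }

    public static void main(String[] args) {
        // INTERNAL: version 7 matches the expectation, so the write proceeds and bumps to 8
        System.out.println(checkWrite(VersionType.INTERNAL, 7L, 7L, false));
        // EXTERNAL: the caller supplies the new version outright, 12 replaces 7
        System.out.println(checkWrite(VersionType.EXTERNAL, 7L, 12L, false));
    }
}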
@ -18,22 +18,17 @@
 */
package org.elasticsearch.index.codec.postingsformat;

import com.google.common.collect.Iterators;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.BloomFilter;
import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;

import java.io.IOException;
import java.util.Iterator;
import java.util.function.Predicate;

/**
@ -1,66 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.engine;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.Objects;

/**
 *
 */
public class CreateFailedEngineException extends EngineException {

    private final String type;

    private final String id;

    public CreateFailedEngineException(ShardId shardId, String type, String id, Throwable cause) {
        super(shardId, "Create failed for [" + type + "#" + id + "]", cause);
        Objects.requireNonNull(type, "type must not be null");
        Objects.requireNonNull(id, "id must not be null");
        this.type = type;
        this.id = id;
    }

    public CreateFailedEngineException(StreamInput in) throws IOException {
        super(in);
        type = in.readString();
        id = in.readString();
    }

    public String type() {
        return this.type;
    }

    public String id() {
        return this.id;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(type);
        out.writeString(id);
    }
}
@ -1,44 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.engine;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

/**
 *
 */
public class DocumentAlreadyExistsException extends EngineException {

    public DocumentAlreadyExistsException(ShardId shardId, String type, String id) {
        super(shardId, "[" + type + "][" + id + "]: document already exists");
    }

    public DocumentAlreadyExistsException(StreamInput in) throws IOException {
        super(in);
    }

    @Override
    public RestStatus status() {
        return RestStatus.CONFLICT;
    }
}
@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;

@ -60,7 +59,6 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.function.Supplier;

/**
 *
@ -144,7 +142,8 @@ public abstract class Engine implements Closeable {
        return new MergeStats();
    }

    /** A throttling class that can be activated, causing the
    /**
     * A throttling class that can be activated, causing the
     * {@code acquireThrottle} method to block on a lock when throttling
     * is enabled
     */
@ -203,9 +202,7 @@ public abstract class Engine implements Closeable {
        }
    }

    public abstract void create(Create create) throws EngineException;

    public abstract boolean index(Index index) throws EngineException;
    public abstract boolean index(Index operation) throws EngineException;

    public abstract void delete(Delete delete) throws EngineException;
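This hunk is the heart of the refactoring: the dedicated create(Create) path is gone, and a create becomes an index(Index) whose version rules (MATCH_DELETED) reject an existing live document; the boolean return reports whether the call created rather than updated. A toy model of that collapsed contract (everything here is illustrative, not the real Engine):

import java.util.HashMap;
import java.util.Map;

// Toy model of the collapsed create/index path: a single index() method whose
// return value distinguishes "created" from "updated", mirroring the new
// `boolean index(Index operation)` contract.
public class ToyEngine {
    private final Map<String, String> docs = new HashMap<>();

    public boolean index(String uid, String source) {
        return docs.put(uid, source) == null; // true only when the uid was absent
    }

    public static void main(String[] args) {
        ToyEngine engine = new ToyEngine();
        System.out.println(engine.index("1", "{\"f\":1}")); // true: created
        System.out.println(engine.index("1", "{\"f\":2}")); // false: updated in place
    }
}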
@ -216,7 +213,8 @@ public abstract class Engine implements Closeable {
    /**
     * Attempts to do a special commit where the given syncID is put into the commit data. The attempt
     * succeeds if there are not pending writes in lucene and the current point is equal to the expected one.
     * @param syncId id of this sync
     *
     * @param syncId           id of this sync
     * @param expectedCommitId the expected value of
     * @return true if the sync commit was made, false o.w.
     */
@ -243,7 +241,8 @@ public abstract class Engine implements Closeable {
            if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
                Releasables.close(searcher);
                Uid uid = Uid.createUid(get.uid().text());
                throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
                throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
                        get.versionType().explainConflictForReads(docIdAndVersion.version, get.version()));
            }
        }
@ -328,7 +327,7 @@ public abstract class Engine implements Closeable {
        } catch (IOException e) {
            // Fall back to reading from the store if reading from the commit fails
            try {
                return store. readLastCommittedSegmentsInfo();
                return store.readLastCommittedSegmentsInfo();
            } catch (IOException e2) {
                e2.addSuppressed(e);
                throw e2;
@ -366,6 +365,9 @@ public abstract class Engine implements Closeable {
        stats.addIndexWriterMaxMemoryInBytes(0);
    }

    /** How much heap Lucene's IndexWriter is using */
    abstract public long indexWriterRAMBytesUsed();

    protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
        ensureOpen();
        Map<String, Segment> segments = new HashMap<>();
@ -469,7 +471,8 @@ public abstract class Engine implements Closeable {

    /**
     * Flushes the state of the engine including the transaction log, clearing memory.
     * @param force if <code>true</code> a lucene commit is executed even if no changes need to be committed.
     *
     * @param force         if <code>true</code> a lucene commit is executed even if no changes need to be committed.
     * @param waitIfOngoing if <code>true</code> this call will block until all currently running flushes have finished.
     *                      Otherwise this call will return without blocking.
     * @return the commit Id for the resulting commit
@ -607,62 +610,97 @@ public abstract class Engine implements Closeable {
        }
    }

    public static interface Operation {
        static enum Type {
            CREATE,
            INDEX,
            DELETE
        }

        static enum Origin {
            PRIMARY,
            REPLICA,
            RECOVERY
        }

        Type opType();

        Origin origin();
    }

    public static abstract class IndexingOperation implements Operation {

    public static abstract class Operation {
        private final Term uid;
        private final ParsedDocument doc;
        private long version;
        private final VersionType versionType;
        private final Origin origin;
        private Translog.Location location;

        private final long startTime;
        private long endTime;

        public IndexingOperation(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
        public Operation(Term uid, long version, VersionType versionType, Origin origin, long startTime) {
            this.uid = uid;
            this.doc = doc;
            this.version = version;
            this.versionType = versionType;
            this.origin = origin;
            this.startTime = startTime;
        }

        public IndexingOperation(Term uid, ParsedDocument doc) {
            this(uid, doc, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
        public static enum Origin {
            PRIMARY,
            REPLICA,
            RECOVERY
        }

        @Override
        public Origin origin() {
            return this.origin;
        }

        public ParsedDocument parsedDoc() {
            return this.doc;
        }

        public Term uid() {
            return this.uid;
        }

        public long version() {
            return this.version;
        }

        public void updateVersion(long version) {
            this.version = version;
        }

        public void setTranslogLocation(Translog.Location location) {
            this.location = location;
        }

        public Translog.Location getTranslogLocation() {
            return this.location;
        }

        public VersionType versionType() {
            return this.versionType;
        }

        /**
         * Returns operation start time in nanoseconds.
         */
        public long startTime() {
            return this.startTime;
        }

        public void endTime(long endTime) {
            this.endTime = endTime;
        }

        /**
         * Returns operation end time in nanoseconds.
         */
        public long endTime() {
            return this.endTime;
        }
    }

    public static class Index extends Operation {

        private final ParsedDocument doc;

        public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
            super(uid, version, versionType, origin, startTime);
            this.doc = doc;
        }

        public Index(Term uid, ParsedDocument doc) {
            this(uid, doc, Versions.MATCH_ANY);
        }

        public Index(Term uid, ParsedDocument doc, long version) {
            this(uid, doc, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
        }

        public ParsedDocument parsedDoc() {
            return this.doc;
        }

        public String type() {
            return this.doc.type();
        }
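A condensed view of the reshaped hierarchy: shared bookkeeping (uid, version, origin, timing, translog location) now lives in the abstract Operation base, and Index only adds the parsed document. A sketch with the Elasticsearch-specific types stubbed out as plain placeholders (this is not the real code, just its shape):

// Condensed sketch of the hierarchy after this hunk; Term, ParsedDocument and
// Translog.Location are replaced by String placeholders for readability.
abstract class OperationSketch {
    enum Origin { PRIMARY, REPLICA, RECOVERY }

    private final String uid;      // stands in for Term uid
    private long version;
    private final Origin origin;
    private final long startTime;

    OperationSketch(String uid, long version, Origin origin, long startTime) {
        this.uid = uid;
        this.version = version;
        this.origin = origin;
        this.startTime = startTime;
    }

    String uid() { return uid; }
    long version() { return version; }
    void updateVersion(long version) { this.version = version; }
    Origin origin() { return origin; }
    long startTime() { return startTime; }
}

final class IndexSketch extends OperationSketch {
    private final String doc;      // stands in for ParsedDocument

    IndexSketch(String uid, String doc, long version, OperationSketch.Origin origin, long startTime) {
        super(uid, version, origin, startTime);
        this.doc = doc;
    }

    String parsedDoc() { return doc; }
}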
@ -683,27 +721,12 @@ public abstract class Engine implements Closeable {
            return this.doc.ttl();
        }

        public long version() {
            return this.version;
        }

        @Override
        public void updateVersion(long version) {
            this.version = version;
            super.updateVersion(version);
            this.doc.version().setLongValue(version);
        }

        public void setTranslogLocation(Translog.Location location) {
            this.location = location;
        }

        public Translog.Location getTranslogLocation() {
            return this.location;
        }

        public VersionType versionType() {
            return this.versionType;
        }

        public String parent() {
            return this.doc.parent();
        }
@ -715,96 +738,17 @@ public abstract class Engine implements Closeable {
        public BytesReference source() {
            return this.doc.source();
        }

        /**
         * Returns operation start time in nanoseconds.
         */
        public long startTime() {
            return this.startTime;
        }

        public void endTime(long endTime) {
            this.endTime = endTime;
        }

        /**
         * Returns operation end time in nanoseconds.
         */
        public long endTime() {
            return this.endTime;
        }

        /**
         * Execute this operation against the provided {@link IndexShard} and
         * return whether the document was created.
         */
        public abstract boolean execute(IndexShard shard);
    }

    public static final class Create extends IndexingOperation {

        public Create(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
            super(uid, doc, version, versionType, origin, startTime);
        }

        public Create(Term uid, ParsedDocument doc) {
            super(uid, doc);
        }

        @Override
        public Type opType() {
            return Type.CREATE;
        }

        @Override
        public boolean execute(IndexShard shard) {
            shard.create(this);
            return true;
        }
    }

    public static final class Index extends IndexingOperation {

        public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
            super(uid, doc, version, versionType, origin, startTime);
        }

        public Index(Term uid, ParsedDocument doc) {
            super(uid, doc);
        }

        @Override
        public Type opType() {
            return Type.INDEX;
        }

        @Override
        public boolean execute(IndexShard shard) {
            return shard.index(this);
        }
    }

    public static class Delete implements Operation {
    public static class Delete extends Operation {
        private final String type;
        private final String id;
        private final Term uid;
        private long version;
        private final VersionType versionType;
        private final Origin origin;
        private boolean found;

        private final long startTime;
        private long endTime;
        private Translog.Location location;

        public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) {
            super(uid, version, versionType, origin, startTime);
            this.type = type;
            this.id = id;
            this.uid = uid;
            this.version = version;
            this.versionType = versionType;
            this.origin = origin;
            this.startTime = startTime;
            this.found = found;
        }
@ -816,16 +760,6 @@ public abstract class Engine implements Closeable {
            this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found());
        }

        @Override
        public Type opType() {
            return Type.DELETE;
        }

        @Override
        public Origin origin() {
            return this.origin;
        }

        public String type() {
            return this.type;
        }
@ -834,55 +768,14 @@ public abstract class Engine implements Closeable {
            return this.id;
        }

        public Term uid() {
            return this.uid;
        }

        public void updateVersion(long version, boolean found) {
            this.version = version;
            updateVersion(version);
            this.found = found;
        }

        /**
         * before delete execution this is the version to be deleted. After this is the version of the "delete" transaction record.
         */
        public long version() {
            return this.version;
        }

        public VersionType versionType() {
            return this.versionType;
        }

        public boolean found() {
            return this.found;
        }

        /**
         * Returns operation start time in nanoseconds.
         */
        public long startTime() {
            return this.startTime;
        }

        public void endTime(long endTime) {
            this.endTime = endTime;
        }

        /**
         * Returns operation end time in nanoseconds.
         */
        public long endTime() {
            return this.endTime;
        }

        public void setTranslogLocation(Translog.Location location) {
            this.location = location;
        }

        public Translog.Location getTranslogLocation() {
            return this.location;
        }
    }

    public static class DeleteByQuery {
@ -1135,12 +1028,18 @@ public abstract class Engine implements Closeable {

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }

            CommitId commitId = (CommitId) o;

            if (!Arrays.equals(id, commitId.id)) return false;
            if (!Arrays.equals(id, commitId.id)) {
                return false;
            }

            return true;
        }
@ -1151,5 +1050,6 @@ public abstract class Engine implements Closeable {
        }
    }

    public void onSettingsChanged() {}
    public void onSettingsChanged() {
    }
}
@ -40,6 +40,7 @@ import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.TimeUnit;
@ -107,8 +108,6 @@ public final class EngineConfig {

    public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
    public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
    public static final ByteSizeValue DEFAULT_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
    public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER");

    public static final String DEFAULT_VERSION_MAP_SIZE = "25%";
@ -139,7 +138,8 @@ public final class EngineConfig {
        this.failedEngineListener = failedEngineListener;
        this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
        codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
        indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE;
        // We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing:
        indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER;
        gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
        versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
        updateVersionMapSize();
@ -258,10 +258,10 @@ public final class EngineConfig {

    /**
     * Returns a {@link org.elasticsearch.index.indexing.ShardIndexingService} used inside the engine to inform about
     * pre and post index and create operations. The operations are used for statistic purposes etc.
     * pre and post index. The operations are used for statistic purposes etc.
     *
     * @see org.elasticsearch.index.indexing.ShardIndexingService#postCreate(org.elasticsearch.index.engine.Engine.Create)
     * @see org.elasticsearch.index.indexing.ShardIndexingService#preCreate(org.elasticsearch.index.engine.Engine.Create)
     * @see org.elasticsearch.index.indexing.ShardIndexingService#postIndex(Engine.Index)
     * @see org.elasticsearch.index.indexing.ShardIndexingService#preIndex(Engine.Index)
     *
     */
    public ShardIndexingService getIndexingService() {
@ -30,16 +30,16 @@ import java.io.IOException;
 */
public class EngineException extends ElasticsearchException {

    public EngineException(ShardId shardId, String msg) {
        this(shardId, msg, null);
    public EngineException(ShardId shardId, String msg, Object... params) {
        this(shardId, msg, null, params);
    }

    public EngineException(ShardId shardId, String msg, Throwable cause) {
        super(msg, cause);
    public EngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
        super(msg, cause, params);
        setShard(shardId);
    }

    public EngineException(StreamInput in) throws IOException {
        super(in);
    }
}
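The Object... pass-through lets subclasses hand positional arguments to the base class's message formatting instead of concatenating strings eagerly. A minimal toy of the same shape (the base class here is a plain stand-in; it only imitates the {} placeholder style, not ElasticsearchException's actual formatter):

// Toy exception demonstrating the varargs pass-through pattern; the format
// helper imitates "{}" placeholder substitution for illustration only.
class ToyEngineException extends RuntimeException {
    ToyEngineException(String msg, Object... params) {
        this(msg, null, params);
    }

    ToyEngineException(String msg, Throwable cause, Object... params) {
        super(format(msg, params), cause);
    }

    private static String format(String msg, Object[] params) {
        StringBuilder out = new StringBuilder();
        int p = 0, i = 0;
        while (i < msg.length()) {
            if (p < params.length && msg.startsWith("{}", i)) {
                out.append(params[p++]); // substitute next positional argument
                i += 2;
            } else {
                out.append(msg.charAt(i++));
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(new ToyEngineException("version conflict on [{}]", "doc#1").getMessage());
        // prints: version conflict on [doc#1]
    }
}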
@ -316,7 +316,8 @@ public class InternalEngine extends Engine {
            }
            if (get.versionType().isVersionConflictForReads(versionValue.version(), get.version())) {
                Uid uid = Uid.createUid(get.uid().text());
                throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
                throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
                        get.versionType().explainConflictForReads(versionValue.version(), get.version()));
            }
            Translog.Operation op = translog.read(versionValue.translogLocation());
            if (op != null) {
@ -331,96 +332,7 @@ public class InternalEngine extends Engine {
    }

    @Override
    public void create(Create create) throws EngineException {
        try (ReleasableLock lock = readLock.acquire()) {
            ensureOpen();
            if (create.origin() == Operation.Origin.RECOVERY) {
                // Don't throttle recovery operations
                innerCreate(create);
            } else {
                try (Releasable r = throttle.acquireThrottle()) {
                    innerCreate(create);
                }
            }
        } catch (OutOfMemoryError | IllegalStateException | IOException t) {
            maybeFailEngine("create", t);
            throw new CreateFailedEngineException(shardId, create.type(), create.id(), t);
        }
        checkVersionMapRefresh();
    }

    private void innerCreate(Create create) throws IOException {
        synchronized (dirtyLock(create.uid())) {
            final long currentVersion;
            final VersionValue versionValue;
            versionValue = versionMap.getUnderLock(create.uid().bytes());
            if (versionValue == null) {
                currentVersion = loadCurrentVersionFromIndex(create.uid());
            } else {
                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
                    currentVersion = Versions.NOT_FOUND; // deleted, and GC
                } else {
                    currentVersion = versionValue.version();
                }
            }
            innerCreateUnderLock(create, currentVersion, versionValue);
        }
    }

    private void innerCreateUnderLock(Create create, long currentVersion, VersionValue versionValue) throws IOException {

        // same logic as index
        long updatedVersion;
        long expectedVersion = create.version();
        if (create.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
            if (create.origin() == Operation.Origin.RECOVERY) {
                return;
            } else {
                throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
            }
        }
        updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion);

        // if the doc exists
        boolean doUpdate = false;
        if ((versionValue != null && versionValue.delete() == false) || (versionValue == null && currentVersion != Versions.NOT_FOUND)) {
            if (create.origin() == Operation.Origin.RECOVERY) {
                return;
            } else if (create.origin() == Operation.Origin.REPLICA) {
                // #7142: the primary already determined it's OK to index this document, and we confirmed above that the version doesn't
                // conflict, so we must also update here on the replica to remain consistent:
                doUpdate = true;
            } else {
                // On primary, we throw DAEE if the _uid is already in the index with an older version:
                assert create.origin() == Operation.Origin.PRIMARY;
                throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
            }
        }

        create.updateVersion(updatedVersion);

        if (doUpdate) {
            if (create.docs().size() > 1) {
                indexWriter.updateDocuments(create.uid(), create.docs());
            } else {
                indexWriter.updateDocument(create.uid(), create.docs().get(0));
            }
        } else {
            if (create.docs().size() > 1) {
                indexWriter.addDocuments(create.docs());
            } else {
                indexWriter.addDocument(create.docs().get(0));
            }
        }
        Translog.Location translogLocation = translog.add(new Translog.Create(create));

        versionMap.putUnderLock(create.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
        create.setTranslogLocation(translogLocation);
        indexingService.postCreateUnderLock(create);
    }

    @Override
    public boolean index(Index index) throws EngineException {
    public boolean index(Index index) {
        final boolean created;
        try (ReleasableLock lock = readLock.acquire()) {
            ensureOpen();
@@ -440,6 +352,67 @@ public class InternalEngine extends Engine {
        return created;
    }

    private boolean innerIndex(Index index) throws IOException {
        synchronized (dirtyLock(index.uid())) {
            final long currentVersion;
            final boolean deleted;
            VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
            if (versionValue == null) {
                currentVersion = loadCurrentVersionFromIndex(index.uid());
                deleted = currentVersion == Versions.NOT_FOUND;
            } else {
                deleted = versionValue.delete();
                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
                    currentVersion = Versions.NOT_FOUND; // deleted, and GC
                } else {
                    currentVersion = versionValue.version();
                }
            }

            long expectedVersion = index.version();
            if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
                if (index.origin() == Operation.Origin.RECOVERY) {
                    return false;
                } else {
                    throw new VersionConflictEngineException(shardId, index.type(), index.id(),
                            index.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
                }
            }
            long updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);

            final boolean created;
            index.updateVersion(updatedVersion);

            if (currentVersion == Versions.NOT_FOUND) {
                // document does not exists, we can optimize for create
                created = true;
                if (index.docs().size() > 1) {
                    indexWriter.addDocuments(index.docs());
                } else {
                    indexWriter.addDocument(index.docs().get(0));
                }
            } else {
                if (versionValue != null) {
                    created = versionValue.delete(); // we have a delete which is not GC'ed...
                } else {
                    created = false;
                }
                if (index.docs().size() > 1) {
                    indexWriter.updateDocuments(index.uid(), index.docs());
                } else {
                    indexWriter.updateDocument(index.uid(), index.docs().get(0));
                }
            }
            Translog.Location translogLocation = translog.add(new Translog.Index(index));

            versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
            index.setTranslogLocation(translogLocation);

            indexingService.postIndexUnderLock(index);
            return created;
        }
    }

    /**
     * Forces a refresh if the versionMap is using too much RAM
     */
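The `deleted` flag threaded through `isVersionConflictForWrites` lets the check distinguish "document exists with another version" from "document was deleted in the meantime". A simplified, self-contained sketch of that decision; the constants and the single-version-type logic below are stand-ins for the real `VersionType` implementations:

final class VersionCheckSketch {
    static final long NOT_FOUND = -1L; // mirrors Versions.NOT_FOUND
    static final long MATCH_ANY = -3L; // caller has no version expectation

    // simplified internal-style check; real logic covers several version types
    static boolean isVersionConflictForWrites(long currentVersion, long expectedVersion, boolean deleted) {
        if (expectedVersion == MATCH_ANY) {
            return false; // no expectation, never a conflict
        }
        if (currentVersion == NOT_FOUND || deleted) {
            // an explicit expected version can never match a missing/deleted doc
            return true;
        }
        return currentVersion != expectedVersion;
    }

    public static void main(String[] args) {
        System.out.println(isVersionConflictForWrites(3, 3, false)); // false: versions line up
        System.out.println(isVersionConflictForWrites(3, 2, false)); // true: stale expected version
        System.out.println(isVersionConflictForWrites(3, 3, true));  // true: doc was deleted meanwhile
    }
}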
@@ -467,62 +440,6 @@ public class InternalEngine extends Engine {
        }
    }

    private boolean innerIndex(Index index) throws IOException {
        synchronized (dirtyLock(index.uid())) {
            final long currentVersion;
            VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
            if (versionValue == null) {
                currentVersion = loadCurrentVersionFromIndex(index.uid());
            } else {
                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
                    currentVersion = Versions.NOT_FOUND; // deleted, and GC
                } else {
                    currentVersion = versionValue.version();
                }
            }

            long updatedVersion;
            long expectedVersion = index.version();
            if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
                if (index.origin() == Operation.Origin.RECOVERY) {
                    return false;
                } else {
                    throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
                }
            }
            updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);

            final boolean created;
            index.updateVersion(updatedVersion);
            if (currentVersion == Versions.NOT_FOUND) {
                // document does not exists, we can optimize for create
                created = true;
                if (index.docs().size() > 1) {
                    indexWriter.addDocuments(index.docs());
                } else {
                    indexWriter.addDocument(index.docs().get(0));
                }
            } else {
                if (versionValue != null) {
                    created = versionValue.delete(); // we have a delete which is not GC'ed...
                } else {
                    created = false;
                }
                if (index.docs().size() > 1) {
                    indexWriter.updateDocuments(index.uid(), index.docs());
                } else {
                    indexWriter.updateDocument(index.uid(), index.docs().get(0));
                }
            }
            Translog.Location translogLocation = translog.add(new Translog.Index(index));

            versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
            index.setTranslogLocation(translogLocation);
            indexingService.postIndexUnderLock(index);
            return created;
        }
    }

    @Override
    public void delete(Delete delete) throws EngineException {
        try (ReleasableLock lock = readLock.acquire()) {
@@ -549,10 +466,13 @@ public class InternalEngine extends Engine {
    private void innerDelete(Delete delete) throws IOException {
        synchronized (dirtyLock(delete.uid())) {
            final long currentVersion;
            final boolean deleted;
            VersionValue versionValue = versionMap.getUnderLock(delete.uid().bytes());
            if (versionValue == null) {
                currentVersion = loadCurrentVersionFromIndex(delete.uid());
                deleted = currentVersion == Versions.NOT_FOUND;
            } else {
                deleted = versionValue.delete();
                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
                    currentVersion = Versions.NOT_FOUND; // deleted, and GC
                } else {
@@ -562,11 +482,12 @@ public class InternalEngine extends Engine {

            long updatedVersion;
            long expectedVersion = delete.version();
            if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
            if (delete.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
                if (delete.origin() == Operation.Origin.RECOVERY) {
                    return;
                } else {
                    throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion);
                    throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
                            delete.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
                }
            }
            updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
@@ -904,6 +825,11 @@ public class InternalEngine extends Engine {
        stats.addIndexWriterMaxMemoryInBytes((long) (indexWriter.getConfig().getRAMBufferSizeMB() * 1024 * 1024));
    }

    @Override
    public long indexWriterRAMBytesUsed() {
        return indexWriter.ramBytesUsed();
    }

    @Override
    public List<Segment> segments(boolean verbose) {
        try (ReleasableLock lock = readLock.acquire()) {
@@ -19,7 +19,6 @@

package org.elasticsearch.index.engine;

import com.google.common.collect.Iterators;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.elasticsearch.common.Nullable;

@@ -32,7 +31,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

public class Segment implements Streamable {
@@ -102,11 +102,6 @@ public class ShadowEngine extends Engine {
    }


    @Override
    public void create(Create create) throws EngineException {
        throw new UnsupportedOperationException(shardId + " create operation not allowed on shadow engine");
    }

    @Override
    public boolean index(Index index) throws EngineException {
        throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");

@@ -245,4 +240,9 @@ public class ShadowEngine extends Engine {
        return lastCommittedSegmentInfos;
    }

    @Override
    public long indexWriterRAMBytesUsed() {
        // No IndexWriter
        throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
    }
}
@@ -29,8 +29,16 @@ import java.io.IOException;
 */
public class VersionConflictEngineException extends EngineException {

    public VersionConflictEngineException(ShardId shardId, String type, String id, long current, long provided) {
        super(shardId, "[" + type + "][" + id + "]: version conflict, current [" + current + "], provided [" + provided + "]");
    public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) {
        this(shardId, null, type, id, explanation);
    }

    public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) {
        this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation);
    }

    public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
        super(shardId, msg, cause, params);
    }

    @Override
@@ -28,39 +28,8 @@ public abstract class IndexingOperationListener {
    /**
     * Called before the indexing occurs.
     */
    public Engine.Create preCreate(Engine.Create create) {
        return create;
    }

    /**
     * Called after the indexing occurs, under a locking scheme to maintain
     * concurrent updates to the same doc.
     * <p>
     * Note, long operations should not occur under this callback.
     */
    public void postCreateUnderLock(Engine.Create create) {

    }

    /**
     * Called after create index operation occurred.
     */
    public void postCreate(Engine.Create create) {

    }

    /**
     * Called after create index operation occurred with exception.
     */
    public void postCreate(Engine.Create create, Throwable ex) {

    }

    /**
     * Called before the indexing occurs.
     */
    public Engine.Index preIndex(Engine.Index index) {
        return index;
    public Engine.Index preIndex(Engine.Index operation) {
        return operation;
    }

    /**
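With `Engine.Create` folded into `Engine.Index`, a single `preIndex` hook now observes both create and index operations, so custom listeners no longer need parallel create/index implementations. A hypothetical listener built on the API above; the class name and the validation rule are illustrative, and `Engine.Index`/`IndexingOperationListener` are the Elasticsearch types shown in this diff:

public class AuditingOperationListener extends IndexingOperationListener {

    @Override
    public Engine.Index preIndex(Engine.Index operation) {
        // validate or enrich the operation before it reaches the engine;
        // this single hook now fires for both "create" and "index" paths
        if (operation.id() == null || operation.id().isEmpty()) {
            throw new IllegalArgumentException("document id must not be empty");
        }
        return operation; // listeners may also return a wrapped/modified operation
    }
}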
@@ -128,10 +128,6 @@ public final class IndexingSlowLog {
        postIndexing(index.parsedDoc(), tookInNanos);
    }

    void postCreate(Engine.Create create, long tookInNanos) {
        postIndexing(create.parsedDoc(), tookInNanos);
    }

    /**
     * Reads how much of the source to log. The user can specify any value they
     * like and numbers are interpreted the maximum number of characters to log
@@ -85,25 +85,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
        listeners.remove(listener);
    }

    public Engine.Create preCreate(Engine.Create create) {
        totalStats.indexCurrent.inc();
        typeStats(create.type()).indexCurrent.inc();
        for (IndexingOperationListener listener : listeners) {
            create = listener.preCreate(create);
        }
        return create;
    }

    public void postCreateUnderLock(Engine.Create create) {
        for (IndexingOperationListener listener : listeners) {
            try {
                listener.postCreateUnderLock(create);
            } catch (Exception e) {
                logger.warn("postCreateUnderLock listener [{}] failed", e, listener);
            }
        }
    }

    public void throttlingActivated() {
        totalStats.setThrottled(true);
    }
@@ -112,40 +93,13 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
        totalStats.setThrottled(false);
    }

    public void postCreate(Engine.Create create) {
        long took = create.endTime() - create.startTime();
        totalStats.indexMetric.inc(took);
        totalStats.indexCurrent.dec();
        StatsHolder typeStats = typeStats(create.type());
        typeStats.indexMetric.inc(took);
        typeStats.indexCurrent.dec();
        slowLog.postCreate(create, took);
        for (IndexingOperationListener listener : listeners) {
            try {
                listener.postCreate(create);
            } catch (Exception e) {
                logger.warn("postCreate listener [{}] failed", e, listener);
            }
        }
    }

    public void postCreate(Engine.Create create, Throwable ex) {
        for (IndexingOperationListener listener : listeners) {
            try {
                listener.postCreate(create, ex);
            } catch (Throwable t) {
                logger.warn("postCreate listener [{}] failed", t, listener);
            }
        }
    }

    public Engine.Index preIndex(Engine.Index index) {
    public Engine.Index preIndex(Engine.Index operation) {
        totalStats.indexCurrent.inc();
        typeStats(index.type()).indexCurrent.inc();
        typeStats(operation.type()).indexCurrent.inc();
        for (IndexingOperationListener listener : listeners) {
            index = listener.preIndex(index);
            operation = listener.preIndex(operation);
        }
        return index;
        return operation;
    }

    public void postIndexUnderLock(Engine.Index index) {
@@ -43,7 +43,7 @@ import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;

@@ -57,7 +57,7 @@ public class DocumentMapperParser {
    final MapperService mapperService;
    final AnalysisService analysisService;
    private static final ESLogger logger = Loggers.getLogger(DocumentMapperParser.class);
    private final SimilarityLookupService similarityLookupService;
    private final SimilarityService similarityService;
    private final ScriptService scriptService;

    private final RootObjectMapper.TypeParser rootObjectTypeParser = new RootObjectMapper.TypeParser();

@@ -71,12 +71,12 @@ public class DocumentMapperParser {
    private volatile SortedMap<String, Mapper.TypeParser> additionalRootMappers;

    public DocumentMapperParser(@IndexSettings Settings indexSettings, MapperService mapperService, AnalysisService analysisService,
                                SimilarityLookupService similarityLookupService, ScriptService scriptService) {
                                SimilarityService similarityService, ScriptService scriptService) {
        this.indexSettings = indexSettings;
        this.parseFieldMatcher = new ParseFieldMatcher(indexSettings);
        this.mapperService = mapperService;
        this.analysisService = analysisService;
        this.similarityLookupService = similarityLookupService;
        this.similarityService = similarityService;
        this.scriptService = scriptService;
        MapBuilder<String, Mapper.TypeParser> typeParsersBuilder = new MapBuilder<String, Mapper.TypeParser>()
                .put(ByteFieldMapper.CONTENT_TYPE, new ByteFieldMapper.TypeParser())

@@ -142,7 +142,7 @@ public class DocumentMapperParser {
    }

    public Mapper.TypeParser.ParserContext parserContext(String type) {
        return new Mapper.TypeParser.ParserContext(type, analysisService, similarityLookupService, mapperService, typeParsers, indexVersionCreated, parseFieldMatcher);
        return new Mapper.TypeParser.ParserContext(type, analysisService, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher);
    }

    public DocumentMapper parse(String source) throws MapperParsingException {
@@ -122,7 +122,7 @@ class DocumentParser implements Closeable {
                // entire type is disabled
                parser.skipChildren();
            } else if (emptyDoc == false) {
                Mapper update = parseObject(context, mapping.root);
                Mapper update = parseObject(context, mapping.root, true);
                if (update != null) {
                    context.addDynamicMappingsUpdate(update);
                }

@@ -194,7 +194,7 @@ class DocumentParser implements Closeable {
        return doc;
    }

    static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper) throws IOException {
    static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException {
        if (mapper.isEnabled() == false) {
            context.parser().skipChildren();
            return null;

@@ -202,6 +202,10 @@ class DocumentParser implements Closeable {
        XContentParser parser = context.parser();

        String currentFieldName = parser.currentName();
        if (atRoot && MapperService.isMetadataField(currentFieldName) &&
                Version.indexCreated(context.indexSettings()).onOrAfter(Version.V_2_0_0_beta1)) {
            throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters.");
        }
        XContentParser.Token token = parser.currentToken();
        if (token == XContentParser.Token.VALUE_NULL) {
            // the object is null ("obj1" : null), simply bail
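The `atRoot` flag confines the rejection to top-level fields: a nested object may still legitimately contain a field named like a metadata field. A standalone sketch of the guard; the metadata-field set below is an illustrative subset of what `MapperService.isMetadataField` actually knows about:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class MetadataFieldGuardDemo {
    // illustrative subset; the real list lives in MapperService
    private static final Set<String> META =
            new HashSet<>(Arrays.asList("_id", "_type", "_routing", "_ttl", "_timestamp"));

    static void checkField(String field, boolean atRoot) {
        if (atRoot && META.contains(field)) {
            throw new IllegalArgumentException(
                    "Field [" + field + "] is a metadata field and cannot be added inside a document.");
        }
    }

    public static void main(String[] args) {
        checkField("_id", false); // ok: nested one level down, the atRoot guard does not apply
        try {
            checkField("_id", true); // metadata field at the top level of a document
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}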
@@ -302,7 +306,7 @@ class DocumentParser implements Closeable {

    private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException {
        if (mapper instanceof ObjectMapper) {
            return parseObject(context, (ObjectMapper) mapper);
            return parseObject(context, (ObjectMapper) mapper, false);
        } else {
            FieldMapper fieldMapper = (FieldMapper)mapper;
            Mapper update = fieldMapper.parse(context);
@@ -34,8 +34,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.core.TypeParsers;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;

import java.io.IOException;
import java.util.ArrayList;

@@ -447,7 +447,7 @@ public abstract class FieldMapper extends Mapper {
        if (fieldType().similarity() != null) {
            builder.field("similarity", fieldType().similarity().name());
        } else if (includeDefaults) {
            builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
            builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY);
        }

        if (includeDefaults || hasCustomFieldDataSettings()) {
@@ -19,7 +19,6 @@

package org.elasticsearch.index.mapper;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;

@@ -27,9 +26,10 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityProvider;

import java.util.Map;
import java.util.function.Function;

public abstract class Mapper implements ToXContent, Iterable<Mapper> {

@@ -85,18 +85,18 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {

        private final AnalysisService analysisService;

        private final SimilarityLookupService similarityLookupService;
        private final Function<String, SimilarityProvider> similarityLookupService;

        private final MapperService mapperService;

        private final ImmutableMap<String, TypeParser> typeParsers;
        private final Function<String, TypeParser> typeParsers;

        private final Version indexVersionCreated;

        private final ParseFieldMatcher parseFieldMatcher;

        public ParserContext(String type, AnalysisService analysisService, SimilarityLookupService similarityLookupService,
                             MapperService mapperService, ImmutableMap<String, TypeParser> typeParsers,
        public ParserContext(String type, AnalysisService analysisService, Function<String, SimilarityProvider> similarityLookupService,
                             MapperService mapperService, Function<String, TypeParser> typeParsers,
                             Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher) {
            this.type = type;
            this.analysisService = analysisService;

@@ -115,8 +115,8 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
            return analysisService;
        }

        public SimilarityLookupService similarityLookupService() {
            return similarityLookupService;
        public SimilarityProvider getSimilarity(String name) {
            return similarityLookupService.apply(name);
        }

        public MapperService mapperService() {

@@ -124,7 +124,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
        }

        public TypeParser typeParser(String type) {
            return typeParsers.get(Strings.toUnderscoreCase(type));
            return typeParsers.apply(Strings.toUnderscoreCase(type));
        }

        public Version indexVersionCreated() {
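Swapping `SimilarityLookupService`/`ImmutableMap` for `Function<String, T>` means `ParserContext` only depends on a lookup shape, so a method reference like `similarityService::getSimilarity` and a plain `map::get` become interchangeable. A minimal standalone sketch of the pattern; the names (LookupContext, findParser) are illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class LookupContext {
    private final Function<String, String> typeParsers;

    public LookupContext(Function<String, String> typeParsers) {
        this.typeParsers = typeParsers;
    }

    public String findParser(String type) {
        return typeParsers.apply(type);
    }

    public static void main(String[] args) {
        Map<String, String> parsers = new HashMap<>();
        parsers.put("string", "StringFieldMapper.TypeParser");
        // a Map lookup and a lambda both satisfy Function<String, String>
        LookupContext fromMap = new LookupContext(parsers::get);
        LookupContext normalized = new LookupContext(type -> parsers.get(type.toLowerCase()));
        System.out.println(fromMap.findParser("string"));
        System.out.println(normalized.findParser("STRING"));
    }
}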
@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper;

import com.carrotsearch.hppc.ObjectHashSet;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;

@@ -39,6 +38,7 @@ import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;

@@ -52,7 +52,7 @@ import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.percolator.PercolatorService;

@@ -72,6 +72,7 @@ import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.stream.Collectors;

import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableSet;
@@ -124,12 +125,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

    @Inject
    public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService,
                         SimilarityLookupService similarityLookupService,
                         SimilarityService similarityService,
                         ScriptService scriptService) {
        super(index, indexSettings);
        this.analysisService = analysisService;
        this.fieldTypes = new FieldTypeLookup();
        this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityLookupService, scriptService);
        this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityService, scriptService);
        this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
        this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
        this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
@@ -184,13 +185,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     */
    public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
        return () -> {
            final Iterator<DocumentMapper> iterator;
            final Collection<DocumentMapper> documentMappers;
            if (includingDefaultMapping) {
                iterator = mappers.values().iterator();
                documentMappers = mappers.values();
            } else {
                iterator = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).iterator();
                documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList());
            }
            return Iterators.unmodifiableIterator(iterator);
            return Collections.unmodifiableCollection(documentMappers).iterator();
        };
    }

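The rewritten `docMappers` collects the filtered mappers into a concrete list first, so each call to the returned `Iterable` walks a stable, unmodifiable snapshot instead of a live stream-backed iterator, at the cost of one O(n) copy. A standalone sketch of the same pattern:

import java.util.*;
import java.util.stream.Collectors;

public class SnapshotIterableDemo {
    public static Iterable<String> nonDefault(Map<String, String> mappers) {
        return () -> {
            // materialize the filtered view, then hand out a read-only iterator
            Collection<String> snapshot = mappers.values().stream()
                    .filter(v -> !"_default_".equals(v))
                    .collect(Collectors.toList());
            return Collections.unmodifiableCollection(snapshot).iterator();
        };
    }

    public static void main(String[] args) {
        Map<String, String> mappers = new HashMap<>();
        mappers.put("a", "tweet");
        mappers.put("b", "_default_");
        for (String type : nonDefault(mappers)) {
            System.out.println(type); // prints "tweet" only
        }
    }
}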
@@ -173,7 +173,7 @@ public class TypeParsers {
                builder.omitNorms(nodeBooleanValue(propNode));
                iterator.remove();
            } else if (propName.equals("similarity")) {
                builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
                builder.similarity(parserContext.getSimilarity(propNode.toString()));
                iterator.remove();
            } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
                iterator.remove();

@@ -277,7 +277,7 @@ public class TypeParsers {
                // ignore for old indexes
                iterator.remove();
            } else if (propName.equals("similarity")) {
                builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
                builder.similarity(parserContext.getSimilarity(propNode.toString()));
                iterator.remove();
            } else if (propName.equals("fielddata")) {
                final Settings settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build();
@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.geo;

import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.Iterators;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.BytesRef;

@@ -30,6 +29,7 @@ import org.apache.lucene.util.XGeoHashUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

@@ -39,14 +39,7 @@ import org.elasticsearch.common.util.ByteUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;

@@ -54,18 +47,10 @@ import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.*;

import static org.elasticsearch.index.mapper.MapperBuilders.doubleField;
import static org.elasticsearch.index.mapper.MapperBuilders.geoPointField;
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
import static org.elasticsearch.index.mapper.MapperBuilders.*;
import static org.elasticsearch.index.mapper.core.TypeParsers.*;

/**
 * Parsing: We handle:
@@ -41,7 +41,7 @@ import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.similarity.SimilarityService;

import java.io.IOException;
import java.util.Iterator;

@@ -300,7 +300,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
        if (fieldType().similarity() != null) {
            builder.field("similarity", fieldType().similarity().name());
        } else if (includeDefaults) {
            builder.field("similarity", SimilarityLookupService.DEFAULT_SIMILARITY);
            builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY);
        }
    }

@@ -19,7 +19,6 @@

package org.elasticsearch.index.mapper.ip;

import com.google.common.net.InetAddresses;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;

@@ -29,6 +28,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
@@ -242,29 +242,12 @@ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent
    private class RealTimePercolatorOperationListener extends IndexingOperationListener {

        @Override
        public Engine.Create preCreate(Engine.Create create) {
        public Engine.Index preIndex(Engine.Index operation) {
            // validate the query here, before we index
            if (PercolatorService.TYPE_NAME.equals(create.type())) {
                parsePercolatorDocument(create.id(), create.source());
            if (PercolatorService.TYPE_NAME.equals(operation.type())) {
                parsePercolatorDocument(operation.id(), operation.source());
            }
            return create;
        }

        @Override
        public void postCreateUnderLock(Engine.Create create) {
            // add the query under a doc lock
            if (PercolatorService.TYPE_NAME.equals(create.type())) {
                addPercolateQuery(create.id(), create.source());
            }
        }

        @Override
        public Engine.Index preIndex(Engine.Index index) {
            // validate the query here, before we index
            if (PercolatorService.TYPE_NAME.equals(index.type())) {
                parsePercolatorDocument(index.id(), index.source());
            }
            return index;
            return operation;
        }

        @Override
@@ -202,7 +202,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
    }

    /** Returns validation method for coordinates. */
    public GeoValidationMethod getValidationMethod(GeoValidationMethod method) {
    public GeoValidationMethod getValidationMethod() {
        return this.validationMethod;
    }

@@ -221,6 +221,7 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
        }
    }

    GeoPoint point = new GeoPoint(this.point);
    if (GeoValidationMethod.isCoerce(validationMethod)) {
        GeoUtils.normalizePoint(point, true, true);
    }
@@ -148,7 +148,7 @@ public class QueryShardContext {
    }

    public Similarity searchSimilarity() {
        return indexQueryParser.similarityService != null ? indexQueryParser.similarityService.similarity() : null;
        return indexQueryParser.similarityService != null ? indexQueryParser.similarityService.similarity(indexQueryParser.mapperService) : null;
    }

    public String defaultField() {
@@ -27,7 +27,7 @@ import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;

public class ExponentialDecayFunctionBuilder extends DecayFunctionBuilder<ExponentialDecayFunctionBuilder> {

    private static final DecayFunction EXP_DECAY_FUNCTION = new ExponentialDecayScoreFunction();
    public static final DecayFunction EXP_DECAY_FUNCTION = new ExponentialDecayScoreFunction();

    public ExponentialDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) {
        super(fieldName, origin, scale, offset);

@@ -27,7 +27,7 @@ import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;

public class GaussDecayFunctionBuilder extends DecayFunctionBuilder<GaussDecayFunctionBuilder> {

    private static final DecayFunction GAUSS_DECAY_FUNCTION = new GaussScoreFunction();
    public static final DecayFunction GAUSS_DECAY_FUNCTION = new GaussScoreFunction();

    public GaussDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) {
        super(fieldName, origin, scale, offset);

@@ -26,7 +26,7 @@ import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;

public class LinearDecayFunctionBuilder extends DecayFunctionBuilder<LinearDecayFunctionBuilder> {

    private static final DecayFunction LINEAR_DECAY_FUNCTION = new LinearDecayScoreFunction();
    public static final DecayFunction LINEAR_DECAY_FUNCTION = new LinearDecayScoreFunction();

    public LinearDecayFunctionBuilder(String fieldName, Object origin, Object scale, Object offset) {
        super(fieldName, origin, scale, offset);
@@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -43,6 +42,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;

@@ -84,8 +84,8 @@ import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.snapshots.IndexShardRepository;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.Store.MetadataSnapshot;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.ShardSuggestMetric;

@@ -100,6 +100,7 @@ import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.percolator.PercolatorService;

@@ -118,6 +119,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;


public class IndexShard extends AbstractIndexShardComponent implements IndexSettingsService.Listener {

    private final ThreadPool threadPool;
@@ -190,6 +192,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett

    private final IndexSearcherWrapper searcherWrapper;

    /** True if this shard is still indexing (recently) and false if we've been idle for long enough (as periodically checked by {@link
     *  IndexingMemoryController}). */
    private final AtomicBoolean active = new AtomicBoolean();

    private volatile long lastWriteNS;
    private final IndexingMemoryController indexingMemoryController;

    @Inject
    public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) {
        super(shardId, indexSettings);
@@ -242,11 +251,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
        this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
        this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
        this.indexingMemoryController = provider.getIndexingMemoryController();

        this.searcherWrapper = provider.getIndexSearcherWrapper();
        this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, mapperService, indexFieldDataService);
        if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
            percolatorQueriesRegistry.enableRealTimePercolator();
        }

        // We start up inactive
        active.set(false);
    }

    public Store store() {

@@ -278,7 +292,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        return indexFieldDataService;
    }

    public MapperService mapperService() { return mapperService;}
    public MapperService mapperService() {
        return mapperService;
    }

    public ShardSearchStats searchService() {
        return this.searchService;
@@ -423,40 +439,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        return previousState;
    }

    public Engine.Create prepareCreate(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) {
        try {
            return prepareCreate(docMapper(source.type()), source, version, versionType, origin);
        } catch (Throwable t) {
            verifyNotClosed(t);
            throw t;
        }
    }

    static Engine.Create prepareCreate(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) {
        long startTime = System.nanoTime();
        ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
        if (docMapper.getMapping() != null) {
            doc.addDynamicMappingsUpdate(docMapper.getMapping());
        }
        return new Engine.Create(docMapper.getDocumentMapper().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime);
    }

    public void create(Engine.Create create) {
        writeAllowed(create.origin());
        create = indexingService.preCreate(create);
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs());
            }
            getEngine().create(create);
            create.endTime(System.nanoTime());
        } catch (Throwable ex) {
            indexingService.postCreate(create, ex);
            throw ex;
        }
        indexingService.postCreate(create);
    }

    public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin) {
        try {
            return prepareIndex(docMapper(source.type()), source, version, versionType, origin);
@@ -480,7 +462,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
     * updated.
     */
    public boolean index(Engine.Index index) {
        writeAllowed(index.origin());
        ensureWriteAllowed(index);
        markLastWrite(index);
        index = indexingService.preIndex(index);
        final boolean created;
        try {

@@ -504,7 +487,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
    }

    public void delete(Engine.Delete delete) {
        writeAllowed(delete.origin());
        ensureWriteAllowed(delete);
        markLastWrite(delete);
        delete = indexingService.preDelete(delete);
        try {
            if (logger.isTraceEnabled()) {
@@ -914,7 +898,24 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        }
    }

    private void writeAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
    /** Returns timestamp of last indexing operation */
    public long getLastWriteNS() {
        return lastWriteNS;
    }

    /** Records timestamp of the last write operation, possibly switching {@code active} to true if we were inactive. */
    private void markLastWrite(Engine.Operation op) {
        lastWriteNS = op.startTime();
        if (active.getAndSet(true) == false) {
            // We are currently inactive, but a new write operation just showed up, so we now notify IMC
            // to wake up and fix our indexing buffer.  We could do this async instead, but cost should
            // be low, and it's rare this happens.
            indexingMemoryController.forceCheck();
        }
    }

    private void ensureWriteAllowed(Engine.Operation op) throws IllegalIndexShardStateException {
        Engine.Operation.Origin origin = op.origin();
        IndexShardState state = this.state; // one time volatile read

        if (origin == Engine.Operation.Origin.PRIMARY) {
@@ -976,6 +977,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        this.failedEngineListener.delegates.add(failedEngineListener);
    }

    /** Change the indexing and translog buffer sizes.  If {@code IndexWriter} is currently using more than
     *  the new buffering indexing size then we do a refresh to free up the heap. */
    public void updateBufferSize(ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) {

        final EngineConfig config = engineConfig;
@@ -994,27 +997,50 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        // so we push changes these changes down to IndexWriter:
        engine.onSettingsChanged();

        if (shardIndexingBufferSize == EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER) {
            // it's inactive: make sure we do a refresh / full IW flush in this case, since the memory
            // changes only after a "data" change has happened to the writer
            // the index writer lazily allocates memory and a refresh will clean it all up.
            logger.debug("updating index_buffer_size from [{}] to (inactive) [{}]", preValue, shardIndexingBufferSize);
        long iwBytesUsed = engine.indexWriterRAMBytesUsed();

        String message = LoggerMessageFormat.format("updating index_buffer_size from [{}] to [{}]; IndexWriter now using [{}] bytes",
                preValue, shardIndexingBufferSize, iwBytesUsed);

        if (iwBytesUsed > shardIndexingBufferSize.bytes()) {
            // our allowed buffer was changed to less than we are currently using; we ask IW to refresh
            // so it clears its buffers (otherwise it won't clear until the next indexing/delete op)
            logger.debug(message + "; now refresh to clear IndexWriter memory");

            // TODO: should IW have an API to move segments to disk, but not refresh?  Its flush method is protected...
            try {
                refresh("update index buffer");
            } catch (Throwable e) {
                logger.warn("failed to refresh after setting shard to inactive", e);
                logger.warn("failed to refresh after decreasing index buffer", e);
            }
        } else {
            logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, shardIndexingBufferSize);
            logger.debug(message);
        }
        }

        engine.getTranslog().updateBuffer(shardTranslogBufferSize);
    }

    public void markAsInactive() {
        updateBufferSize(EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER, TranslogConfig.INACTIVE_SHARD_TRANSLOG_BUFFER);
        indicesLifecycle.onShardInactive(this);
    /** Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
     *  indexing operation, and become inactive (reducing indexing and translog buffers to tiny values) if so. This returns true
     *  if the shard is inactive. */
    public boolean checkIdle(long inactiveTimeNS) {
        if (System.nanoTime() - lastWriteNS >= inactiveTimeNS) {
            boolean wasActive = active.getAndSet(false);
            if (wasActive) {
                updateBufferSize(IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER);
                logger.debug("shard is now inactive");
                indicesLifecycle.onShardInactive(this);
            }
        }

        return active.get() == false;
    }

    /** Returns {@code true} if this shard is active (has seen indexing ops in the last {@link
     *  IndexingMemoryController#SHARD_INACTIVE_TIME_SETTING} (default 5 minutes), else {@code false}. */
    public boolean getActive() {
        return active.get();
    }

    public final boolean isFlushOnClose() {
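Taken together, `markLastWrite` and `checkIdle` form a small state machine around the `active` flag: writes flip it on (and wake the memory controller exactly once per transition), while the periodic idle check flips it off (and shrinks buffers exactly once). A standalone sketch of that handshake; `wakeController` and `shrinkBuffers` are stand-ins for `IndexingMemoryController.forceCheck` and `updateBufferSize`:

import java.util.concurrent.atomic.AtomicBoolean;

public class ShardActivitySketch {
    private final AtomicBoolean active = new AtomicBoolean(false);
    private volatile long lastWriteNS;

    void markLastWrite() {
        lastWriteNS = System.nanoTime();
        // getAndSet is atomic, so only the thread that flips inactive -> active notifies
        if (active.getAndSet(true) == false) {
            wakeController();
        }
    }

    boolean checkIdle(long inactiveTimeNS) {
        if (System.nanoTime() - lastWriteNS >= inactiveTimeNS) {
            // likewise, only the first transition to idle shrinks the buffers
            if (active.getAndSet(false)) {
                shrinkBuffers();
            }
        }
        return active.get() == false;
    }

    void wakeController() { System.out.println("controller notified"); }
    void shrinkBuffers() { System.out.println("buffers reduced"); }

    public static void main(String[] args) throws InterruptedException {
        ShardActivitySketch shard = new ShardActivitySketch();
        shard.markLastWrite();                  // prints "controller notified"
        Thread.sleep(5);
        System.out.println(shard.checkIdle(1)); // prints "buffers reduced", then true
    }
}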
@@ -1426,7 +1452,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
        };
        return new EngineConfig(shardId,
                threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
                mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig);
                mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig);
    }

    private static class IndexShardOperationCounter extends AbstractRefCounted {
@@ -1499,6 +1525,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
    /**
     * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the
     * Flush thread-pool asynchronously.
     *
     * @return <code>true</code> if a new flush is scheduled otherwise <code>false</code>.
     */
    public boolean maybeFlush() {
@@ -18,6 +18,8 @@
 */
package org.elasticsearch.index.shard;

import java.io.IOException;

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexServicesProvider;

@@ -26,8 +28,7 @@ import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.store.Store;

import java.io.IOException;
import org.elasticsearch.index.translog.TranslogStats;

/**
 * ShadowIndexShard extends {@link IndexShard} to add file synchronization

@@ -82,4 +83,9 @@ public final class ShadowIndexShard extends IndexShard {
    public boolean allowsPrimaryPromotion() {
        return false;
    }

    @Override
    public TranslogStats translogStats() {
        return null; // shadow engine has no translog
    }
}
@@ -145,19 +145,7 @@ public class TranslogRecoveryPerformer {
    public void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates) {
        try {
            switch (operation.opType()) {
                case CREATE:
                    Translog.Create create = (Translog.Create) operation;
                    Engine.Create engineCreate = IndexShard.prepareCreate(docMapper(create.type()),
                            source(create.source()).index(shardId.getIndex()).type(create.type()).id(create.id())
                                    .routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl()),
                            create.version(), create.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY);
                    maybeAddMappingUpdate(engineCreate.type(), engineCreate.parsedDoc().dynamicMappingsUpdate(), engineCreate.id(), allowMappingUpdates);
                    if (logger.isTraceEnabled()) {
                        logger.trace("[translog] recover [create] op of [{}][{}]", create.type(), create.id());
                    }
                    engine.create(engineCreate);
                    break;
                case SAVE:
                case INDEX:
                    Translog.Index index = (Translog.Index) operation;
                    Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(index.source()).type(index.type()).id(index.id())
                            .routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl()),
@ -21,8 +21,6 @@ package org.elasticsearch.index.similarity;

import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;

/**

@ -40,8 +38,7 @@ public class BM25SimilarityProvider extends AbstractSimilarityProvider {

    private final BM25Similarity similarity;

    @Inject
    public BM25SimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public BM25SimilarityProvider(String name, Settings settings) {
        super(name);
        float k1 = settings.getAsFloat("k1", 1.2f);
        float b = settings.getAsFloat("b", 0.75f);

@ -62,8 +62,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider {

    private final DFRSimilarity similarity;

    @Inject
    public DFRSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public DFRSimilarityProvider(String name, Settings settings) {
        super(name);
        BasicModel basicModel = parseBasicModel(settings);
        AfterEffect afterEffect = parseAfterEffect(settings);

@ -20,8 +20,6 @@
package org.elasticsearch.index.similarity;

import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;

/**

@ -37,8 +35,7 @@ public class DefaultSimilarityProvider extends AbstractSimilarityProvider {

    private final DefaultSimilarity similarity = new DefaultSimilarity();

    @Inject
    public DefaultSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public DefaultSimilarityProvider(String name, Settings settings) {
        super(name);
        boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true);
        this.similarity.setDiscountOverlaps(discountOverlaps);

@ -22,8 +22,6 @@ package org.elasticsearch.index.similarity;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.search.similarities.*;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;

/**

@ -56,8 +54,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider {

    private final IBSimilarity similarity;

    @Inject
    public IBSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public IBSimilarityProvider(String name, Settings settings) {
        super(name);
        Distribution distribution = parseDistribution(settings);
        Lambda lambda = parseLambda(settings);

@ -21,8 +21,6 @@ package org.elasticsearch.index.similarity;

import org.apache.lucene.search.similarities.LMDirichletSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;

/**

@ -38,8 +36,7 @@ public class LMDirichletSimilarityProvider extends AbstractSimilarityProvider {

    private final LMDirichletSimilarity similarity;

    @Inject
    public LMDirichletSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public LMDirichletSimilarityProvider(String name, Settings settings) {
        super(name);
        float mu = settings.getAsFloat("mu", 2000f);
        this.similarity = new LMDirichletSimilarity(mu);

@ -38,8 +38,7 @@ public class LMJelinekMercerSimilarityProvider extends AbstractSimilarityProvider {

    private final LMJelinekMercerSimilarity similarity;

    @Inject
    public LMJelinekMercerSimilarityProvider(@Assisted String name, @Assisted Settings settings) {
    public LMJelinekMercerSimilarityProvider(String name, Settings settings) {
        super(name);
        float lambda = settings.getAsFloat("lambda", 0.1f);
        this.similarity = new LMJelinekMercerSimilarity(lambda);
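Reviewer note: every provider above now exposes a plain (String name, Settings settings) constructor in place of the Guice @Inject/@Assisted pair, which is what lets the service layer refer to providers as constructor references. A minimal sketch of a custom provider written against the new convention; the class name is hypothetical, only the constructor shape and the "k1"/"b" keys mirror this change:

import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Settings;

// Hypothetical provider following the new plain-constructor convention.
public class MyScaledBM25SimilarityProvider extends AbstractSimilarityProvider {

    private final BM25Similarity similarity;

    public MyScaledBM25SimilarityProvider(String name, Settings settings) {
        super(name); // AbstractSimilarityProvider keeps the configured name
        float k1 = settings.getAsFloat("k1", 1.2f);
        float b = settings.getAsFloat("b", 0.75f);
        this.similarity = new BM25Similarity(k1, b);
    }

    @Override
    public Similarity get() {
        return similarity;
    }
}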
@ -1,73 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.similarity;

import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Settings;

/**
 * {@link SimilarityProvider} for pre-built Similarities
 */
public class PreBuiltSimilarityProvider extends AbstractSimilarityProvider {

    public static class Factory implements SimilarityProvider.Factory {

        private final PreBuiltSimilarityProvider similarity;

        public Factory(String name, Similarity similarity) {
            this.similarity = new PreBuiltSimilarityProvider(name, similarity);
        }

        @Override
        public SimilarityProvider create(String name, Settings settings) {
            return similarity;
        }

        public String name() {
            return similarity.name();
        }

        public SimilarityProvider get() {
            return similarity;
        }
    }

    private final Similarity similarity;

    /**
     * Creates a new {@link PreBuiltSimilarityProvider} with the given name and given
     * pre-built Similarity
     *
     * @param name       Name of the Provider
     * @param similarity Pre-built Similarity
     */
    public PreBuiltSimilarityProvider(String name, Similarity similarity) {
        super(name);
        this.similarity = similarity;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Similarity get() {
        return similarity;
    }
}
@ -1,56 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.similarity;

import com.google.common.collect.ImmutableMap;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.elasticsearch.common.collect.MapBuilder;

import java.util.Collection;

/**
 * Cache of pre-defined Similarities
 */
public class Similarities {

    private static final ImmutableMap<String, PreBuiltSimilarityProvider.Factory> PRE_BUILT_SIMILARITIES;

    static {
        MapBuilder<String, PreBuiltSimilarityProvider.Factory> similarities = MapBuilder.newMapBuilder();
        similarities.put(SimilarityLookupService.DEFAULT_SIMILARITY,
                new PreBuiltSimilarityProvider.Factory(SimilarityLookupService.DEFAULT_SIMILARITY, new DefaultSimilarity()));
        similarities.put("BM25", new PreBuiltSimilarityProvider.Factory("BM25", new BM25Similarity()));

        PRE_BUILT_SIMILARITIES = similarities.immutableMap();
    }

    private Similarities() {
    }

    /**
     * Returns the list of pre-defined SimilarityProvider Factories
     *
     * @return Pre-defined SimilarityProvider Factories
     */
    public static Collection<PreBuiltSimilarityProvider.Factory> listFactories() {
        return PRE_BUILT_SIMILARITIES.values();
    }
}
@ -1,85 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.similarity;

import com.google.common.collect.ImmutableMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;

import java.util.Map;

/**
 * Service for looking up configured {@link SimilarityProvider} implementations by name.
 * <p>
 * The service instantiates the Providers through their Factories using configuration
 * values found with the {@link SimilarityModule#SIMILARITY_SETTINGS_PREFIX} prefix.
 */
public class SimilarityLookupService extends AbstractIndexComponent {

    public final static String DEFAULT_SIMILARITY = "default";

    private final ImmutableMap<String, SimilarityProvider> similarities;

    public SimilarityLookupService(Index index, Settings indexSettings) {
        this(index, indexSettings, ImmutableMap.<String, SimilarityProvider.Factory>of());
    }

    @Inject
    public SimilarityLookupService(Index index, @IndexSettings Settings indexSettings, Map<String, SimilarityProvider.Factory> similarities) {
        super(index, indexSettings);

        MapBuilder<String, SimilarityProvider> providers = MapBuilder.newMapBuilder();

        Map<String, Settings> similaritySettings = indexSettings.getGroups(SimilarityModule.SIMILARITY_SETTINGS_PREFIX);
        for (Map.Entry<String, SimilarityProvider.Factory> entry : similarities.entrySet()) {
            String name = entry.getKey();
            SimilarityProvider.Factory factory = entry.getValue();

            Settings settings = similaritySettings.get(name);
            if (settings == null) {
                settings = Settings.Builder.EMPTY_SETTINGS;
            }
            providers.put(name, factory.create(name, settings));
        }

        // For testing
        for (PreBuiltSimilarityProvider.Factory factory : Similarities.listFactories()) {
            if (!providers.containsKey(factory.name())) {
                providers.put(factory.name(), factory.get());
            }
        }

        this.similarities = providers.immutableMap();
    }

    /**
     * Returns the {@link SimilarityProvider} with the given name
     *
     * @param name Name of the SimilarityProvider to find
     * @return {@link SimilarityProvider} with the given name, or {@code null} if no Provider exists
     */
    public SimilarityProvider similarity(String name) {
        return similarities.get(name);
    }
}
@ -20,19 +20,18 @@
package org.elasticsearch.index.similarity;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;

/**
 * {@link SimilarityModule} is responsible for gathering registered and configured {@link SimilarityProvider}
 * implementations and making them available through the {@link SimilarityLookupService} and {@link SimilarityService}.
 * implementations and making them available through the {@link SimilarityService}.
 *
 * New {@link SimilarityProvider} implementations can be registered through {@link #addSimilarity(String, Class)}
 * New {@link SimilarityProvider} implementations can be registered through {@link #addSimilarity(String, BiFunction)}
 * while existing Providers can be referenced through Settings under the {@link #SIMILARITY_SETTINGS_PREFIX} prefix
 * along with the "type" value. For example, to reference the {@link BM25SimilarityProvider}, the configuration
 * <tt>"index.similarity.my_similarity.type : "BM25"</tt> can be used.

@ -42,16 +41,12 @@ public class SimilarityModule extends AbstractModule {
    public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";

    private final Settings settings;
    private final Map<String, Class<? extends SimilarityProvider>> similarities = new HashMap<>();
    private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>();
    private final Index index;

    public SimilarityModule(Settings settings) {
    public SimilarityModule(Index index, Settings settings) {
        this.settings = settings;
        addSimilarity("default", DefaultSimilarityProvider.class);
        addSimilarity("BM25", BM25SimilarityProvider.class);
        addSimilarity("DFR", DFRSimilarityProvider.class);
        addSimilarity("IB", IBSimilarityProvider.class);
        addSimilarity("LMDirichlet", LMDirichletSimilarityProvider.class);
        addSimilarity("LMJelinekMercer", LMJelinekMercerSimilarityProvider.class);
        this.index = index;
    }

    /**

@ -60,36 +55,16 @@ public class SimilarityModule extends AbstractModule {
     * @param name       Name of the SimilarityProvider
     * @param similarity SimilarityProvider to register
     */
    public void addSimilarity(String name, Class<? extends SimilarityProvider> similarity) {
    public void addSimilarity(String name, BiFunction<String, Settings, SimilarityProvider> similarity) {
        if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) {
            throw new IllegalArgumentException("similarity for name: [" + name + "] is already registered");
        }
        similarities.put(name, similarity);
    }

    @Override
    protected void configure() {
        MapBinder<String, SimilarityProvider.Factory> similarityBinder =
                MapBinder.newMapBinder(binder(), String.class, SimilarityProvider.Factory.class);

        Map<String, Settings> similaritySettings = settings.getGroups(SIMILARITY_SETTINGS_PREFIX);
        for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) {
            String name = entry.getKey();
            Settings settings = entry.getValue();

            String typeName = settings.get("type");
            if (typeName == null) {
                throw new IllegalArgumentException("Similarity [" + name + "] must have an associated type");
            } else if (similarities.containsKey(typeName) == false) {
                throw new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]");
            }
            similarityBinder.addBinding(entry.getKey()).toProvider(FactoryProvider.newFactory(SimilarityProvider.Factory.class, similarities.get(typeName))).in(Scopes.SINGLETON);
        }

        for (PreBuiltSimilarityProvider.Factory factory : Similarities.listFactories()) {
            if (!similarities.containsKey(factory.name())) {
                similarityBinder.addBinding(factory.name()).toInstance(factory);
            }
        }

        bind(SimilarityLookupService.class).asEagerSingleton();
        bind(SimilarityService.class).asEagerSingleton();
        SimilarityService service = new SimilarityService(index, settings, new HashMap<>(similarities));
        bind(SimilarityService.class).toInstance(service);
    }
}
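With the Class-based registry gone, registering a similarity means handing the module a constructor reference instead of a Guice binding. A hedged sketch of the new call site; the "my_bm25" name and the provider class are illustrative, not part of this change:

// Register a custom provider; duplicate or built-in names now fail fast
// with an IllegalArgumentException from addSimilarity.
SimilarityModule module = new SimilarityModule(index, indexSettings);
module.addSimilarity("my_bm25", MyScaledBM25SimilarityProvider::new);

// Referenced from index settings as before, e.g.:
//   index.similarity.my_similarity.type: my_bm25
//   index.similarity.my_similarity.k1: 1.3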
@ -40,19 +40,4 @@ public interface SimilarityProvider {
     * @return Provided {@link Similarity}
     */
    Similarity get();

    /**
     * Factory for creating {@link SimilarityProvider} instances
     */
    public static interface Factory {

        /**
         * Creates a new {@link SimilarityProvider} instance
         *
         * @param name     Name of the provider
         * @param settings Settings to be used by the Provider
         * @return {@link SimilarityProvider} instance created by the Factory
         */
        SimilarityProvider create(String name, Settings settings);
    }
}
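The removed Factory interface was a single-abstract-method type of shape (String, Settings) -> SimilarityProvider, so BiFunction<String, Settings, SimilarityProvider> is a drop-in replacement and the new plain constructors satisfy it directly. A small sketch using only types visible in this diff:

import java.util.function.BiFunction;
import org.elasticsearch.common.settings.Settings;

// Factory#create(name, settings) maps 1:1 onto BiFunction#apply(name, settings):
BiFunction<String, Settings, SimilarityProvider> factory = DefaultSimilarityProvider::new;
SimilarityProvider provider = factory.apply("default", Settings.Builder.EMPTY_SETTINGS);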
@ -25,55 +25,96 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.settings.IndexSettings;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;

/**
 *
 */
public class SimilarityService extends AbstractIndexComponent {

    private final SimilarityLookupService similarityLookupService;
    private final MapperService mapperService;

    private final Similarity perFieldSimilarity;

    public final static String DEFAULT_SIMILARITY = "default";
    private final Similarity defaultSimilarity;
    private final Similarity baseSimilarity;
    private final Map<String, SimilarityProvider> similarities;
    static final Map<String, BiFunction<String, Settings, SimilarityProvider>> DEFAULTS;
    static final Map<String, BiFunction<String, Settings, SimilarityProvider>> BUILT_IN;
    static {
        Map<String, BiFunction<String, Settings, SimilarityProvider>> defaults = new HashMap<>();
        Map<String, BiFunction<String, Settings, SimilarityProvider>> buildIn = new HashMap<>();
        defaults.put("default", DefaultSimilarityProvider::new);
        defaults.put("BM25", BM25SimilarityProvider::new);
        buildIn.put("default", DefaultSimilarityProvider::new);
        buildIn.put("BM25", BM25SimilarityProvider::new);
        buildIn.put("DFR", DFRSimilarityProvider::new);
        buildIn.put("IB", IBSimilarityProvider::new);
        buildIn.put("LMDirichlet", LMDirichletSimilarityProvider::new);
        buildIn.put("LMJelinekMercer", LMJelinekMercerSimilarityProvider::new);
        DEFAULTS = Collections.unmodifiableMap(defaults);
        BUILT_IN = Collections.unmodifiableMap(buildIn);
    }
    public SimilarityService(Index index) {
        this(index, Settings.Builder.EMPTY_SETTINGS);
    }

    public SimilarityService(Index index, Settings settings) {
        this(index, settings, new SimilarityLookupService(index, settings), null);
        this(index, settings, Collections.EMPTY_MAP);
    }

    @Inject
    public SimilarityService(Index index, @IndexSettings Settings indexSettings,
                             final SimilarityLookupService similarityLookupService, final MapperService mapperService) {
    public SimilarityService(Index index, @IndexSettings Settings indexSettings, Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) {
        super(index, indexSettings);
        this.similarityLookupService = similarityLookupService;
        this.mapperService = mapperService;

        Similarity defaultSimilarity = similarityLookupService.similarity(SimilarityLookupService.DEFAULT_SIMILARITY).get();
        Map<String, SimilarityProvider> providers = new HashMap<>(similarities.size());
        Map<String, Settings> similaritySettings = indexSettings.getGroups(SimilarityModule.SIMILARITY_SETTINGS_PREFIX);
        for (Map.Entry<String, Settings> entry : similaritySettings.entrySet()) {
            String name = entry.getKey();
            Settings settings = entry.getValue();
            String typeName = settings.get("type");
            if (typeName == null) {
                throw new IllegalArgumentException("Similarity [" + name + "] must have an associated type");
            } else if ((similarities.containsKey(typeName) || BUILT_IN.containsKey(typeName)) == false) {
                throw new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]");
            }
            BiFunction<String, Settings, SimilarityProvider> factory = similarities.getOrDefault(typeName, BUILT_IN.get(typeName));
            if (settings == null) {
                settings = Settings.Builder.EMPTY_SETTINGS;
            }
            providers.put(name, factory.apply(name, settings));
        }
        addSimilarities(similaritySettings, providers, DEFAULTS);
        this.similarities = providers;
        defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get();
        // Expert users can configure the base type as being different to default, but out-of-box we use default.
        Similarity baseSimilarity = (similarityLookupService.similarity("base") != null) ? similarityLookupService.similarity("base").get() :
                defaultSimilarity;

        this.perFieldSimilarity = (mapperService != null) ? new PerFieldSimilarity(defaultSimilarity, baseSimilarity, mapperService) :
        baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() :
                defaultSimilarity;
    }

    public Similarity similarity() {
        return perFieldSimilarity;
    public Similarity similarity(MapperService mapperService) {
        // TODO we can maybe factor out MapperService here entirely by introducing an interface for the lookup?
        return (mapperService != null) ? new PerFieldSimilarity(defaultSimilarity, baseSimilarity, mapperService) :
                defaultSimilarity;
    }

    public SimilarityLookupService similarityLookupService() {
        return similarityLookupService;
    private void addSimilarities(Map<String, Settings> similaritySettings, Map<String, SimilarityProvider> providers, Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) {
        for (Map.Entry<String, BiFunction<String, Settings, SimilarityProvider>> entry : similarities.entrySet()) {
            String name = entry.getKey();
            BiFunction<String, Settings, SimilarityProvider> factory = entry.getValue();
            Settings settings = similaritySettings.get(name);
            if (settings == null) {
                settings = Settings.Builder.EMPTY_SETTINGS;
            }
            providers.put(name, factory.apply(name, settings));
        }
    }

    public MapperService mapperService() {
        return mapperService;
    public SimilarityProvider getSimilarity(String name) {
        return similarities.get(name);
    }

    static class PerFieldSimilarity extends PerFieldSimilarityWrapper {
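SimilarityService now owns the whole lookup that SimilarityLookupService used to provide: configured similarities resolve against the caller-supplied map first, then BUILT_IN, and DEFAULTS backfills "default" and "BM25". A hedged usage sketch; the index, settings object, and "my_similarity" name are assumed for illustration:

// indexSettings is assumed to carry: index.similarity.my_similarity.type = BM25
SimilarityService service = new SimilarityService(index, indexSettings, Collections.emptyMap());

SimilarityProvider provider = service.getSimilarity("my_similarity"); // a BM25SimilarityProvider
Similarity perField = service.similarity(mapperService); // per-field wrapper when a MapperService is given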
@ -465,11 +465,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
    }

    /**
     * Adds a created / delete / index operations to the transaction log.
     * Adds a delete / index operation to the transaction log.
     *
     * @see org.elasticsearch.index.translog.Translog.Operation
     * @see org.elasticsearch.index.translog.Translog.Create
     * @see org.elasticsearch.index.translog.Translog.Index
     * @see Index
     * @see org.elasticsearch.index.translog.Translog.Delete
     */
    public Location add(Operation operation) throws TranslogException {

@ -874,8 +873,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     */
    public interface Operation extends Streamable {
        enum Type {
            @Deprecated
            CREATE((byte) 1),
            SAVE((byte) 2),
            INDEX((byte) 2),
            DELETE((byte) 3),
            DELETE_BY_QUERY((byte) 4);

@ -894,7 +894,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                case 1:
                    return CREATE;
                case 2:
                    return SAVE;
                    return INDEX;
                case 3:
                    return DELETE;
                case 4:

@ -929,199 +929,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
        }
    }

    public static class Create implements Operation {
        public static final int SERIALIZATION_FORMAT = 6;

        private String id;
        private String type;
        private BytesReference source;
        private String routing;
        private String parent;
        private long timestamp;
        private long ttl;
        private long version = Versions.MATCH_ANY;
        private VersionType versionType = VersionType.INTERNAL;

        public Create() {
        }

        public Create(Engine.Create create) {
            this.id = create.id();
            this.type = create.type();
            this.source = create.source();
            this.routing = create.routing();
            this.parent = create.parent();
            this.timestamp = create.timestamp();
            this.ttl = create.ttl();
            this.version = create.version();
            this.versionType = create.versionType();
        }

        public Create(String type, String id, byte[] source) {
            this.id = id;
            this.type = type;
            this.source = new BytesArray(source);
        }

        @Override
        public Type opType() {
            return Type.CREATE;
        }

        @Override
        public long estimateSize() {
            return ((id.length() + type.length()) * 2) + source.length() + 12;
        }

        public String id() {
            return this.id;
        }

        public BytesReference source() {
            return this.source;
        }

        public String type() {
            return this.type;
        }

        public String routing() {
            return this.routing;
        }

        public String parent() {
            return this.parent;
        }

        public long timestamp() {
            return this.timestamp;
        }

        public long ttl() {
            return this.ttl;
        }

        public long version() {
            return this.version;
        }

        public VersionType versionType() {
            return versionType;
        }

        @Override
        public Source getSource() {
            return new Source(source, routing, parent, timestamp, ttl);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            int version = in.readVInt(); // version
            id = in.readString();
            type = in.readString();
            source = in.readBytesReference();
            if (version >= 1) {
                if (in.readBoolean()) {
                    routing = in.readString();
                }
            }
            if (version >= 2) {
                if (in.readBoolean()) {
                    parent = in.readString();
                }
            }
            if (version >= 3) {
                this.version = in.readLong();
            }
            if (version >= 4) {
                this.timestamp = in.readLong();
            }
            if (version >= 5) {
                this.ttl = in.readLong();
            }
            if (version >= 6) {
                this.versionType = VersionType.fromValue(in.readByte());
            }

            assert versionType.validateVersionForWrites(version);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(SERIALIZATION_FORMAT);
            out.writeString(id);
            out.writeString(type);
            out.writeBytesReference(source);
            if (routing == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeString(routing);
            }
            if (parent == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeString(parent);
            }
            out.writeLong(version);
            out.writeLong(timestamp);
            out.writeLong(ttl);
            out.writeByte(versionType.getValue());
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }

            Create create = (Create) o;

            if (timestamp != create.timestamp ||
                    ttl != create.ttl ||
                    version != create.version ||
                    id.equals(create.id) == false ||
                    type.equals(create.type) == false ||
                    source.equals(create.source) == false) {
                return false;
            }
            if (routing != null ? !routing.equals(create.routing) : create.routing != null) {
                return false;
            }
            if (parent != null ? !parent.equals(create.parent) : create.parent != null) {
                return false;
            }
            return versionType == create.versionType;

        }

        @Override
        public int hashCode() {
            int result = id.hashCode();
            result = 31 * result + type.hashCode();
            result = 31 * result + source.hashCode();
            result = 31 * result + (routing != null ? routing.hashCode() : 0);
            result = 31 * result + (parent != null ? parent.hashCode() : 0);
            result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
            result = 31 * result + (int) (ttl ^ (ttl >>> 32));
            result = 31 * result + (int) (version ^ (version >>> 32));
            result = 31 * result + versionType.hashCode();
            return result;
        }

        @Override
        public String toString() {
            return "Create{" +
                    "id='" + id + '\'' +
                    ", type='" + type + '\'' +
                    '}';
        }
    }

    public static class Index implements Operation {
        public static final int SERIALIZATION_FORMAT = 6;

@ -1158,7 +965,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC

        @Override
        public Type opType() {
            return Type.SAVE;
            return Type.INDEX;
        }

        @Override

@ -1667,13 +1474,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
    static Translog.Operation newOperationFromType(Translog.Operation.Type type) throws IOException {
        switch (type) {
            case CREATE:
                return new Translog.Create();
                // the deserialization logic in Index was identical to that of Create when create was deprecated
                return new Index();
            case DELETE:
                return new Translog.Delete();
            case DELETE_BY_QUERY:
                return new Translog.DeleteByQuery();
            case SAVE:
                return new Translog.Index();
            case INDEX:
                return new Index();
            default:
                throw new IOException("No type for [" + type + "]");
        }
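Reviewer note on backward compatibility: Translog.Create is removed, but opcode 1 must stay readable from translogs written by older nodes. Since Create and Index shared an identical serialization format (both SERIALIZATION_FORMAT 6 with the same field order, as the comment above states), newOperationFromType can safely hand back an Index for a legacy CREATE. A sketch of the invariant this relies on:

// Both opcodes deserialize into Translog.Index; only the on-disk type byte differs.
Translog.Operation fromLegacyCreate = newOperationFromType(Translog.Operation.Type.CREATE);
Translog.Operation fromIndex = newOperationFromType(Translog.Operation.Type.INDEX);
assert fromLegacyCreate.opType() == Translog.Operation.Type.INDEX;
assert fromIndex.opType() == Translog.Operation.Type.INDEX;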
@ -27,6 +27,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog.TranslogGeneration;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.nio.file.Path;

@ -42,7 +43,6 @@ public final class TranslogConfig {
    public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type";
    public static final String INDEX_TRANSLOG_BUFFER_SIZE = "index.translog.fs.buffer_size";
    public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval";
    public static final ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb", "INACTIVE_SHARD_TRANSLOG_BUFFER");

    private final TimeValue syncInterval;
    private final BigArrays bigArrays;

@ -73,7 +73,7 @@ public final class TranslogConfig {
        this.threadPool = threadPool;
        this.bigArrays = bigArrays;
        this.type = TranslogWriter.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
        this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, ByteSizeValue.parseBytesSizeValue("64k", INDEX_TRANSLOG_BUFFER_SIZE)).bytes(); // Not really interesting, updated by IndexingMemoryController...
        this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER).bytes(); // Not really interesting, updated by IndexingMemoryController...

        syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
        if (syncInterval.millis() > 0 && threadPool != null) {
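The translog buffer default moves from a hard-coded 64k to the shared 1kb inactive-shard constant; IndexingMemoryController grows the buffer once the shard actually indexes. A sketch of how the setting resolves after this change (an indexSettings instance is assumed):

// Unset "index.translog.fs.buffer_size" now resolves to the small inactive-shard
// default; the memory controller raises it for active shards afterwards.
ByteSizeValue size = indexSettings.getAsBytesSize(
        TranslogConfig.INDEX_TRANSLOG_BUFFER_SIZE,
        IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER);
int bufferSize = (int) size.bytes(); // 1024 when unset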
@ -18,11 +18,10 @@
 */
package org.elasticsearch.index.translog;

import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

@ -31,17 +30,23 @@ import java.io.IOException;
/**
 *
 */
public class TranslogStats implements ToXContent, Streamable {
public class TranslogStats extends ToXContentToBytes implements Streamable {

    private long translogSizeInBytes = 0;
    private int estimatedNumberOfOperations = -1;
    private long translogSizeInBytes;
    private int numberOfOperations;

    public TranslogStats() {
    }

    public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) {
    public TranslogStats(int numberOfOperations, long translogSizeInBytes) {
        if (numberOfOperations < 0) {
            throw new IllegalArgumentException("numberOfOperations must be >= 0");
        }
        if (translogSizeInBytes < 0) {
            throw new IllegalArgumentException("translogSizeInBytes must be >= 0");
        }
        assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >= 0, got [" + translogSizeInBytes + "]";
        this.estimatedNumberOfOperations = estimatedNumberOfOperations;
        this.numberOfOperations = numberOfOperations;
        this.translogSizeInBytes = translogSizeInBytes;
    }

@ -50,22 +55,22 @@ public class TranslogStats implements ToXContent, Streamable {
            return;
        }

        this.estimatedNumberOfOperations += translogStats.estimatedNumberOfOperations;
        this.translogSizeInBytes = +translogStats.translogSizeInBytes;
        this.numberOfOperations += translogStats.numberOfOperations;
        this.translogSizeInBytes += translogStats.translogSizeInBytes;
    }

    public ByteSizeValue translogSizeInBytes() {
        return new ByteSizeValue(translogSizeInBytes);
    public long getTranslogSizeInBytes() {
        return translogSizeInBytes;
    }

    public long estimatedNumberOfOperations() {
        return estimatedNumberOfOperations;
        return numberOfOperations;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.TRANSLOG);
        builder.field(Fields.OPERATIONS, estimatedNumberOfOperations);
        builder.field(Fields.OPERATIONS, numberOfOperations);
        builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, translogSizeInBytes);
        builder.endObject();
        return builder;

@ -80,13 +85,13 @@ public class TranslogStats implements ToXContent, Streamable {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        estimatedNumberOfOperations = in.readVInt();
        numberOfOperations = in.readVInt();
        translogSizeInBytes = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(estimatedNumberOfOperations);
        out.writeVInt(numberOfOperations);
        out.writeVLong(translogSizeInBytes);
    }
}
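The add() hunk above also fixes a subtle `=+` typo: the old line parsed as an assignment of a unary-plus expression, silently discarding the previously accumulated size. A self-contained illustration of the difference:

long total = 100;
long increment = 50;

total = +increment; // old behavior: unary plus, total is overwritten to 50
total = 100;
total += increment; // fixed behavior: accumulate, total becomes 150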
@ -52,7 +52,6 @@ import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNameModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.LocalNodeIdModule;
import org.elasticsearch.index.analysis.AnalysisModule;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;

@ -330,7 +329,6 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i

        ModulesBuilder modules = new ModulesBuilder();
        modules.add(new IndexNameModule(index));
        modules.add(new LocalNodeIdModule(localNodeId));
        modules.add(new IndexSettingsModule(index, indexSettings));
        // plugin modules must be added here, before others or we can get crazy injection errors...
        for (Module pluginModule : pluginsService.indexModules(indexSettings)) {

@ -338,7 +336,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
        }
        modules.add(new IndexStoreModule(indexSettings));
        modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
        modules.add(new SimilarityModule(indexSettings));
        modules.add(new SimilarityModule(index, indexSettings));
        modules.add(new IndexCacheModule(indexSettings));
        modules.add(new IndexModule());
@ -29,12 +29,10 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;

@ -42,9 +40,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.*;
import java.util.concurrent.ScheduledFuture;

/**
 *
 */
public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {

    /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). */

@ -83,6 +78,12 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
    /** How frequently we check shards to find inactive ones (default: 30 seconds). */
    public static final String SHARD_INACTIVE_INTERVAL_TIME_SETTING = "indices.memory.interval";

    /** Once a shard becomes inactive, we reduce the {@code IndexWriter} buffer to this value (500 KB) to let active shards use the heap instead. */
    public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb", "INACTIVE_SHARD_INDEXING_BUFFER");

    /** Once a shard becomes inactive, we reduce the {@code Translog} buffer to this value (1 KB) to let active shards use the heap instead. */
    public static final ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb", "INACTIVE_SHARD_TRANSLOG_BUFFER");

    private final ThreadPool threadPool;
    private final IndicesService indicesService;

@ -164,7 +165,6 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {

        this.statusChecker = new ShardsIndicesStatusChecker();

        logger.debug("using indexing buffer size [{}], with {} [{}], {} [{}], {} [{}], {} [{}]",
                this.indexingBuffer,
                MIN_SHARD_INDEX_BUFFER_SIZE_SETTING, this.minShardIndexBufferSize,

@ -175,7 +175,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {

    @Override
    protected void doStart() {
        // its fine to run it on the scheduler thread, no busy work
        // it's fine to run it on the scheduler thread, no busy work
        this.scheduler = threadPool.scheduleWithFixedDelay(statusChecker, interval);
    }

@ -240,6 +240,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
        return null;
    }

    /** set new indexing and translog buffers on this shard. this may cause the shard to refresh to free up heap. */
    protected void updateShardBuffers(ShardId shardId, ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) {
        final IndexShard shard = getShard(shardId);
        if (shard != null) {

@ -255,105 +256,86 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
        }
    }

    /** returns the current translog status (generation id + ops) for the given shard id. Returns null if unavailable. */
    protected ShardIndexingStatus getTranslogStatus(ShardId shardId) {
    /** returns {@link IndexShard#getActive} if the shard exists, else null */
    protected Boolean getShardActive(ShardId shardId) {
        final IndexShard indexShard = getShard(shardId);
        if (indexShard == null) {
            return null;
        }
        final Translog translog;
        try {
            translog = indexShard.getTranslog();
        } catch (EngineClosedException e) {
            // not ready yet to be checked for activity
            return null;
        }

        ShardIndexingStatus status = new ShardIndexingStatus();
        status.translogId = translog.currentFileGeneration();
        status.translogNumberOfOperations = translog.totalOperations();
        return status;
        return indexShard.getActive();
    }

    // used for tests
    void forceCheck() {
    /** check if any shards active status changed, now. */
    public void forceCheck() {
        statusChecker.run();
    }

    class ShardsIndicesStatusChecker implements Runnable {

        private final Map<ShardId, ShardIndexingStatus> shardsIndicesStatus = new HashMap<>();
        // True if the shard was active last time we checked
        private final Map<ShardId, Boolean> shardWasActive = new HashMap<>();

        @Override
        public void run() {
        public synchronized void run() {
            EnumSet<ShardStatusChangeType> changes = purgeDeletedAndClosedShards();

            final List<ShardId> activeToInactiveIndexingShards = new ArrayList<>();
            final int activeShards = updateShardStatuses(changes, activeToInactiveIndexingShards);
            for (ShardId indexShard : activeToInactiveIndexingShards) {
                markShardAsInactive(indexShard);
            }
            updateShardStatuses(changes);

            if (changes.isEmpty() == false) {
                // Something changed: recompute indexing buffers:
                calcAndSetShardBuffers(activeShards, "[" + changes + "]");
                calcAndSetShardBuffers("[" + changes + "]");
            }
        }

        /**
         * goes through all existing shards and check whether the changes their active status
         *
         * @return the current count of active shards
         * goes through all existing shards and check whether there are changes in their active status
         */
        private int updateShardStatuses(EnumSet<ShardStatusChangeType> changes, List<ShardId> activeToInactiveIndexingShards) {
            int activeShards = 0;
        private void updateShardStatuses(EnumSet<ShardStatusChangeType> changes) {
            for (ShardId shardId : availableShards()) {

                final ShardIndexingStatus currentStatus = getTranslogStatus(shardId);
                // Is the shard active now?
                Boolean isActive = getShardActive(shardId);

                if (currentStatus == null) {
                if (isActive == null) {
                    // shard was closed..
                    continue;
                }

                ShardIndexingStatus status = shardsIndicesStatus.get(shardId);
                if (status == null) {
                    status = currentStatus;
                    shardsIndicesStatus.put(shardId, status);
                // Was the shard active last time we checked?
                Boolean wasActive = shardWasActive.get(shardId);

                if (wasActive == null) {
                    // First time we are seeing this shard
                    shardWasActive.put(shardId, isActive);
                    changes.add(ShardStatusChangeType.ADDED);
                } else {
                    final boolean lastActiveIndexing = status.activeIndexing;
                    status.updateWith(currentTimeInNanos(), currentStatus, inactiveTime.nanos());
                    if (lastActiveIndexing && (status.activeIndexing == false)) {
                        activeToInactiveIndexingShards.add(shardId);
                        changes.add(ShardStatusChangeType.BECAME_INACTIVE);
                        logger.debug("marking shard {} as inactive (inactive_time[{}]) indexing wise, setting size to [{}]",
                                shardId,
                                inactiveTime, EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER);
                    } else if ((lastActiveIndexing == false) && status.activeIndexing) {
                } else if (isActive) {
                    // Shard is active now
                    if (wasActive == false) {
                        // Shard became active itself, since we last checked (due to new indexing op arriving)
                        changes.add(ShardStatusChangeType.BECAME_ACTIVE);
                        logger.debug("marking shard {} as active indexing wise", shardId);
                        shardWasActive.put(shardId, true);
                    } else if (checkIdle(shardId, inactiveTime.nanos()) == Boolean.TRUE) {
                        // Make shard inactive now
                        changes.add(ShardStatusChangeType.BECAME_INACTIVE);
                        logger.debug("marking shard {} as inactive (inactive_time[{}]) indexing wise",
                                shardId,
                                inactiveTime);
                        shardWasActive.put(shardId, false);
                    }
                }

                if (status.activeIndexing) {
                    activeShards++;
                }
            }

            return activeShards;
        }

        /**
         * purge any existing statuses that are no longer updated
         *
         * @return true if any change
         * @return the changes applied
         */
        private EnumSet<ShardStatusChangeType> purgeDeletedAndClosedShards() {
            EnumSet<ShardStatusChangeType> changes = EnumSet.noneOf(ShardStatusChangeType.class);

            Iterator<ShardId> statusShardIdIterator = shardsIndicesStatus.keySet().iterator();
            Iterator<ShardId> statusShardIdIterator = shardWasActive.keySet().iterator();
            while (statusShardIdIterator.hasNext()) {
                ShardId shardId = statusShardIdIterator.next();
                if (shardAvailable(shardId) == false) {

@ -364,12 +346,25 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
            return changes;
        }

        private void calcAndSetShardBuffers(int activeShards, String reason) {
            if (activeShards == 0) {
        private void calcAndSetShardBuffers(String reason) {

            // Count how many shards are now active:
            int activeShardCount = 0;
            for (Map.Entry<ShardId, Boolean> ent : shardWasActive.entrySet()) {
                if (ent.getValue()) {
                    activeShardCount++;
                }
            }

            // TODO: we could be smarter here by taking into account how much RAM the IndexWriter on each shard
            // is actually using (using IW.ramBytesUsed), so that small indices (e.g. Marvel) would not
            // get the same indexing buffer as large indices. But it quickly gets tricky...
            if (activeShardCount == 0) {
                logger.debug("no active shards (reason={})", reason);
                return;
            }
            ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / activeShards);

            ByteSizeValue shardIndexingBufferSize = new ByteSizeValue(indexingBuffer.bytes() / activeShardCount);
            if (shardIndexingBufferSize.bytes() < minShardIndexBufferSize.bytes()) {
                shardIndexingBufferSize = minShardIndexBufferSize;
            }

@ -377,7 +372,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
                shardIndexingBufferSize = maxShardIndexBufferSize;
            }

            ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / activeShards);
            ByteSizeValue shardTranslogBufferSize = new ByteSizeValue(translogBuffer.bytes() / activeShardCount);
            if (shardTranslogBufferSize.bytes() < minShardTranslogBufferSize.bytes()) {
                shardTranslogBufferSize = minShardTranslogBufferSize;
            }

@ -385,11 +380,12 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
                shardTranslogBufferSize = maxShardTranslogBufferSize;
            }

            logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, activeShards, shardIndexingBufferSize, shardTranslogBufferSize);
            for (ShardId shardId : availableShards()) {
                ShardIndexingStatus status = shardsIndicesStatus.get(shardId);
                if (status == null || status.activeIndexing) {
                    updateShardBuffers(shardId, shardIndexingBufferSize, shardTranslogBufferSize);
            logger.debug("recalculating shard indexing buffer (reason={}), total is [{}] with [{}] active shards, each shard set to indexing=[{}], translog=[{}]", reason, indexingBuffer, activeShardCount, shardIndexingBufferSize, shardTranslogBufferSize);

            for (Map.Entry<ShardId, Boolean> ent : shardWasActive.entrySet()) {
                if (ent.getValue()) {
                    // This shard is active
                    updateShardBuffers(ent.getKey(), shardIndexingBufferSize, shardTranslogBufferSize);
                }
            }
        }
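The sizing rule above splits the node-wide budget evenly over the shards currently marked active and clamps the result to the configured min/max. The same arithmetic as a standalone sketch (the method and parameter names are illustrative):

// Evenly split the budget, then clamp into [min, max].
static long perShardBufferBytes(long totalBudget, int activeShardCount, long min, long max) {
    long perShard = totalBudget / activeShardCount; // caller guarantees activeShardCount > 0
    return Math.max(min, Math.min(max, perShard));
}

// e.g. a 100mb budget across 4 active shards, clamped to [4mb, 512mb]:
// perShardBufferBytes(100L << 20, 4, 4L << 20, 512L << 20) == 25mb per shard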
@ -399,13 +395,14 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
        return System.nanoTime();
    }

    // update inactive indexing buffer size
    protected void markShardAsInactive(ShardId shardId) {
    /** ask this shard to check now whether it is inactive, and reduce its indexing and translog buffers if so. returns Boolean.TRUE if
     *  it did deactivate, Boolean.FALSE if it did not, and null if the shard is unknown */
    protected Boolean checkIdle(ShardId shardId, long inactiveTimeNS) {
        String ignoreReason = null;
        final IndexShard shard = getShard(shardId);
        if (shard != null) {
            try {
                shard.markAsInactive();
                return shard.checkIdle(inactiveTimeNS);
            } catch (EngineClosedException e) {
                // ignore
                ignoreReason = "EngineClosedException";

@ -419,47 +416,10 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<IndexingMemoryController> {
        if (ignoreReason != null) {
            logger.trace("ignore [{}] while marking shard {} as inactive", ignoreReason, shardId);
        }
        return null;
    }

    private static enum ShardStatusChangeType {
        ADDED, DELETED, BECAME_ACTIVE, BECAME_INACTIVE
    }

    static class ShardIndexingStatus {
        long translogId = -1;
        long translogNumberOfOperations = -1;
        boolean activeIndexing = true;
        long idleSinceNanoTime = -1; // contains the first time we saw this shard with no operations done on it

        /** update status based on a new sample. updates all internal variables */
        public void updateWith(long currentNanoTime, ShardIndexingStatus current, long inactiveNanoInterval) {
            final boolean idle = (translogId == current.translogId && translogNumberOfOperations == current.translogNumberOfOperations);
            if (activeIndexing && idle) {
                // no indexing activity detected.
                if (idleSinceNanoTime < 0) {
                    // first time we see this, start the clock.
                    idleSinceNanoTime = currentNanoTime;
                } else if ((currentNanoTime - idleSinceNanoTime) > inactiveNanoInterval) {
                    // shard is inactive. mark it as such.
                    activeIndexing = false;
                }
            } else if (activeIndexing == false // we weren't indexing before
                    && idle == false // but we do now
                    && current.translogNumberOfOperations > 0 // but only if we're really sure - see note below
                    ) {
                // since we sync flush once a shard becomes inactive, the translog id can change, however that
                // doesn't mean that an indexing operation has happened. Note that if we're really unlucky and a flush happens
                // immediately after an indexing operation we may not become active immediately. The following
                // indexing operation will mark the shard as active, so it's OK. If that one doesn't come, we might as well stay
                // inactive

                activeIndexing = true;
                idleSinceNanoTime = -1;
            }

            translogId = current.translogId;
            translogNumberOfOperations = current.translogNumberOfOperations;
        }
    }
}
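Activity tracking has moved out of the controller: instead of diffing translog generation and op counts through the removed ShardIndexingStatus, the controller now polls the shard, which knows best when it last indexed. The three-state contract of the new checkIdle hook, as a sketch of the caller's side:

// null  -> shard unknown or engine closed: skip it this round
// TRUE  -> shard just went inactive: its buffers were shrunk to the INACTIVE_SHARD_* values
// FALSE -> shard is still active: it keeps its share of the budget
Boolean deactivated = checkIdle(shardId, inactiveTime.nanos());
if (deactivated == Boolean.TRUE) {
    changes.add(ShardStatusChangeType.BECAME_INACTIVE);
}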
@ -19,7 +19,6 @@

package org.elasticsearch.monitor.fs;

import com.google.common.collect.Iterators;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -30,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

@ -235,7 +235,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Streamable, ToXContent {

    @Override
    public Iterator<Path> iterator() {
        return Iterators.forArray(paths);
        return Arrays.stream(paths).iterator();
    }

    public static FsInfo readFsInfo(StreamInput in) throws IOException {
@ -19,7 +19,6 @@

package org.elasticsearch.monitor.jvm;

import com.google.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@ -32,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
import java.lang.management.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;

@ -378,7 +378,7 @@ public class JvmStats implements Streamable, ToXContent {

    @Override
    public Iterator<GarbageCollector> iterator() {
        return Iterators.forArray(collectors);
        return Arrays.stream(collectors).iterator();
    }
}

@ -546,7 +546,7 @@ public class JvmStats implements Streamable, ToXContent {

    @Override
    public Iterator<MemoryPool> iterator() {
        return Iterators.forArray(pools);
        return Arrays.stream(pools).iterator();
    }

    @Override
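Both monitor classes drop Guava's Iterators.forArray for the JDK 8 equivalent; the stream-backed iterator is likewise read-only. A self-contained comparison (the sample array is illustrative):

import java.util.Arrays;
import java.util.Iterator;

String[] pools = {"young", "survivor", "old"};

// Before (Guava): Iterator<String> it = Iterators.forArray(pools);
// After (JDK 8):
Iterator<String> it = Arrays.stream(pools).iterator();
while (it.hasNext()) {
    System.out.println(it.next());
}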
@@ -83,42 +83,20 @@ public class InternalSettingsPreparer {
         initializeSettings(output, input, true);
         Environment environment = new Environment(output.build());

-        // TODO: can we simplify all of this and have a single filename, which is looked up in the config dir?
-        boolean loadFromEnv = true;
-        if (useSystemProperties(input)) {
-            // if its default, then load it, but also load form env
-            if (Strings.hasText(System.getProperty("es.default.config"))) {
-                // TODO: we don't allow multiple config files, but having loadFromEnv true here allows just that
-                loadFromEnv = true;
-                output.loadFromPath(environment.configFile().resolve(System.getProperty("es.default.config")));
-            }
-            // TODO: these should be elseifs so that multiple files cannot be loaded
-            // if explicit, just load it and don't load from env
-            if (Strings.hasText(System.getProperty("es.config"))) {
-                loadFromEnv = false;
-                output.loadFromPath(environment.configFile().resolve(System.getProperty("es.config")));
-            }
-            if (Strings.hasText(System.getProperty("elasticsearch.config"))) {
-                loadFromEnv = false;
-                output.loadFromPath(environment.configFile().resolve(System.getProperty("elasticsearch.config")));
-            }
-        }
-        if (loadFromEnv) {
-            boolean settingsFileFound = false;
-            Set<String> foundSuffixes = new HashSet<>();
-            for (String allowedSuffix : ALLOWED_SUFFIXES) {
-                Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
-                if (Files.exists(path)) {
-                    if (!settingsFileFound) {
-                        output.loadFromPath(path);
-                    }
-                    settingsFileFound = true;
-                    foundSuffixes.add(allowedSuffix);
-                }
-            }
-            if (foundSuffixes.size() > 1) {
-                throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
-            }
-        }
+        boolean settingsFileFound = false;
+        Set<String> foundSuffixes = new HashSet<>();
+        for (String allowedSuffix : ALLOWED_SUFFIXES) {
+            Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
+            if (Files.exists(path)) {
+                if (!settingsFileFound) {
+                    output.loadFromPath(path);
+                }
+                settingsFileFound = true;
+                foundSuffixes.add(allowedSuffix);
+            }
+        }
+        if (foundSuffixes.size() > 1) {
+            throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
+        }

         // re-initialize settings now that the config file has been loaded
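The rewritten block drops the es.config/es.default.config system-property lookups in favour of probing the config directory for a single elasticsearch.<suffix> file and failing on ambiguity. A standalone sketch of that probe, with a hypothetical suffix list standing in for ALLOWED_SUFFIXES and a plain runtime exception standing in for SettingsException:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.HashSet;
    import java.util.Set;

    public class ConfigProbeSketch {
        // hypothetical suffix list mirroring the ALLOWED_SUFFIXES constant in the diff
        private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json", ".properties"};

        public static Path findSettingsFile(Path configDir) {
            Path found = null;
            Set<String> foundSuffixes = new HashSet<>();
            for (String suffix : ALLOWED_SUFFIXES) {
                Path candidate = configDir.resolve("elasticsearch" + suffix);
                if (Files.exists(candidate)) {
                    if (found == null) {
                        found = candidate;      // first hit wins
                    }
                    foundSuffixes.add(suffix);
                }
            }
            if (foundSuffixes.size() > 1) {
                // mirror the diff: multiple settings files are a hard error
                throw new IllegalStateException("multiple settings files found with suffixes: " + foundSuffixes);
            }
            return found;                        // null when no settings file exists
        }

        public static void main(String[] args) {
            System.out.println(findSettingsFile(Paths.get("/etc/elasticsearch")));
        }
    }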
@@ -19,14 +19,8 @@

 package org.elasticsearch.plugins;

-import com.google.common.collect.Iterators;
-
 import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.Build;
-import org.elasticsearch.ElasticsearchCorruptionException;
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.Version;
+import org.elasticsearch.*;
 import org.elasticsearch.bootstrap.JarHell;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.cli.Terminal;

@@ -41,21 +35,12 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.net.MalformedURLException;
 import java.net.URL;
-import java.nio.file.DirectoryStream;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.SimpleFileVisitor;
+import java.nio.file.*;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.attribute.PosixFileAttributeView;
 import java.nio.file.attribute.PosixFilePermission;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Random;
-import java.util.Set;
+import java.util.*;
+import java.util.stream.StreamSupport;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
@@ -90,10 +75,10 @@ public class PluginManager {
             "analysis-phonetic",
             "analysis-smartcn",
             "analysis-stempel",
-            "cloud-gce",
             "delete-by-query",
             "discovery-azure",
             "discovery-ec2",
+            "discovery-gce",
             "discovery-multicast",
             "lang-expression",
             "lang-groovy",
@@ -225,7 +210,6 @@ public class PluginManager {
     }

     private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile) throws IOException {
-
         // unzip plugin to a staging temp dir, named for the plugin
         Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
         Path root = tmp.resolve(pluginHandle.name);
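extract() stages the unzip under the environment's tmp dir before anything touches the live plugins directory. A sketch of that stage-then-move pattern with hypothetical paths (the real code extracts zip entries where the comment sits):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class StagingSketch {
        public static void main(String[] args) throws IOException {
            // stage into a fresh temp directory first, so a failed extraction
            // never leaves a half-written plugin in the live plugins dir
            Path staging = Files.createTempDirectory("plugin-staging");
            Path root = staging.resolve("my-plugin");   // hypothetical plugin name
            Files.createDirectories(root);
            // ... unzip archive entries under root here ...
            // only after extraction succeeds does anything move to the final home
            Path dest = Paths.get("plugins").resolve("my-plugin");
            Files.createDirectories(dest.getParent());
            Files.move(root, dest);                     // fails if dest already exists
        }
    }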
@@ -255,22 +239,74 @@ public class PluginManager {
         terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());

         // cleanup
-        IOUtils.rm(tmp, pluginFile);
+        tryToDeletePath(terminal, tmp, pluginFile);

         // take care of bin/ by moving and applying permissions if needed
-        Path binFile = extractLocation.resolve("bin");
-        if (Files.isDirectory(binFile)) {
-            Path toLocation = pluginHandle.binDir(environment);
-            terminal.println(VERBOSE, "Found bin, moving to %s", toLocation.toAbsolutePath());
-            if (Files.exists(toLocation)) {
-                IOUtils.rm(toLocation);
+        Path sourcePluginBinDirectory = extractLocation.resolve("bin");
+        Path destPluginBinDirectory = pluginHandle.binDir(environment);
+        boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory);
+        if (needToCopyBinDirectory) {
+            if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) {
+                tryToDeletePath(terminal, extractLocation);
+                throw new IOException("plugin bin directory " + destPluginBinDirectory + " is not a directory");
             }
+
+            try {
+                copyBinDirectory(sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal);
+            } catch (IOException e) {
+                // rollback and remove potentially before installed leftovers
+                terminal.printError("Error copying bin directory [%s] to [%s], cleaning up, reason: %s", sourcePluginBinDirectory, pluginHandle.binDir(environment), e.getMessage());
+                tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
+                throw e;
+            }
+        }
+
+        Path sourceConfigDirectory = extractLocation.resolve("config");
+        Path destConfigDirectory = pluginHandle.configDir(environment);
+        boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory);
+        if (needToCopyConfigDirectory) {
+            if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) {
+                tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
+                throw new IOException("plugin config directory " + destConfigDirectory + " is not a directory");
+            }
+
+            try {
+                terminal.println(VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath());
+                moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new");
+                terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, destConfigDirectory.toAbsolutePath());
+            } catch (IOException e) {
+                terminal.printError("Error copying config directory [%s] to [%s], cleaning up, reason: %s", sourceConfigDirectory, pluginHandle.binDir(environment), e.getMessage());
+                tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment), pluginHandle.configDir(environment));
+                throw e;
+            }
+        }
+    }
+
+    private void tryToDeletePath(Terminal terminal, Path ... paths) {
+        for (Path path : paths) {
+            try {
+                IOUtils.rm(path);
+            } catch (IOException e) {
+                terminal.printError(e);
+            }
+        }
+    }
+
+    private void copyBinDirectory(Path sourcePluginBinDirectory, Path destPluginBinDirectory, String pluginName, Terminal terminal) throws IOException {
+        boolean canCopyFromSource = Files.exists(sourcePluginBinDirectory) && Files.isReadable(sourcePluginBinDirectory) && Files.isDirectory(sourcePluginBinDirectory);
+        if (canCopyFromSource) {
+            terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath());
+            if (Files.exists(destPluginBinDirectory)) {
+                IOUtils.rm(destPluginBinDirectory);
+            }
             try {
-                FileSystemUtils.move(binFile, toLocation);
+                Files.createDirectories(destPluginBinDirectory.getParent());
+                FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory);
             } catch (IOException e) {
-                throw new IOException("Could not move [" + binFile + "] to [" + toLocation + "]", e);
+                throw new IOException("Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]", e);
             }
-            if (Environment.getFileStore(toLocation).supportsFileAttributeView(PosixFileAttributeView.class)) {
+            if (Environment.getFileStore(destPluginBinDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) {
                 // add read and execute permissions to existing perms, so execution will work.
                 // read should generally be set already, but set it anyway: don't rely on umask...
                 final Set<PosixFilePermission> executePerms = new HashSet<>();
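The new tryToDeletePath helper (added in full above) makes cleanup best-effort: deletion failures are printed, not thrown, so a rollback can never mask the exception that triggered it. A JDK-only sketch of the same idea, with a hypothetical recursive delete standing in for Lucene's IOUtils.rm:

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;

    public class CleanupSketch {
        // best-effort: report and continue instead of propagating, so a cleanup
        // failure never hides the exception that caused the cleanup
        static void tryToDelete(Path... paths) {
            for (Path path : paths) {
                try {
                    recursiveDelete(path);
                } catch (IOException e) {
                    System.err.println("could not delete " + path + ": " + e);
                }
            }
        }

        static void recursiveDelete(Path root) throws IOException {
            if (Files.notExists(root)) {
                return;
            }
            Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    Files.delete(file);
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                    Files.delete(dir);      // children are gone by now
                    return FileVisitResult.CONTINUE;
                }
            });
        }
    }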
@@ -280,7 +316,7 @@ public class PluginManager {
             executePerms.add(PosixFilePermission.OWNER_EXECUTE);
             executePerms.add(PosixFilePermission.GROUP_EXECUTE);
             executePerms.add(PosixFilePermission.OTHERS_EXECUTE);
-            Files.walkFileTree(toLocation, new SimpleFileVisitor<Path>() {
+            Files.walkFileTree(destPluginBinDirectory, new SimpleFileVisitor<Path>() {
                 @Override
                 public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                     if (attrs.isRegularFile()) {
@@ -294,15 +330,7 @@ public class PluginManager {
             } else {
                 terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
             }
-            terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, toLocation.toAbsolutePath());
-        }
-
-        Path configFile = extractLocation.resolve("config");
-        if (Files.isDirectory(configFile)) {
-            Path configDestLocation = pluginHandle.configDir(environment);
-            terminal.println(VERBOSE, "Found config, moving to %s", configDestLocation.toAbsolutePath());
-            moveFilesWithoutOverwriting(configFile, configDestLocation, ".new");
-            terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, configDestLocation.toAbsolutePath());
+            terminal.println(VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath());
         }
     }
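The permission pass above adds read and execute bits to every regular file under the plugin's bin directory, guarded by a POSIX-support check on the file store. A plain-JDK sketch of such a walk over a hypothetical bin directory:

    import java.io.IOException;
    import java.nio.file.*;
    import java.nio.file.attribute.BasicFileAttributes;
    import java.nio.file.attribute.PosixFilePermission;
    import java.util.HashSet;
    import java.util.Set;

    public class PermissionsSketch {
        public static void main(String[] args) throws IOException {
            Path binDir = Paths.get("plugins", "my-plugin", "bin");   // hypothetical
            // add read+execute to whatever permissions already exist; don't rely on umask
            final Set<PosixFilePermission> execPerms = new HashSet<>();
            execPerms.add(PosixFilePermission.OWNER_READ);
            execPerms.add(PosixFilePermission.GROUP_READ);
            execPerms.add(PosixFilePermission.OTHERS_READ);
            execPerms.add(PosixFilePermission.OWNER_EXECUTE);
            execPerms.add(PosixFilePermission.GROUP_EXECUTE);
            execPerms.add(PosixFilePermission.OTHERS_EXECUTE);
            Files.walkFileTree(binDir, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (attrs.isRegularFile()) {
                        // merge with the existing bits rather than overwrite them
                        Set<PosixFilePermission> perms = new HashSet<>(Files.getPosixFilePermissions(file));
                        perms.addAll(execPerms);
                        Files.setPosixFilePermissions(file, perms);
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
        }
    }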
@@ -437,7 +465,7 @@ public class PluginManager {
         }

         try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
-            return Iterators.toArray(stream.iterator(), Path.class);
+            return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]);
         }
     }
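Here Guava's Iterators.toArray over a DirectoryStream becomes a StreamSupport pipeline; DirectoryStream is Iterable, so its spliterator feeds a JDK stream directly. A self-contained sketch against a hypothetical plugins directory:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.stream.StreamSupport;

    public class ListPluginsSketch {
        public static void main(String[] args) throws IOException {
            Path pluginsDir = Paths.get("plugins");                // hypothetical
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDir)) {
                // Path[]::new is the tidier spelling of length -> new Path[length]
                Path[] entries = StreamSupport.stream(stream.spliterator(), false)
                                              .toArray(Path[]::new);
                for (Path entry : entries) {
                    System.out.println(entry.getFileName());
                }
            }
        }
    }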
@@ -58,6 +58,7 @@ public class RestNodesHotThreadsAction extends BaseRestHandler {
         nodesHotThreadsRequest.type(request.param("type", nodesHotThreadsRequest.type()));
         nodesHotThreadsRequest.interval(TimeValue.parseTimeValue(request.param("interval"), nodesHotThreadsRequest.interval(), "interval"));
         nodesHotThreadsRequest.snapshots(request.paramAsInt("snapshots", nodesHotThreadsRequest.snapshots()));
+        nodesHotThreadsRequest.timeout(request.param("timeout"));
         client.admin().cluster().nodesHotThreads(nodesHotThreadsRequest, new RestResponseListener<NodesHotThreadsResponse>(channel) {
             @Override
             public RestResponse buildResponse(NodesHotThreadsResponse response) throws Exception {
@@ -87,6 +87,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
         }

         final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
+        nodesInfoRequest.timeout(request.param("timeout"));
         // shortcut, dont do checks if only all is specified
         if (metrics.size() == 1 && metrics.contains("_all")) {
             nodesInfoRequest.all();
@@ -60,6 +60,7 @@ public class RestNodesStatsAction extends BaseRestHandler {
         Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));

         NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds);
+        nodesStatsRequest.timeout(request.param("timeout"));

         if (metrics.size() == 1 && metrics.contains("_all")) {
             nodesStatsRequest.all();
@@ -43,6 +43,7 @@ public class RestClusterStatsAction extends BaseRestHandler {
     @Override
     public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
         ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null));
+        clusterStatsRequest.timeout(request.param("timeout"));
         client.admin().cluster().clusterStats(clusterStatsRequest, new RestToXContentListener<ClusterStatsResponse>(channel));
     }
 }
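The four REST hunks above all forward an optional timeout request parameter (a duration string such as 30s) onto the corresponding transport request; the real handlers delegate parsing to TimeValue.parseTimeValue, visible in the hot-threads hunk. As a hedged illustration only, a hypothetical parser for such duration strings, not the ES class:

    import java.util.concurrent.TimeUnit;

    public class TimeoutSketch {
        // hypothetical parser for strings like "30s" or "500ms", defaulting when absent
        static long parseTimeoutMillis(String value, long defaultMillis) {
            if (value == null || value.isEmpty()) {
                return defaultMillis;             // parameter not supplied
            }
            if (value.endsWith("ms")) {           // check "ms" before the bare "s" suffix
                return Long.parseLong(value.substring(0, value.length() - 2));
            }
            if (value.endsWith("s")) {
                return TimeUnit.SECONDS.toMillis(Long.parseLong(value.substring(0, value.length() - 1)));
            }
            if (value.endsWith("m")) {
                return TimeUnit.MINUTES.toMillis(Long.parseLong(value.substring(0, value.length() - 1)));
            }
            return Long.parseLong(value);         // bare number: treat as millis
        }

        public static void main(String[] args) {
            System.out.println(parseTimeoutMillis("30s", 15000));   // 30000
            System.out.println(parseTimeoutMillis(null, 15000));    // 15000
        }
    }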
@@ -19,7 +19,7 @@

 package org.elasticsearch.search.aggregations.pipeline.movavg;

-import com.google.common.collect.EvictingQueue;
+import org.elasticsearch.common.collect.EvictingQueue;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.aggregations.Aggregation;

@@ -102,7 +102,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
         InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();

         List newBuckets = new ArrayList<>();
-        EvictingQueue<Double> values = EvictingQueue.create(this.window);
+        EvictingQueue<Double> values = new EvictingQueue<>(this.window);

         long lastValidKey = 0;
         int lastValidPosition = 0;

@@ -202,7 +202,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
     private MovAvgModel minimize(List<? extends InternalHistogram.Bucket> buckets, InternalHistogram histo, MovAvgModel model) {

         int counter = 0;
-        EvictingQueue<Double> values = EvictingQueue.create(window);
+        EvictingQueue<Double> values = new EvictingQueue<>(this.window);

         double[] test = new double[window];
         ListIterator<? extends InternalHistogram.Bucket> iter = buckets.listIterator(buckets.size());
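These hunks swap Guava's EvictingQueue.create(n) for an in-house org.elasticsearch.common.collect.EvictingQueue with an ordinary constructor. The sliding-window behaviour both provide can be sketched as a fixed-capacity queue that evicts its oldest element on overflow; this is an illustrative stand-in, not the actual ES class:

    import java.util.ArrayDeque;

    // illustrative fixed-capacity queue: adding beyond capacity evicts the oldest element
    public class EvictingQueueSketch<T> {
        private final ArrayDeque<T> deque;
        private final int capacity;

        public EvictingQueueSketch(int capacity) {
            this.capacity = capacity;
            this.deque = new ArrayDeque<>(capacity);
        }

        public void offer(T element) {
            if (deque.size() == capacity) {
                deque.pollFirst();       // evict oldest to make room
            }
            deque.addLast(element);
        }

        public int size() {
            return deque.size();
        }

        public static void main(String[] args) {
            EvictingQueueSketch<Double> window = new EvictingQueueSketch<>(3);
            for (double v : new double[]{1, 2, 3, 4, 5}) {
                window.offer(v);
            }
            System.out.println(window.size());   // 3: only the newest `capacity` values remain
            System.out.println(window.deque);    // [3.0, 4.0, 5.0]
        }
    }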
@ -19,7 +19,7 @@
|
|||
|
||||
package org.elasticsearch.search.aggregations.pipeline.movavg;
|
||||
|
||||
import com.google.common.collect.EvictingQueue;
|
||||
import org.elasticsearch.common.collect.EvictingQueue;
|
||||
import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
|
||||
|
||||
/**
|
||||
|
|
|
@@ -19,8 +19,8 @@

 package org.elasticsearch.search.aggregations.pipeline.serialdiff;

-import com.google.common.collect.EvictingQueue;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.EvictingQueue;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.aggregations.InternalAggregation;

@@ -86,7 +86,7 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {
         InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();

         List newBuckets = new ArrayList<>();
-        EvictingQueue<Double> lagWindow = EvictingQueue.create(lag);
+        EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag);
         int counter = 0;

         for (InternalHistogram.Bucket bucket : buckets) {
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.internal;

 import com.carrotsearch.hppc.IntObjectHashMap;
-import com.google.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -30,6 +29,7 @@ import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.SearchShardTarget;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.IdentityHashMap;
 import java.util.Iterator;
 import java.util.Map;

@@ -156,7 +156,7 @@ public class InternalSearchHits implements SearchHits {

     @Override
     public Iterator<SearchHit> iterator() {
-        return Iterators.forArray(hits());
+        return Arrays.stream(hits()).iterator();
     }

     public InternalSearchHit[] internalHits() {
@@ -52,8 +52,8 @@ grant codeBase "${es.security.plugin.discovery-ec2}" {
     permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
 };

-grant codeBase "${es.security.plugin.cloud-gce}" {
-    // needed because of problems in cloud-gce
+grant codeBase "${es.security.plugin.discovery-gce}" {
+    // needed because of problems in discovery-gce
     permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
 };
Some files were not shown because too many files have changed in this diff.